diff --git a/.taskcluster.yml b/.taskcluster.yml index f8bbc9939f78..f5ce9752aba1 100644 --- a/.taskcluster.yml +++ b/.taskcluster.yml @@ -48,12 +48,18 @@ tasks: tags: $if: 'tasks_for == "hg-push"' - then: {createdForUser: "${ownerEmail}"} + then: + createdForUser: "${ownerEmail}" + kind: decision-task else: $if: 'tasks_for == "action"' then: createdForUser: '${ownerEmail}' kind: 'action-callback' + else: + $if: 'tasks_for == "cron"' + then: + kind: cron-task routes: $flatten: diff --git a/Cargo.lock b/Cargo.lock index cec05e5f2680..ce37071015e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -69,7 +69,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -79,7 +79,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -88,19 +88,19 @@ name = "audioipc" version = "0.2.4" dependencies = [ "bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", "cubeb 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.66 (git+https://github.com/servo/serde?branch=deserialize_from_enums8)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -111,11 +111,11 @@ dependencies = [ "audioipc 0.2.4", "cubeb-backend 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", ] @@ -124,15 +124,15 @@ name = "audioipc-server" version = "0.2.3" dependencies = [ "audioipc 0.2.4", - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", "cubeb-core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -221,11 +221,6 @@ name = "bit-vec" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "bitflags" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "bitflags" version = "1.0.1" @@ -253,11 +248,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bytes" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -266,7 +261,7 @@ version = "0.3.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bzip2-sys 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -275,7 +270,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -311,7 +306,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -340,7 +335,7 @@ dependencies = [ [[package]] name = "cookie" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", @@ -352,7 +347,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation-sys 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -368,7 +363,7 @@ dependencies = [ "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "core-foundation 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -379,7 +374,7 @@ dependencies = [ "core-foundation 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "core-graphics 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -415,6 +410,15 @@ dependencies = [ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crossbeam-deque" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-epoch 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "crossbeam-epoch" version = "0.3.1" @@ -429,6 +433,19 @@ dependencies = [ "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crossbeam-epoch" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "crossbeam-utils" version = "0.2.2" @@ -437,6 +454,14 @@ dependencies = [ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = 
"crossbeam-utils" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "cssparser" version = "0.24.0" @@ -564,7 +589,7 @@ name = "devd-rs" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "nom 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -613,7 +638,7 @@ dependencies = [ "gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.66 (git+https://github.com/servo/serde?branch=deserialize_from_enums8)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -718,7 +743,7 @@ name = "flate2" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "miniz_oxide_c_api 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -737,7 +762,7 @@ name = "freetype" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -746,29 +771,27 @@ version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "fuchsia-zircon" -version = "0.2.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "fuchsia-zircon-sys" -version = "0.2.0" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "futures" -version = "0.1.18" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -776,7 +799,7 @@ name = "futures-cpupool" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -809,7 +832,7 @@ dependencies = [ "base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.2.25 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "mozprofile 0.3.0", @@ -831,7 +854,7 @@ dependencies = [ "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "cssparser 0.24.0 (registry+https://github.com/rust-lang/crates.io-index)", "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "malloc_size_of 0.0.1", "nsstring 0.1.0", @@ -914,11 +937,38 @@ name = "glob" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "h2" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hashglobe" version = "0.1.0" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "http" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+dependencies = [ + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -936,20 +986,27 @@ dependencies = [ [[package]] name = "hyper" -version = "0.10.13" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "h2 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + 
"tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -967,12 +1024,17 @@ dependencies = [ "unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "indexmap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "iovec" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -998,7 +1060,7 @@ dependencies = [ "env_logger 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "mozjs_sys 0.0.0", "num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1073,11 +1135,6 @@ name = "lalrpop-util" version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "lazy_static" version = "1.0.1" @@ -1088,9 +1145,14 @@ name 
= "lazycell" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "lazycell" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "libc" -version = "0.2.39" +version = "0.2.43" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1107,7 +1169,7 @@ name = "libudev" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libudev-sys 0.1.3", ] @@ -1116,7 +1178,7 @@ name = "libudev-sys" version = "0.1.3" dependencies = [ "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1125,7 +1187,7 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1141,7 +1203,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "lmdb-sys 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1151,7 +1213,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
dependencies = [ "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1215,7 +1277,7 @@ name = "memchr" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1223,7 +1285,7 @@ name = "memchr" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1233,7 +1295,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1242,21 +1304,13 @@ name = "memoffset" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "mime" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "miniz_oxide" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "adler32 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 
0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1266,23 +1320,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", "crc 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "miniz_oxide 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "mio" -version = "0.6.9" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1291,8 +1347,8 @@ name = "mio-uds" 
version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1301,7 +1357,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1315,7 +1371,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "mozjs_sys" version = "0.0.0" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1397,14 +1453,12 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1528,7 +1582,7 @@ name = "num_cpus" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1559,8 +1613,8 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1607,7 +1661,7 @@ version = "0.7.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1681,7 +1735,7 @@ dependencies = [ name = "pulse-ffi" version = "0.1.0" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1712,11 +1766,22 @@ dependencies = [ [[package]] name = "rand" -version = "0.3.18" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1735,9 +1800,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1819,7 +1884,7 @@ dependencies = [ name = "rsdparsa_capi" version = "0.1.0" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "nserror 0.1.0", "rsdparsa 0.1.0", @@ -1969,6 +2034,11 @@ name = "slab" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "slab" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "smallbitvec" version = "2.1.1" @@ -1987,6 +2057,11 @@ name = "stable_deref_trait" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
+[[package]] +name = "string" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "string_cache" version = "0.7.3" @@ -2111,7 +2186,7 @@ dependencies = [ "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "geckoservo 0.0.1", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "malloc_size_of 0.0.1", "num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2170,7 +2245,7 @@ name = "tempdir" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2188,7 +2263,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2233,34 +2308,144 @@ name = "time" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "tokio-core" +name = "tokio" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - 
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-fs 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-core" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 
(registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-executor" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-fs" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-io" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-reactor" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 
0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-tcp" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-threadpool" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-timer" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-udp" +version = "0.1.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2268,15 +2453,15 @@ name = "tokio-uds" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", - "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2288,13 +2473,8 @@ 
dependencies = [ ] [[package]] -name = "traitobject" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "typeable" -version = "0.1.2" +name = "try-lock" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2306,10 +2486,10 @@ dependencies = [ "core-foundation 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "core-foundation-sys 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "devd-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", "runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2327,14 +2507,6 @@ dependencies = [ "arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "unicase" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "unicode-bidi" version = "0.3.4" @@ -2391,7 +2563,7 @@ name = "uuid" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2404,11 +2576,6 @@ name = "vec_map" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = 
"version_check" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "void" version = "1.0.2" @@ -2423,13 +2590,25 @@ dependencies = [ "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "want" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "webdriver" version = "0.36.0" dependencies = [ "base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cookie 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", + "cookie 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2437,6 +2616,7 @@ dependencies = [ "serde_derive 1.0.66 (git+https://github.com/servo/serde?branch=deserialize_from_enums8)", "serde_json 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2524,7 +2704,7 @@ name = "which" version = "1.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2593,7 +2773,7 @@ dependencies = [ name = "xpcom" version = "0.1.0" dependencies = [ - "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "nserror 0.1.0", "nsstring 0.1.0", "xpcom_macros 0.1.0", @@ -2656,13 +2836,12 @@ dependencies = [ "checksum binjs_meta 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "fd7ca5635f1c6f94aaef7de76cb834c5920578355ce41dbcaf731b7ebe348518" "checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c" "checksum bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" -"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" "checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" "checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707" "checksum boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8380105befe91099e6f69206164072c05bc92427ff6aa8a5171388317346dd75" "checksum build_const 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e90dc84f5e62d2ebe7676b83c22d33b6db8bd27340fb6ffbff0a364efa0cb9c9" "checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23" -"checksum bytes 0.4.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6" +"checksum bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8" "checksum bzip2 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3eafc42c44e0d827de6b1c131175098fe7fb53b8ce8a47e65cb3ea94688be24" "checksum bzip2-sys 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2c5162604199bbb17690ede847eaa6120a3f33d5ab4dcc8e7c25b16d849ae79b" "checksum cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = "2119ea4867bd2b8ed3aecab467709720b2d55b1bcfe09f772fd68066eaf15275" @@ -2672,7 +2851,7 @@ dependencies = [ "checksum clang-sys 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7f7c04e52c35222fffcc3a115b5daf5f7e2bfb71c13c4e2321afe1fc71859c2" "checksum clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f0f16b89cbb9ee36d87483dc939fe9f1e13c05898d56d7b230a0d4dff033a536" "checksum cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "56d741ea7a69e577f6d06b36b7dff4738f680593dc27a701ffa8506b73ce28bb" -"checksum cookie 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "746858cae4eae40fff37e1998320068df317bc247dc91a67c6cfa053afdc2abb" +"checksum cookie 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1465f8134efa296b4c19db34d909637cb2bf0f7aaf21299e23e18fa29ac557cf" "checksum core-foundation 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7caa6cb9e76ddddbea09a03266d6b3bc98cd41e9fb9b017c473e7cca593ec25" "checksum core-foundation-sys 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b2a53cce0ddcf7e7e1f998738d757d5a3bf08bf799a180e50ebe50d298f52f5a" "checksum core-graphics 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "92801c908ea6301ae619ed842a72e01098085fc321b9c2f3f833dad555bba055" @@ -2681,8 
+2860,11 @@ dependencies = [ "checksum cose-c 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "49726015ab0ca765144fcca61e4a7a543a16b795a777fa53f554da2fffff9a94" "checksum crc 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bd5d02c0aac6bd68393ed69e00bbc2457f3e89075c6349db7189618dc4ddc1d7" "checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3" +"checksum crossbeam-deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fe8153ef04a7594ded05b427ffad46ddeaf22e63fd48d42b3e1e3bb4db07cae7" "checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150" +"checksum crossbeam-epoch 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2af0e75710d6181e234c8ecc79f14a97907850a541b13b0be1dd10992f2e4620" "checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9" +"checksum crossbeam-utils 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d636a8b3bcc1b409d7ffd3facef8f21dcb4009626adbd0c5e6c4305c07253c7b" "checksum cssparser 0.24.0 (registry+https://github.com/rust-lang/crates.io-index)" = "495beddc39b1987b8e9f029354eccbd5ef88eb5f1cd24badb764dce338acf2e0" "checksum cssparser-macros 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a5383ae18dbfdeb569ed62019f5bddb2a95cd2d3833313c475a0d014777805" "checksum cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b6557bdb1dc9647eae1cf7f5601b14cd45fc3c7ccf2df618387416fe542da6ea" @@ -2715,9 +2897,9 @@ dependencies = [ "checksum foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5ebc04f19019fff1f2d627b5581574ead502f80c48c88900575a46e0840fe5d0" "checksum freetype 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "b659e75b7a7338fe75afd7f909fc2b71937845cffb6ebe54ba2e50f13d8e903d" "checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866" -"checksum fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6c0581a4e363262e52b87f59ee2afe3415361c6ec35e665924eb08afe8ff159" -"checksum fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43f3795b4bae048dc6123a6b972cadde2e676f9ded08aef6bb77f5f157684a82" -"checksum futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0bab5b5e94f5c31fc764ba5dd9ad16568aae5d4825538c01d6bca680c9bf94a7" +"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +"checksum futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)" = "884dbe32a6ae4cd7da5c6db9b78114449df9953b8d490c9d7e1b51720b922c62" "checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" "checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" "checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb" @@ -2725,12 +2907,15 @@ dependencies = [ "checksum gl_generator 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a795170cbd85b5a7baa58d6d7525cae6a03e486859860c220f7ebbbdd379d0a" "checksum gleam 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"0d41e7ac812597988fdae31c9baec3c6d35cadb8ad9ab88a9bf9c0f119ed66c2" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" +"checksum h2 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "a27e7ed946e8335bdf9a191bc1b9b14a03ba822d013d2f58437f4fabcbd7fc2c" +"checksum http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "dca621d0fa606a5ff2850b6e337b57ad6137ee4d67e940449643ff45af6874c6" "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" -"checksum hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = "368cb56b2740ebf4230520e2b90ebb0461e69034d85d1945febd9b3971426db2" +"checksum hyper 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c087746de95e20e4dabe86606c3a019964a8fde2d5f386152939063c116c5971" "checksum ident_case 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c9826188e666f2ed92071d2dadef6edc430b11b158b5b2b3f4babbcc891eaaa" "checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" -"checksum iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29d062ee61fccdf25be172e70f34c9f6efc597e1fb8f6526e8437b2046ab26be" +"checksum indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08173ba1e906efb6538785a8844dd496f5d34f0a2d88038e95195172fc667220" +"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08" "checksum itertools 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b07332223953b5051bceb67e8c4700aa65291535568e1f12408c43c4a42c0394" "checksum itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c069bbec61e1ca5a596166e55dfe4773ff745c3d16b700013bcaff9a6df2c682" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" @@ -2739,10 +2924,10 @@ dependencies = [ "checksum lalrpop-intern 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc4fd87be4a815fd373e02773983940f0d75fb26fde8c098e9e45f7af03154c0" "checksum lalrpop-snap 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f244285324e4e33d486910b66fd3b7cb37e2072c5bf63319f506fe99ed72650" "checksum lalrpop-util 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "de408fd50dea8ad7a77107144983a25c7fdabf5f8faf707a6e020d68874ed06c" -"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e6412c5e2ad9584b0b8e979393122026cdd6d2a80b933f890dcd694ddbe73739" "checksum lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b" -"checksum libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)" = "f54263ad99207254cf58b5f701ecb432c717445ea2ee8af387334bdd1a03fdff" +"checksum lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef" +"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d" "checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2" "checksum libudev 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "ea626d3bdf40a1c5aee3bcd4f40826970cae8d80a8fec934c82a63840094dcfe" "checksum libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd64ef8ee652185674455c1d450b83cbc8ad895625d543b5324d923f82e4d8" @@ -2757,16 +2942,15 @@ dependencies = [ "checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d" "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b" "checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" -"checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" "checksum miniz_oxide 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aaa2d3ad070f428fffbd7d3ca2ea20bb0d8cffe9024405c44e1840bc1418b398" "checksum miniz_oxide_c_api 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "92d98fdbd6145645828069b37ea92ca3de225e000d80702da25c20d3584b38a5" -"checksum mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9e965267d4d58496fc4f740e9861118367f13570cadf66316ed2c3f2f14d87c7" +"checksum mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4fcfcb32d63961fb6f367bfd5d21e4600b92cd310f71f9dca25acae196eb1560" "checksum mio-uds 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1731a873077147b626d89cc6c2a0db6288d607496c5d10c0cfcf3adc697ec673" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum moz_cbor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20c82a57087fd5990d7122dbff1607c3b20c3d2958e9d9ad9765aab415e2c91c" "checksum 
mp4parse_fallible 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6626c2aef76eb8f984eef02e475883d3fe9112e114720446c5810fc5f045cd30" "checksum msdos_time 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "65ba9d75bcea84e07812618fedf284a64776c2f2ea0cad6bca7f69739695a958" -"checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09" +"checksum net2 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)" = "9044faf1413a1057267be51b5afba8eb1090bd2231c693664aa1db716fe1eae0" "checksum new-ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8ccbebba6fb53a6d2bdcfaf79cb339bc136dee3bfff54dc337a334bafe36476a" "checksum new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0cdc457076c78ab54d5e0d6fa7c47981757f1e34dc39ff92787f217dede586c4" "checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2" @@ -2801,7 +2985,8 @@ dependencies = [ "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" "checksum quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9949cfe66888ffe1d53e6ec9d9f3b70714083854be20fd5e271b232a017401e8" "checksum quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e44651a0dc4cdd99f71c83b561e221f714912d11af1a4dff0631f923d53af035" -"checksum rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6475140dfd8655aeb72e1fd4b7a1cc1c202be65d71669476e392fe62532b9edd" +"checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1" +"checksum rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"8356f47b32624fef5b3301c1be97e5944ecdd595409cc5da11d05f211db6cfbd" "checksum rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "485541959c8ecc49865526fe6c4de9653dd6e60d829d6edf0be228167b60372d" "checksum rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9d24ad214285a7729b174ed6d3bcfcb80177807f959d95fafd5bfc5c4f201ac8" "checksum redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "ab105df655884ede59d45b7070c8a65002d921461ee813a024558ca16030eea0" @@ -2829,9 +3014,11 @@ dependencies = [ "checksum simd 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ed3686dd9418ebcc3a26a0c0ae56deab0681e53fe899af91f5bbcee667ebffb1" "checksum siphasher 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2ffc669b726f2bc9a3bcff66e5e23b56ba6bf70e22a34c3d7b6d0b3450b65b84" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" +"checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d" "checksum smallbitvec 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c63726029f0069f88467873e47f392575f28f9f16b72ac65465263db4b3a13c" "checksum smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "26df3bb03ca5eac2e64192b723d51f56c1b1e0860e7c766281f4598f181acdc8" "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b" +"checksum string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00caf261d6f90f588f8450b8e1230fa0d5be49ee6140fdfbcb55335aff350970" "checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423" "checksum string_cache_codegen 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "479cde50c3539481f33906a387f2bd17c8e87cb848c35b6021d41fb81ff9b4d7" "checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc" @@ -2850,15 +3037,22 @@ dependencies = [ "checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963" "checksum thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf947d192a9be60ef5131cc7a4648886ba89d712f16700ebbf80c8a69d05d48f" "checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b" -"checksum tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "febd81b3e2ef615c6c8077347b33f3f3deec3d708ecd08194c9707b7a1eccfc9" -"checksum tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af" +"checksum tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8ee337e5f4e501fc32966fec6fe0ca0cc1c237b0b1b14a335f8bfe3c5f06e286" +"checksum tokio-codec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "881e9645b81c2ce95fcb799ded2c29ffb9f25ef5bef909089a420e5961dd8ccb" +"checksum tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71" +"checksum tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "424f0c87ecd66b863045d84e384cb7ce0ae384d8b065b9f0363d29c0d1b30b2f" +"checksum tokio-fs 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5cbe4ca6e71cb0b62a66e4e6f53a8c06a6eefe46cc5f665ad6f274c9906f135" +"checksum tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a5c9635ee806f26d302b8baa1e145689a280d8f5aa8d0552e7344808da54cc21" +"checksum tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8703a5762ff6913510dc64272c714c4389ffd8c4b3cf602879b8bd14ff06b604" +"checksum tokio-tcp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4c329b47f071eb8a746040465fa751bd95e4716e98daef6a9b4e434c17d565" +"checksum tokio-threadpool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "24ab84f574027b0e875378f31575cf175360891919e93a3490f07e76e00e4efb" +"checksum tokio-timer 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1c76b4e97a4f61030edff8bd272364e4f731b9f54c7307eb4eb733c3926eb96a" +"checksum tokio-udp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "43eb534af6e8f37d43ab1b612660df14755c42bd003c5f8d2475ee78cc4600c0" "checksum tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "65ae5d255ce739e8537221ed2942e0445f4b3b813daebac1c0050ddaaa3587f9" "checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" -"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" -"checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" +"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" "checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" "checksum uluru 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "519130f0ea964ba540a9d8af1373738c2226f1d465eda07e61db29feb5479db9" -"checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f" "checksum unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "aa6024fc12ddfd1c6dbc14a80fa2324d4568849869b779f6bd37e5e4c03344d1" @@ -2870,9 +3064,9 @@ dependencies = [ "checksum uuid 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcc7e3b898aa6f6c08e5295b6c89258d1331e9ac578cc992fb818759951bdc22" "checksum vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9e0a7d8bed3178a8fb112199d466eeca9ed09a14ba8ad67718179b4fd5487d0b" "checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c" -"checksum version_check 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6b772017e347561807c1aa192438c5fd74242a670a6cffacc40f2defd1dc069d" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63636bd0eb3d00ccb8b9036381b526efac53caf112b7783b730ab3f8e44da369" +"checksum want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "797464475f30ddb8830cc529aaaae648d581f99e2036a928877dfde027ddf6b3" "checksum webidl 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dc14e4b71f94b5bb4c6d696e3b3be4d2e9ee6750a60870ecae09ff7138a131a7" "checksum which 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4be6cfa54dab45266e98b5d7be2f8ce959ddd49abd141a05d52dce4b07f803bb" "checksum winapi 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" diff --git a/browser/base/content/test/general/browser.ini b/browser/base/content/test/general/browser.ini index c2dff3ba6892..dc86d67d070a 100644 --- a/browser/base/content/test/general/browser.ini +++ b/browser/base/content/test/general/browser.ini @@ -416,7 +416,7 @@ skip-if = verify [browser_tab_drag_drop_perwindow.js] # DO NOT ADD MORE TESTS HERE. USE A TOPICAL DIRECTORY INSTEAD. [browser_tab_dragdrop.js] -skip-if = debug || (os == 'linux') || (os == 'mac') # Bug 1312436, Bug 1388973 +skip-if = debug || (os == 'linux') || (os == 'mac') || (os == 'win' && asan) # Bug 1312436, Bug 1388973 # DO NOT ADD MORE TESTS HERE. USE A TOPICAL DIRECTORY INSTEAD. [browser_tab_dragdrop2.js] # DO NOT ADD MORE TESTS HERE. USE A TOPICAL DIRECTORY INSTEAD. diff --git a/browser/base/content/urlbarBindings.xml b/browser/base/content/urlbarBindings.xml index d5042016c1b9..0ba6166d107c 100644 --- a/browser/base/content/urlbarBindings.xml +++ b/browser/base/content/urlbarBindings.xml @@ -589,11 +589,12 @@ file, You can obtain one at http://mozilla.org/MPL/2.0/. // invoked regardless, thus this should be enough. if (this._formattingInstance != instance) return; - let isDomainRTL = window.windowUtils.getDirectionFromText(domain); + let directionality = window.windowUtils.getDirectionFromText(domain); // In the future, for example in bug 525831, we may add a forceRTL // char just after the domain, and in such a case we should not // scroll to the left. 
- if (isDomainRTL && value[preDomain.length + domain.length] != "\u200E") { + if (directionality == window.windowUtils.DIRECTION_RTL && + value[preDomain.length + domain.length] != "\u200E") { this.inputField.scrollLeft = this.inputField.scrollLeftMax; } }); diff --git a/browser/themes/shared/identity-block/identity-block.inc.css b/browser/themes/shared/identity-block/identity-block.inc.css index f6d9c3157dc5..5ec935ab1f03 100644 --- a/browser/themes/shared/identity-block/identity-block.inc.css +++ b/browser/themes/shared/identity-block/identity-block.inc.css @@ -239,7 +239,6 @@ } 50% { transform: translateX(-1232px); - fill: var(--tracking-protection-shield-color); } 65% { fill: var(--tracking-protection-shield-color); @@ -266,7 +265,6 @@ } 50% { transform: scaleX(-1) translateX(-1232px); - fill: var(--tracking-protection-shield-color); } 65% { fill: var(--tracking-protection-shield-color); diff --git a/devtools/client/debugger/new/README.mozilla b/devtools/client/debugger/new/README.mozilla index 1878c04672af..f5b8ad4fdfee 100644 --- a/devtools/client/debugger/new/README.mozilla +++ b/devtools/client/debugger/new/README.mozilla @@ -1,9 +1,9 @@ This is the debugger.html project output. 
See https://github.com/devtools-html/debugger.html -Version 84 +Version 85 -Comparison: https://github.com/devtools-html/debugger.html/compare/release-83...release-84 +Comparison: https://github.com/devtools-html/debugger.html/compare/release-84...release-85 Packages: - babel-plugin-transform-es2015-modules-commonjs @6.26.2 diff --git a/devtools/client/debugger/new/dist/vendors.js b/devtools/client/debugger/new/dist/vendors.js index 5cf1a1da7a31..ba5082ee5185 100644 --- a/devtools/client/debugger/new/dist/vendors.js +++ b/devtools/client/debugger/new/dist/vendors.js @@ -1,13 +1,13 @@ (function webpackUniversalModuleDefinition(root, factory) { if(typeof exports === 'object' && typeof module === 'object') - module.exports = factory(require("devtools/client/shared/vendor/react"), require("devtools/client/shared/vendor/react-dom"), require("Services"), require("devtools/shared/flags"), require("devtools/client/shared/vendor/react-prop-types"), require("devtools/client/shared/vendor/react-dom-factories")); + module.exports = factory(require("devtools/client/shared/vendor/react"), require("devtools/client/shared/vendor/lodash"), require("devtools/client/shared/vendor/react-dom"), require("Services"), require("devtools/shared/flags"), require("devtools/client/shared/vendor/react-prop-types"), require("devtools/client/shared/vendor/react-dom-factories")); else if(typeof define === 'function' && define.amd) - define(["devtools/client/shared/vendor/react", "devtools/client/shared/vendor/react-dom", "Services", "devtools/shared/flags", "devtools/client/shared/vendor/react-prop-types", "devtools/client/shared/vendor/react-dom-factories"], factory); + define(["devtools/client/shared/vendor/react", "devtools/client/shared/vendor/lodash", "devtools/client/shared/vendor/react-dom", "Services", "devtools/shared/flags", "devtools/client/shared/vendor/react-prop-types", "devtools/client/shared/vendor/react-dom-factories"], factory); else { - var a = typeof exports === 'object' ? 
factory(require("devtools/client/shared/vendor/react"), require("devtools/client/shared/vendor/react-dom"), require("Services"), require("devtools/shared/flags"), require("devtools/client/shared/vendor/react-prop-types"), require("devtools/client/shared/vendor/react-dom-factories")) : factory(root["devtools/client/shared/vendor/react"], root["devtools/client/shared/vendor/react-dom"], root["Services"], root["devtools/shared/flags"], root["devtools/client/shared/vendor/react-prop-types"], root["devtools/client/shared/vendor/react-dom-factories"]); + var a = typeof exports === 'object' ? factory(require("devtools/client/shared/vendor/react"), require("devtools/client/shared/vendor/lodash"), require("devtools/client/shared/vendor/react-dom"), require("Services"), require("devtools/shared/flags"), require("devtools/client/shared/vendor/react-prop-types"), require("devtools/client/shared/vendor/react-dom-factories")) : factory(root["devtools/client/shared/vendor/react"], root["devtools/client/shared/vendor/lodash"], root["devtools/client/shared/vendor/react-dom"], root["Services"], root["devtools/shared/flags"], root["devtools/client/shared/vendor/react-prop-types"], root["devtools/client/shared/vendor/react-dom-factories"]); for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i]; } -})(typeof self !== 'undefined' ? self : this, function(__WEBPACK_EXTERNAL_MODULE_0__, __WEBPACK_EXTERNAL_MODULE_4__, __WEBPACK_EXTERNAL_MODULE_22__, __WEBPACK_EXTERNAL_MODULE_52__, __WEBPACK_EXTERNAL_MODULE_3642__, __WEBPACK_EXTERNAL_MODULE_3643__) { +})(typeof self !== 'undefined' ? 
self : this, function(__WEBPACK_EXTERNAL_MODULE_0__, __WEBPACK_EXTERNAL_MODULE_2__, __WEBPACK_EXTERNAL_MODULE_4__, __WEBPACK_EXTERNAL_MODULE_22__, __WEBPACK_EXTERNAL_MODULE_52__, __WEBPACK_EXTERNAL_MODULE_3642__, __WEBPACK_EXTERNAL_MODULE_3643__) { return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; @@ -80,92 +80,6 @@ return /******/ (function(modules) { // webpackBootstrap module.exports = __WEBPACK_EXTERNAL_MODULE_0__; -/***/ }), - -/***/ 10: -/***/ (function(module, exports, __webpack_require__) { - -var Symbol = __webpack_require__(7); - -/** Used for built-in method references. */ -var objectProto = Object.prototype; - -/** Used to check objects for own properties. */ -var hasOwnProperty = objectProto.hasOwnProperty; - -/** - * Used to resolve the - * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) - * of values. - */ -var nativeObjectToString = objectProto.toString; - -/** Built-in value references. */ -var symToStringTag = Symbol ? Symbol.toStringTag : undefined; - -/** - * A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the raw `toStringTag`. - */ -function getRawTag(value) { - var isOwn = hasOwnProperty.call(value, symToStringTag), - tag = value[symToStringTag]; - - try { - value[symToStringTag] = undefined; - var unmasked = true; - } catch (e) {} - - var result = nativeObjectToString.call(value); - if (unmasked) { - if (isOwn) { - value[symToStringTag] = tag; - } else { - delete value[symToStringTag]; - } - } - return result; -} - -module.exports = getRawTag; - - -/***/ }), - -/***/ 100: -/***/ (function(module, exports, __webpack_require__) { - -var assocIndexOf = __webpack_require__(96); - -/** - * Sets the list cache `key` to `value`. 
- * - * @private - * @name set - * @memberOf ListCache - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the list cache instance. - */ -function listCacheSet(key, value) { - var data = this.__data__, - index = assocIndexOf(data, key); - - if (index < 0) { - ++this.size; - data.push([key, value]); - } else { - data[index][1] = value; - } - return this; -} - -module.exports = listCacheSet; - - /***/ }), /***/ 1000: @@ -201,92 +115,6 @@ module.exports = "" -/***/ }), - -/***/ 101: -/***/ (function(module, exports, __webpack_require__) { - -var getNative = __webpack_require__(81), - root = __webpack_require__(8); - -/* Built-in method references that are verified to be native. */ -var Map = getNative(root, 'Map'); - -module.exports = Map; - - -/***/ }), - -/***/ 102: -/***/ (function(module, exports, __webpack_require__) { - -var getMapData = __webpack_require__(103); - -/** - * Removes `key` and its value from the map. - * - * @private - * @name delete - * @memberOf MapCache - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. - */ -function mapCacheDelete(key) { - var result = getMapData(this, key)['delete'](key); - this.size -= result ? 1 : 0; - return result; -} - -module.exports = mapCacheDelete; - - -/***/ }), - -/***/ 103: -/***/ (function(module, exports, __webpack_require__) { - -var isKeyable = __webpack_require__(104); - -/** - * Gets the data for `map`. - * - * @private - * @param {Object} map The map to query. - * @param {string} key The reference key. - * @returns {*} Returns the map data. - */ -function getMapData(map, key) { - var data = map.__data__; - return isKeyable(key) - ? data[typeof key == 'string' ? 
'string' : 'hash'] - : data.map; -} - -module.exports = getMapData; - - -/***/ }), - -/***/ 104: -/***/ (function(module, exports) { - -/** - * Checks if `value` is suitable for use as unique object key. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is suitable, else `false`. - */ -function isKeyable(value) { - var type = typeof value; - return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean') - ? (value !== '__proto__') - : (value === null); -} - -module.exports = isKeyable; - - /***/ }), /***/ 1043: @@ -308,245 +136,6 @@ module.exports = "Created with Sketch." -/***/ }), - -/***/ 105: -/***/ (function(module, exports, __webpack_require__) { - -var getMapData = __webpack_require__(103); - -/** - * Gets the map value for `key`. - * - * @private - * @name get - * @memberOf MapCache - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ -function mapCacheGet(key) { - return getMapData(this, key).get(key); -} - -module.exports = mapCacheGet; - - -/***/ }), - -/***/ 106: -/***/ (function(module, exports, __webpack_require__) { - -var getMapData = __webpack_require__(103); - -/** - * Checks if a map value for `key` exists. - * - * @private - * @name has - * @memberOf MapCache - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ -function mapCacheHas(key) { - return getMapData(this, key).has(key); -} - -module.exports = mapCacheHas; - - -/***/ }), - -/***/ 107: -/***/ (function(module, exports, __webpack_require__) { - -var getMapData = __webpack_require__(103); - -/** - * Sets the map `key` to `value`. - * - * @private - * @name set - * @memberOf MapCache - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the map cache instance. 
- */ -function mapCacheSet(key, value) { - var data = getMapData(this, key), - size = data.size; - - data.set(key, value); - this.size += data.size == size ? 0 : 1; - return this; -} - -module.exports = mapCacheSet; - - -/***/ }), - -/***/ 108: -/***/ (function(module, exports, __webpack_require__) { - -var baseToString = __webpack_require__(109); - -/** - * Converts `value` to a string. An empty string is returned for `null` - * and `undefined` values. The sign of `-0` is preserved. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {string} Returns the converted string. - * @example - * - * _.toString(null); - * // => '' - * - * _.toString(-0); - * // => '-0' - * - * _.toString([1, 2, 3]); - * // => '1,2,3' - */ -function toString(value) { - return value == null ? '' : baseToString(value); -} - -module.exports = toString; - - -/***/ }), - -/***/ 109: -/***/ (function(module, exports, __webpack_require__) { - -var Symbol = __webpack_require__(7), - arrayMap = __webpack_require__(110), - isArray = __webpack_require__(70), - isSymbol = __webpack_require__(72); - -/** Used as references for various `Number` constants. */ -var INFINITY = 1 / 0; - -/** Used to convert symbols to primitives and strings. */ -var symbolProto = Symbol ? Symbol.prototype : undefined, - symbolToString = symbolProto ? symbolProto.toString : undefined; - -/** - * The base implementation of `_.toString` which doesn't convert nullish - * values to empty strings. - * - * @private - * @param {*} value The value to process. - * @returns {string} Returns the string. - */ -function baseToString(value) { - // Exit early for strings to avoid a performance hit in some environments. - if (typeof value == 'string') { - return value; - } - if (isArray(value)) { - // Recursively convert values (susceptible to call stack limits). - return arrayMap(value, baseToString) + ''; - } - if (isSymbol(value)) { - return symbolToString ? 
symbolToString.call(value) : ''; - } - var result = (value + ''); - return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; -} - -module.exports = baseToString; - - -/***/ }), - -/***/ 11: -/***/ (function(module, exports) { - -/** Used for built-in method references. */ -var objectProto = Object.prototype; - -/** - * Used to resolve the - * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) - * of values. - */ -var nativeObjectToString = objectProto.toString; - -/** - * Converts `value` to a string using `Object.prototype.toString`. - * - * @private - * @param {*} value The value to convert. - * @returns {string} Returns the converted string. - */ -function objectToString(value) { - return nativeObjectToString.call(value); -} - -module.exports = objectToString; - - -/***/ }), - -/***/ 110: -/***/ (function(module, exports) { - -/** - * A specialized version of `_.map` for arrays without support for iteratee - * shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - */ -function arrayMap(array, iteratee) { - var index = -1, - length = array == null ? 0 : array.length, - result = Array(length); - - while (++index < length) { - result[index] = iteratee(array[index], index, array); - } - return result; -} - -module.exports = arrayMap; - - -/***/ }), - -/***/ 111: -/***/ (function(module, exports, __webpack_require__) { - -var isSymbol = __webpack_require__(72); - -/** Used as references for various `Number` constants. */ -var INFINITY = 1 / 0; - -/** - * Converts `value` to a string key if it's not a string or symbol. - * - * @private - * @param {*} value The value to inspect. - * @returns {string|symbol} Returns the key. 
- */ -function toKey(value) { - if (typeof value == 'string' || isSymbol(value)) { - return value; - } - var result = (value + ''); - return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; -} - -module.exports = toKey; - - /***/ }), /***/ 1117: @@ -568,219 +157,6 @@ module.exports = "" -/***/ }), - -/***/ 112: -/***/ (function(module, exports, __webpack_require__) { - -var baseSet = __webpack_require__(113); - -/** - * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, - * it's created. Arrays are created for missing index properties while objects - * are created for all other missing properties. Use `_.setWith` to customize - * `path` creation. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 3.7.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @returns {Object} Returns `object`. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }] }; - * - * _.set(object, 'a[0].b.c', 4); - * console.log(object.a[0].b.c); - * // => 4 - * - * _.set(object, ['x', '0', 'y', 'z'], 5); - * console.log(object.x[0].y.z); - * // => 5 - */ -function set(object, path, value) { - return object == null ? object : baseSet(object, path, value); -} - -module.exports = set; - - -/***/ }), - -/***/ 113: -/***/ (function(module, exports, __webpack_require__) { - -var assignValue = __webpack_require__(114), - castPath = __webpack_require__(69), - isIndex = __webpack_require__(117), - isObject = __webpack_require__(84), - toKey = __webpack_require__(111); - -/** - * The base implementation of `_.set`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @param {Function} [customizer] The function to customize path creation. - * @returns {Object} Returns `object`. 
- */ -function baseSet(object, path, value, customizer) { - if (!isObject(object)) { - return object; - } - path = castPath(path, object); - - var index = -1, - length = path.length, - lastIndex = length - 1, - nested = object; - - while (nested != null && ++index < length) { - var key = toKey(path[index]), - newValue = value; - - if (index != lastIndex) { - var objValue = nested[key]; - newValue = customizer ? customizer(objValue, key, nested) : undefined; - if (newValue === undefined) { - newValue = isObject(objValue) - ? objValue - : (isIndex(path[index + 1]) ? [] : {}); - } - } - assignValue(nested, key, newValue); - nested = nested[key]; - } - return object; -} - -module.exports = baseSet; - - -/***/ }), - -/***/ 114: -/***/ (function(module, exports, __webpack_require__) { - -var baseAssignValue = __webpack_require__(115), - eq = __webpack_require__(97); - -/** Used for built-in method references. */ -var objectProto = Object.prototype; - -/** Used to check objects for own properties. */ -var hasOwnProperty = objectProto.hasOwnProperty; - -/** - * Assigns `value` to `key` of `object` if the existing value is not equivalent - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. - * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ -function assignValue(object, key, value) { - var objValue = object[key]; - if (!(hasOwnProperty.call(object, key) && eq(objValue, value)) || - (value === undefined && !(key in object))) { - baseAssignValue(object, key, value); - } -} - -module.exports = assignValue; - - -/***/ }), - -/***/ 115: -/***/ (function(module, exports, __webpack_require__) { - -var defineProperty = __webpack_require__(116); - -/** - * The base implementation of `assignValue` and `assignMergeValue` without - * value checks. 
- * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ -function baseAssignValue(object, key, value) { - if (key == '__proto__' && defineProperty) { - defineProperty(object, key, { - 'configurable': true, - 'enumerable': true, - 'value': value, - 'writable': true - }); - } else { - object[key] = value; - } -} - -module.exports = baseAssignValue; - - -/***/ }), - -/***/ 116: -/***/ (function(module, exports, __webpack_require__) { - -var getNative = __webpack_require__(81); - -var defineProperty = (function() { - try { - var func = getNative(Object, 'defineProperty'); - func({}, '', {}); - return func; - } catch (e) {} -}()); - -module.exports = defineProperty; - - -/***/ }), - -/***/ 117: -/***/ (function(module, exports) { - -/** Used as references for various `Number` constants. */ -var MAX_SAFE_INTEGER = 9007199254740991; - -/** Used to detect unsigned integer values. */ -var reIsUint = /^(?:0|[1-9]\d*)$/; - -/** - * Checks if `value` is a valid array-like index. - * - * @private - * @param {*} value The value to check. - * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index. - * @returns {boolean} Returns `true` if `value` is a valid index, else `false`. - */ -function isIndex(value, length) { - var type = typeof value; - length = length == null ? MAX_SAFE_INTEGER : length; - - return !!length && - (type == 'number' || - (type != 'symbol' && reIsUint.test(value))) && - (value > -1 && value % 1 == 0 && value < length); -} - -module.exports = isIndex; - - /***/ }), /***/ 1174: @@ -1275,6 +651,10 @@ module.exports = "dojo_square" -/***/ }), - -/***/ 81: -/***/ (function(module, exports, __webpack_require__) { - -var baseIsNative = __webpack_require__(82), - getValue = __webpack_require__(88); - -/** - * Gets the native function at `key` of `object`. - * - * @private - * @param {Object} object The object to query. 
- * @param {string} key The key of the method to get. - * @returns {*} Returns the function if it's native, else `undefined`. - */ -function getNative(object, key) { - var value = getValue(object, key); - return baseIsNative(value) ? value : undefined; -} - -module.exports = getNative; - - -/***/ }), - -/***/ 82: -/***/ (function(module, exports, __webpack_require__) { - -var isFunction = __webpack_require__(83), - isMasked = __webpack_require__(85), - isObject = __webpack_require__(84), - toSource = __webpack_require__(87); - -/** - * Used to match `RegExp` - * [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns). - */ -var reRegExpChar = /[\\^$.*+?()[\]{}|]/g; - -/** Used to detect host constructors (Safari). */ -var reIsHostCtor = /^\[object .+?Constructor\]$/; - -/** Used for built-in method references. */ -var funcProto = Function.prototype, - objectProto = Object.prototype; - -/** Used to resolve the decompiled source of functions. */ -var funcToString = funcProto.toString; - -/** Used to check objects for own properties. */ -var hasOwnProperty = objectProto.hasOwnProperty; - -/** Used to detect if a method is native. */ -var reIsNative = RegExp('^' + - funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\$&') - .replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, '$1.*?') + '$' -); - -/** - * The base implementation of `_.isNative` without bad shim checks. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a native function, - * else `false`. - */ -function baseIsNative(value) { - if (!isObject(value) || isMasked(value)) { - return false; - } - var pattern = isFunction(value) ? 
reIsNative : reIsHostCtor; - return pattern.test(toSource(value)); -} - -module.exports = baseIsNative; - - -/***/ }), - -/***/ 83: -/***/ (function(module, exports, __webpack_require__) { - -var baseGetTag = __webpack_require__(6), - isObject = __webpack_require__(84); - -/** `Object#toString` result references. */ -var asyncTag = '[object AsyncFunction]', - funcTag = '[object Function]', - genTag = '[object GeneratorFunction]', - proxyTag = '[object Proxy]'; - -/** - * Checks if `value` is classified as a `Function` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a function, else `false`. - * @example - * - * _.isFunction(_); - * // => true - * - * _.isFunction(/abc/); - * // => false - */ -function isFunction(value) { - if (!isObject(value)) { - return false; - } - // The use of `Object#toString` avoids issues with the `typeof` operator - // in Safari 9 which returns 'object' for typed arrays and other constructors. - var tag = baseGetTag(value); - return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag; -} - -module.exports = isFunction; - - -/***/ }), - -/***/ 84: -/***/ (function(module, exports) { - -/** - * Checks if `value` is the - * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) - * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an object, else `false`. 
- * @example - * - * _.isObject({}); - * // => true - * - * _.isObject([1, 2, 3]); - * // => true - * - * _.isObject(_.noop); - * // => true - * - * _.isObject(null); - * // => false - */ -function isObject(value) { - var type = typeof value; - return value != null && (type == 'object' || type == 'function'); -} - -module.exports = isObject; - - -/***/ }), - -/***/ 85: -/***/ (function(module, exports, __webpack_require__) { - -var coreJsData = __webpack_require__(86); - -/** Used to detect methods masquerading as native. */ -var maskSrcKey = (function() { - var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || ''); - return uid ? ('Symbol(src)_1.' + uid) : ''; -}()); - -/** - * Checks if `func` has its source masked. - * - * @private - * @param {Function} func The function to check. - * @returns {boolean} Returns `true` if `func` is masked, else `false`. - */ -function isMasked(func) { - return !!maskSrcKey && (maskSrcKey in func); -} - -module.exports = isMasked; - - -/***/ }), - -/***/ 86: -/***/ (function(module, exports, __webpack_require__) { - -var root = __webpack_require__(8); - -/** Used to detect overreaching core-js shims. */ -var coreJsData = root['__core-js_shared__']; - -module.exports = coreJsData; - - -/***/ }), - -/***/ 87: -/***/ (function(module, exports) { - -/** Used for built-in method references. */ -var funcProto = Function.prototype; - -/** Used to resolve the decompiled source of functions. */ -var funcToString = funcProto.toString; - -/** - * Converts `func` to its source code. - * - * @private - * @param {Function} func The function to convert. - * @returns {string} Returns the source code. - */ -function toSource(func) { - if (func != null) { - try { - return funcToString.call(func); - } catch (e) {} - try { - return (func + ''); - } catch (e) {} - } - return ''; -} - -module.exports = toSource; - - -/***/ }), - -/***/ 88: -/***/ (function(module, exports) { - -/** - * Gets the value at `key` of `object`. 
- * - * @private - * @param {Object} [object] The object to query. - * @param {string} key The key of the property to get. - * @returns {*} Returns the property value. - */ -function getValue(object, key) { - return object == null ? undefined : object[key]; -} - -module.exports = getValue; - - -/***/ }), - -/***/ 89: -/***/ (function(module, exports) { - -/** - * Removes `key` and its value from the hash. - * - * @private - * @name delete - * @memberOf Hash - * @param {Object} hash The hash to modify. - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. - */ -function hashDelete(key) { - var result = this.has(key) && delete this.__data__[key]; - this.size -= result ? 1 : 0; - return result; -} - -module.exports = hashDelete; - - -/***/ }), - -/***/ 9: -/***/ (function(module, exports, __webpack_require__) { - -/* WEBPACK VAR INJECTION */(function(global) {/** Detect free variable `global` from Node.js. */ -var freeGlobal = typeof global == 'object' && global && global.Object === Object && global; - -module.exports = freeGlobal; - -/* WEBPACK VAR INJECTION */}.call(exports, __webpack_require__(792))) - -/***/ }), - -/***/ 90: -/***/ (function(module, exports, __webpack_require__) { - -var nativeCreate = __webpack_require__(80); - -/** Used to stand-in for `undefined` hash values. */ -var HASH_UNDEFINED = '__lodash_hash_undefined__'; - -/** Used for built-in method references. */ -var objectProto = Object.prototype; - -/** Used to check objects for own properties. */ -var hasOwnProperty = objectProto.hasOwnProperty; - -/** - * Gets the hash value for `key`. - * - * @private - * @name get - * @memberOf Hash - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ -function hashGet(key) { - var data = this.__data__; - if (nativeCreate) { - var result = data[key]; - return result === HASH_UNDEFINED ? 
undefined : result; - } - return hasOwnProperty.call(data, key) ? data[key] : undefined; -} - -module.exports = hashGet; - - -/***/ }), - -/***/ 91: -/***/ (function(module, exports, __webpack_require__) { - -var nativeCreate = __webpack_require__(80); - -/** Used for built-in method references. */ -var objectProto = Object.prototype; - -/** Used to check objects for own properties. */ -var hasOwnProperty = objectProto.hasOwnProperty; - -/** - * Checks if a hash value for `key` exists. - * - * @private - * @name has - * @memberOf Hash - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ -function hashHas(key) { - var data = this.__data__; - return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key); -} - -module.exports = hashHas; - - /***/ }), /***/ 916: @@ -9526,36 +7918,6 @@ module.exports = hashHas; module.exports = "" -/***/ }), - -/***/ 92: -/***/ (function(module, exports, __webpack_require__) { - -var nativeCreate = __webpack_require__(80); - -/** Used to stand-in for `undefined` hash values. */ -var HASH_UNDEFINED = '__lodash_hash_undefined__'; - -/** - * Sets the hash `key` to `value`. - * - * @private - * @name set - * @memberOf Hash - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the hash instance. - */ -function hashSet(key, value) { - var data = this.__data__; - this.size += this.has(key) ? 0 : 1; - data[key] = (nativeCreate && value === undefined) ? 
HASH_UNDEFINED : value; - return this; -} - -module.exports = hashSet; - - /***/ }), /***/ 920: @@ -9563,228 +7925,6 @@ module.exports = hashSet; module.exports = "" -/***/ }), - -/***/ 93: -/***/ (function(module, exports, __webpack_require__) { - -var listCacheClear = __webpack_require__(94), - listCacheDelete = __webpack_require__(95), - listCacheGet = __webpack_require__(98), - listCacheHas = __webpack_require__(99), - listCacheSet = __webpack_require__(100); - -/** - * Creates an list cache object. - * - * @private - * @constructor - * @param {Array} [entries] The key-value pairs to cache. - */ -function ListCache(entries) { - var index = -1, - length = entries == null ? 0 : entries.length; - - this.clear(); - while (++index < length) { - var entry = entries[index]; - this.set(entry[0], entry[1]); - } -} - -// Add methods to `ListCache`. -ListCache.prototype.clear = listCacheClear; -ListCache.prototype['delete'] = listCacheDelete; -ListCache.prototype.get = listCacheGet; -ListCache.prototype.has = listCacheHas; -ListCache.prototype.set = listCacheSet; - -module.exports = ListCache; - - -/***/ }), - -/***/ 94: -/***/ (function(module, exports) { - -/** - * Removes all key-value entries from the list cache. - * - * @private - * @name clear - * @memberOf ListCache - */ -function listCacheClear() { - this.__data__ = []; - this.size = 0; -} - -module.exports = listCacheClear; - - -/***/ }), - -/***/ 95: -/***/ (function(module, exports, __webpack_require__) { - -var assocIndexOf = __webpack_require__(96); - -/** Used for built-in method references. */ -var arrayProto = Array.prototype; - -/** Built-in value references. */ -var splice = arrayProto.splice; - -/** - * Removes `key` and its value from the list cache. - * - * @private - * @name delete - * @memberOf ListCache - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. 
- */ -function listCacheDelete(key) { - var data = this.__data__, - index = assocIndexOf(data, key); - - if (index < 0) { - return false; - } - var lastIndex = data.length - 1; - if (index == lastIndex) { - data.pop(); - } else { - splice.call(data, index, 1); - } - --this.size; - return true; -} - -module.exports = listCacheDelete; - - -/***/ }), - -/***/ 96: -/***/ (function(module, exports, __webpack_require__) { - -var eq = __webpack_require__(97); - -/** - * Gets the index at which the `key` is found in `array` of key-value pairs. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} key The key to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - */ -function assocIndexOf(array, key) { - var length = array.length; - while (length--) { - if (eq(array[length][0], key)) { - return length; - } - } - return -1; -} - -module.exports = assocIndexOf; - - -/***/ }), - -/***/ 97: -/***/ (function(module, exports) { - -/** - * Performs a - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * comparison between two values to determine if they are equivalent. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. 
- * @example - * - * var object = { 'a': 1 }; - * var other = { 'a': 1 }; - * - * _.eq(object, object); - * // => true - * - * _.eq(object, other); - * // => false - * - * _.eq('a', 'a'); - * // => true - * - * _.eq('a', Object('a')); - * // => false - * - * _.eq(NaN, NaN); - * // => true - */ -function eq(value, other) { - return value === other || (value !== value && other !== other); -} - -module.exports = eq; - - -/***/ }), - -/***/ 98: -/***/ (function(module, exports, __webpack_require__) { - -var assocIndexOf = __webpack_require__(96); - -/** - * Gets the list cache value for `key`. - * - * @private - * @name get - * @memberOf ListCache - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ -function listCacheGet(key) { - var data = this.__data__, - index = assocIndexOf(data, key); - - return index < 0 ? undefined : data[index][1]; -} - -module.exports = listCacheGet; - - -/***/ }), - -/***/ 99: -/***/ (function(module, exports, __webpack_require__) { - -var assocIndexOf = __webpack_require__(96); - -/** - * Checks if a list cache value for `key` exists. - * - * @private - * @name has - * @memberOf ListCache - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. 
- */ -function listCacheHas(key) { - return assocIndexOf(this.__data__, key) > -1; -} - -module.exports = listCacheHas; - - /***/ }), /***/ 993: diff --git a/devtools/client/debugger/new/src/actions/sources/select.js b/devtools/client/debugger/new/src/actions/sources/select.js index b4947dae3995..56eed6e5ab31 100644 --- a/devtools/client/debugger/new/src/actions/sources/select.js +++ b/devtools/client/debugger/new/src/actions/sources/select.js @@ -8,7 +8,6 @@ exports.selectSourceURL = selectSourceURL; exports.selectSource = selectSource; exports.selectLocation = selectLocation; exports.selectSpecificLocation = selectSpecificLocation; -exports.selectSpecificSource = selectSpecificSource; exports.jumpToMappedLocation = jumpToMappedLocation; exports.jumpToMappedSelectedLocation = jumpToMappedSelectedLocation; @@ -106,7 +105,7 @@ function selectSource(sourceId) { const location = (0, _location.createLocation)({ sourceId }); - return await dispatch(selectLocation(location)); + return await dispatch(selectSpecificLocation(location)); }; } /** @@ -196,22 +195,6 @@ function selectSpecificLocation(location) { */ -function selectSpecificSource(sourceId) { - return async ({ - dispatch - }) => { - const location = (0, _location.createLocation)({ - sourceId - }); - return await dispatch(selectSpecificLocation(location)); - }; -} -/** - * @memberof actions/sources - * @static - */ - - function jumpToMappedLocation(location) { return async function ({ dispatch, diff --git a/devtools/client/debugger/new/src/client/firefox/commands.js b/devtools/client/debugger/new/src/client/firefox/commands.js index 54445a9945cf..4837edb3950c 100644 --- a/devtools/client/debugger/new/src/client/firefox/commands.js +++ b/devtools/client/debugger/new/src/client/firefox/commands.js @@ -366,7 +366,7 @@ async function checkServerSupportsListWorkers() { return false; } - const deviceFront = await (0, _frontsDevice.getDeviceFront)(debuggerClient, root); + const deviceFront = await 
debuggerClient.mainRoot.getFront("device"); const description = await deviceFront.getDescription(); const isFennec = description.apptype === "mobile/android"; @@ -438,4 +438,4 @@ const clientCommands = { setSkipPausing }; exports.setupCommands = setupCommands; -exports.clientCommands = clientCommands; \ No newline at end of file +exports.clientCommands = clientCommands; diff --git a/devtools/client/debugger/new/src/components/Editor/Tab.js b/devtools/client/debugger/new/src/components/Editor/Tab.js index f90f2c1db37a..7547d47c079d 100644 --- a/devtools/client/debugger/new/src/components/Editor/Tab.js +++ b/devtools/client/debugger/new/src/components/Editor/Tab.js @@ -132,7 +132,7 @@ class Tab extends _react.PureComponent { render() { const { selectedSource, - selectSpecificSource, + selectSource, closeTab, source, tabSources @@ -149,7 +149,7 @@ class Tab extends _react.PureComponent { function handleTabClick(e) { e.preventDefault(); e.stopPropagation(); - return selectSpecificSource(sourceId); + return selectSource(sourceId); } const className = (0, _classnames2.default)("source-tab", { @@ -190,7 +190,7 @@ const mapStateToProps = (state, { }; exports.default = (0, _reactRedux.connect)(mapStateToProps, { - selectSpecificSource: _actions2.default.selectSpecificSource, + selectSource: _actions2.default.selectSource, closeTab: _actions2.default.closeTab, closeTabs: _actions2.default.closeTabs, togglePrettyPrint: _actions2.default.togglePrettyPrint, diff --git a/devtools/client/debugger/new/src/components/Editor/Tabs.js b/devtools/client/debugger/new/src/components/Editor/Tabs.js index 5881ed5b6acc..fa125acc66f5 100644 --- a/devtools/client/debugger/new/src/components/Editor/Tabs.js +++ b/devtools/client/debugger/new/src/components/Editor/Tabs.js @@ -67,11 +67,11 @@ class Tabs extends _react.PureComponent { this.renderDropdownSource = source => { const { - selectSpecificSource + selectSource } = this.props; const filename = (0, _source.getFilename)(source); - const 
onClick = () => selectSpecificSource(source.id); + const onClick = () => selectSource(source.id); return _react2.default.createElement("li", { key: source.id, @@ -206,7 +206,7 @@ const mapStateToProps = state => ({ }); exports.default = (0, _reactRedux.connect)(mapStateToProps, { - selectSpecificSource: _actions2.default.selectSpecificSource, + selectSource: _actions2.default.selectSource, moveTab: _actions2.default.moveTab, closeTab: _actions2.default.closeTab, togglePaneCollapse: _actions2.default.togglePaneCollapse, diff --git a/devtools/client/debugger/new/src/components/PrimaryPanes/SourcesTree.js b/devtools/client/debugger/new/src/components/PrimaryPanes/SourcesTree.js index 03e8e32cc4d2..8c6d18860c74 100644 --- a/devtools/client/debugger/new/src/components/PrimaryPanes/SourcesTree.js +++ b/devtools/client/debugger/new/src/components/PrimaryPanes/SourcesTree.js @@ -247,17 +247,14 @@ var _initialiseProps = function () { this.getPath = item => { const path = `${item.path}/${item.name}`; + const source = this.getSource(item); - if ((0, _sourcesTree.isDirectory)(item)) { + if (!source || (0, _sourcesTree.isDirectory)(item)) { return path; } - const source = this.getSource(item); - const blackBoxedPart = source && source.isBlackBoxed ? ":blackboxed" : ""; // Original and generated sources can point to the same path - // therefore necessary to distinguish as path is used as keys. - - const generatedPart = source && source.sourceMapURL ? ":generated" : ""; - return `${path}${blackBoxedPart}${generatedPart}`; + const blackBoxedPart = source.isBlackBoxed ? 
":blackboxed" : ""; + return `${path}/${source.id}/${blackBoxedPart}`; }; this.onExpand = (item, expandedState) => { diff --git a/devtools/client/debugger/new/src/utils/dbg.js b/devtools/client/debugger/new/src/utils/dbg.js index 13dab870abfc..0db09838edfc 100644 --- a/devtools/client/debugger/new/src/utils/dbg.js +++ b/devtools/client/debugger/new/src/utils/dbg.js @@ -21,14 +21,13 @@ function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at . */ function findSource(dbg, url) { - const sources = dbg.selectors.getSources(); - const source = sources.find(s => (s.url || "").includes(url)); + const sources = dbg.selectors.getSourceList(); + return sources.find(s => (s.url || "").includes(url)); +} - if (!source) { - return; - } - - return source; +function findSources(dbg, url) { + const sources = dbg.selectors.getSourceList(); + return sources.filter(s => (s.url || "").includes(url)); } function sendPacket(dbg, packet, callback) { @@ -76,6 +75,7 @@ function setupHelper(obj) { getCM, helpers: { findSource: url => findSource(dbg, url), + findSources: url => findSources(dbg, url), evaluate: (expression, cbk) => evaluate(dbg, expression, cbk), sendPacketToThread: (packet, cbk) => sendPacketToThread(dbg, packet, cbk), sendPacket: (packet, cbk) => sendPacket(dbg, packet, cbk) diff --git a/devtools/client/debugger/new/src/utils/pause/mapScopes/index.js b/devtools/client/debugger/new/src/utils/pause/mapScopes/index.js index bf7e7eaf045b..caae3ab10e21 100644 --- a/devtools/client/debugger/new/src/utils/pause/mapScopes/index.js +++ b/devtools/client/debugger/new/src/utils/pause/mapScopes/index.js @@ -31,8 +31,13 @@ async function buildMappedScopes(source, frame, scopes, sourceMaps, client) { return null; } - const generatedAstBindings = (0, _buildGeneratedBindingList.buildGeneratedBindingList)(scopes, generatedAstScopes, frame.this); const originalRanges = 
await (0, _rangeMetadata.loadRangeMetadata)(source, frame, originalAstScopes, sourceMaps); + + if (hasLineMappings(originalRanges)) { + return null; + } + + const generatedAstBindings = (0, _buildGeneratedBindingList.buildGeneratedBindingList)(scopes, generatedAstScopes, frame.this); const { mappedOriginalScopes, expressionLookup @@ -104,6 +109,10 @@ function isReliableScope(scope) { return totalBindings === 0 || unknownBindings / totalBindings < 0.25; } +function hasLineMappings(ranges) { + return ranges.every(range => range.columnStart === 0 && range.columnEnd === Infinity); +} + function batchScopeMappings(originalAstScopes, source, sourceMaps) { const precalculatedRanges = new Map(); const precalculatedLocations = new Map(); // Explicitly dispatch all of the sourcemap requests synchronously up front so diff --git a/devtools/client/debugger/new/src/utils/prefs.js b/devtools/client/debugger/new/src/utils/prefs.js index 5ebc7076ed9c..9f41954d694d 100644 --- a/devtools/client/debugger/new/src/utils/prefs.js +++ b/devtools/client/debugger/new/src/utils/prefs.js @@ -20,7 +20,7 @@ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { de /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at . 
*/ -const prefsSchemaVersion = "1.0.3"; +const prefsSchemaVersion = "1.0.4"; const pref = _devtoolsServices2.default.pref; if ((0, _devtoolsEnvironment.isDevelopment)()) { @@ -128,5 +128,6 @@ const asyncStore = exports.asyncStore = (0, _asyncStoreHelper.asyncStoreHelper)( if (prefs.debuggerPrefsSchemaVersion !== prefsSchemaVersion) { // clear pending Breakpoints prefs.pendingBreakpoints = {}; + prefs.tabs = []; prefs.debuggerPrefsSchemaVersion = prefsSchemaVersion; } \ No newline at end of file diff --git a/devtools/client/debugger/new/test/mochitest/browser_dbg-preview-source-maps.js b/devtools/client/debugger/new/test/mochitest/browser_dbg-preview-source-maps.js index 8f6aed1fc07c..b6b37c1331fc 100644 --- a/devtools/client/debugger/new/test/mochitest/browser_dbg-preview-source-maps.js +++ b/devtools/client/debugger/new/test/mochitest/browser_dbg-preview-source-maps.js @@ -60,7 +60,7 @@ add_task(async function() { ]); info(`Test that you can not preview in another original file`); - await selectSpecificSource(dbg, "output"); + await selectSource(dbg, "output"); await hoverAtPos(dbg, { line: 2, ch: 16 }); await assertNoTooltip(dbg); }); diff --git a/devtools/client/debugger/new/test/mochitest/browser_dbg-sourcemaps.js b/devtools/client/debugger/new/test/mochitest/browser_dbg-sourcemaps.js index 4d5e6fc31d77..f8296c0363e2 100644 --- a/devtools/client/debugger/new/test/mochitest/browser_dbg-sourcemaps.js +++ b/devtools/client/debugger/new/test/mochitest/browser_dbg-sourcemaps.js @@ -49,7 +49,7 @@ add_task(async function() { ok(true, "Original sources exist"); const bundleSrc = findSource(dbg, "bundle.js"); - await selectSpecificSource(dbg, bundleSrc); + await selectSource(dbg, bundleSrc); await clickGutter(dbg, 13); await waitForDispatch(dbg, "ADD_BREAKPOINT"); @@ -61,7 +61,7 @@ add_task(async function() { const entrySrc = findSource(dbg, "entry.js"); - await selectSpecificSource(dbg, entrySrc); + await selectSource(dbg, entrySrc); ok( getCM(dbg) .getValue() diff 
--git a/devtools/client/debugger/new/test/mochitest/browser_dbg-wasm-sourcemaps.js b/devtools/client/debugger/new/test/mochitest/browser_dbg-wasm-sourcemaps.js index 293bcea56ad7..2af1ecc4c07e 100644 --- a/devtools/client/debugger/new/test/mochitest/browser_dbg-wasm-sourcemaps.js +++ b/devtools/client/debugger/new/test/mochitest/browser_dbg-wasm-sourcemaps.js @@ -24,7 +24,7 @@ add_task(async function() { ok(true, "Original sources exist"); const mainSrc = findSource(dbg, "fib.c"); - await selectSpecificSource(dbg, mainSrc); + await selectSource(dbg, mainSrc); await addBreakpoint(dbg, "fib.c", 10); resume(dbg); diff --git a/devtools/client/debugger/new/test/mochitest/helpers.js b/devtools/client/debugger/new/test/mochitest/helpers.js index 1fbcb48cf104..abfbda51ae30 100644 --- a/devtools/client/debugger/new/test/mochitest/helpers.js +++ b/devtools/client/debugger/new/test/mochitest/helpers.js @@ -598,12 +598,6 @@ function waitForLoadedSources(dbg) { * @static */ async function selectSource(dbg, url, line) { - const source = findSource(dbg, url); - await dbg.actions.selectLocation({ sourceId: source.id, line }); - return waitForSelectedSource(dbg, url); -} - -async function selectSpecificSource(dbg, url, line) { const source = findSource(dbg, url); await dbg.actions.selectLocation({ sourceId: source.id, line }, {keepContext: false}); return waitForSelectedSource(dbg, url); diff --git a/devtools/client/framework/source-map-url-service.js b/devtools/client/framework/source-map-url-service.js index 44323a66937c..5eba1e1225ec 100644 --- a/devtools/client/framework/source-map-url-service.js +++ b/devtools/client/framework/source-map-url-service.js @@ -53,30 +53,33 @@ function SourceMapURLService(toolbox, sourceMapService) { */ SourceMapURLService.prototype._getLoadingPromise = function() { if (!this._loadingPromise) { - let styleSheetsLoadingPromise = null; - this._stylesheetsFront = this._toolbox.initStyleSheetsFront(); - if (this._stylesheetsFront) { + 
this._loadingPromise = (async () => { + if (this._target.isWorkerTarget) { + return; + } + this._stylesheetsFront = await this._target.getFront("stylesheets"); this._stylesheetsFront.on("stylesheet-added", this._onNewStyleSheet); - styleSheetsLoadingPromise = + const styleSheetsLoadingPromise = this._stylesheetsFront.getStyleSheets().then(sheets => { sheets.forEach(this._onNewStyleSheet); }, () => { // Ignore any protocol-based errors. }); - } - // Start fetching the sources now. - const loadingPromise = this._toolbox.threadClient.getSources().then(({sources}) => { - // Ignore errors. Register the sources we got; we can't rely on - // an event to arrive if the source actor already existed. - for (const source of sources) { - this._onSourceUpdated({source}); - } - }, e => { - // Also ignore any protocol-based errors. - }); + // Start fetching the sources now. + const loadingPromise = this._toolbox.threadClient.getSources().then(({sources}) => { + // Ignore errors. Register the sources we got; we can't rely on + // an event to arrive if the source actor already existed. + for (const source of sources) { + this._onSourceUpdated({source}); + } + }, e => { + // Also ignore any protocol-based errors. 
+ }); - this._loadingPromise = Promise.all([styleSheetsLoadingPromise, loadingPromise]); + await styleSheetsLoadingPromise; + await loadingPromise; + })(); } return this._loadingPromise; }; diff --git a/devtools/client/framework/target.js b/devtools/client/framework/target.js index adae057d0e0a..32dc05367f7f 100644 --- a/devtools/client/framework/target.js +++ b/devtools/client/framework/target.js @@ -13,6 +13,7 @@ loader.lazyRequireGetter(this, "DebuggerClient", "devtools/shared/client/debugger-client", true); loader.lazyRequireGetter(this, "gDevTools", "devtools/client/framework/devtools", true); +loader.lazyRequireGetter(this, "getFront", "devtools/shared/protocol", true); const targets = new WeakMap(); const promiseTargets = new WeakMap(); @@ -138,6 +139,9 @@ function TabTarget(tab) { } else { this._isBrowsingContext = true; } + // Cache of already created targed-scoped fronts + // [typeName:string => Front instance] + this.fronts = new Map(); } exports.TabTarget = TabTarget; @@ -271,26 +275,21 @@ TabTarget.prototype = { return this._form; }, - // Get a promise of the root form returned by a getRoot request. This promise - // is cached. + // Get a promise of the RootActor's form get root() { - if (!this._root) { - this._root = this._getRoot(); - } - return this._root; + return this.client.mainRoot.rootForm; }, - _getRoot: function() { - return new Promise((resolve, reject) => { - this.client.mainRoot.getRoot(response => { - if (response.error) { - reject(new Error(response.error + ": " + response.message)); - return; - } - - resolve(response); - }); - }); + // Get a Front for a target-scoped actor. + // i.e. 
an actor served by RootActor.listTabs or RootActorActor.getTab requests + getFront(typeName) { + let front = this.fronts.get(typeName); + if (front) { + return front; + } + front = getFront(this.client, typeName, this.form); + this.fronts.set(typeName, front); + return front; }, get client() { @@ -506,7 +505,6 @@ TabTarget.prototype = { */ _setupListeners: function() { this.tab.addEventListener("TabClose", this); - this.tab.parentNode.addEventListener("TabSelect", this); this.tab.ownerDocument.defaultView.addEventListener("unload", this); this.tab.addEventListener("TabRemotenessChange", this); }, @@ -517,7 +515,6 @@ TabTarget.prototype = { _teardownListeners: function() { this._tab.ownerDocument.defaultView.removeEventListener("unload", this); this._tab.removeEventListener("TabClose", this); - this._tab.parentNode.removeEventListener("TabSelect", this); this._tab.removeEventListener("TabRemotenessChange", this); }, @@ -599,13 +596,6 @@ TabTarget.prototype = { case "unload": this.destroy(); break; - case "TabSelect": - if (this.tab.selected) { - this.emit("visible", event); - } else { - this.emit("hidden", event); - } - break; case "TabRemotenessChange": this.onRemotenessChange(); break; @@ -650,10 +640,14 @@ TabTarget.prototype = { return this._destroyer; } - this._destroyer = new Promise(resolve => { + this._destroyer = new Promise(async (resolve) => { // Before taking any action, notify listeners that destruction is imminent. 
this.emit("close"); + for (const [, front] of this.fronts) { + await front.destroy(); + } + if (this._tab) { this._teardownListeners(); } diff --git a/devtools/client/framework/test/browser_target_events.js b/devtools/client/framework/test/browser_target_events.js index a16bcdafa0b7..45c8c766eb3c 100644 --- a/devtools/client/framework/test/browser_target_events.js +++ b/devtools/client/framework/test/browser_target_events.js @@ -9,16 +9,6 @@ add_task(async function() { await target.makeRemote(); is(target.tab, gBrowser.selectedTab, "Target linked to the right tab."); - const hidden = once(target, "hidden"); - gBrowser.selectedTab = BrowserTestUtils.addTab(gBrowser); - await hidden; - ok(true, "Hidden event received"); - - const visible = once(target, "visible"); - gBrowser.removeCurrentTab(); - await visible; - ok(true, "Visible event received"); - const willNavigate = once(target, "will-navigate"); const navigate = once(target, "navigate"); ContentTask.spawn(gBrowser.selectedBrowser, null, () => { diff --git a/devtools/client/framework/test/browser_toolbox_tool_remote_reopen.js b/devtools/client/framework/test/browser_toolbox_tool_remote_reopen.js index ddcc5f8c5be8..66c26d8aed96 100644 --- a/devtools/client/framework/test/browser_toolbox_tool_remote_reopen.js +++ b/devtools/client/framework/test/browser_toolbox_tool_remote_reopen.js @@ -92,12 +92,21 @@ function test() { const target = await getTarget(client); await runTools(target); + const rootFronts = [...client.mainRoot.fronts.values()]; + // Actor fronts should be destroyed now that the toolbox has closed, but // look for any that remain. for (const pool of client.__pools) { if (!pool.__poolMap) { continue; } + + // Ignore the root fronts, which are top-level pools and aren't released + // on toolbox destroy, but on client close. 
+ if (rootFronts.includes(pool)) { + continue; + } + for (const actor of pool.__poolMap.keys()) { // Bug 1056342: Profiler fails today because of framerate actor, but // this appears more complex to rework, so leave it for that bug to diff --git a/devtools/client/framework/toolbox.js b/devtools/client/framework/toolbox.js index fe5a5da6ca13..472d133c9884 100644 --- a/devtools/client/framework/toolbox.js +++ b/devtools/client/framework/toolbox.js @@ -45,10 +45,6 @@ loader.lazyRequireGetter(this, "InspectorFront", "devtools/shared/fronts/inspector", true); loader.lazyRequireGetter(this, "flags", "devtools/shared/flags"); -loader.lazyRequireGetter(this, "createPerformanceFront", - "devtools/shared/fronts/performance", true); -loader.lazyRequireGetter(this, "getPreferenceFront", - "devtools/shared/fronts/preference", true); loader.lazyRequireGetter(this, "KeyShortcuts", "devtools/client/shared/key-shortcuts"); loader.lazyRequireGetter(this, "ZoomKeys", @@ -63,12 +59,8 @@ loader.lazyRequireGetter(this, "HUDService", "devtools/client/webconsole/hudservice", true); loader.lazyRequireGetter(this, "viewSource", "devtools/client/shared/view-source"); -loader.lazyRequireGetter(this, "StyleSheetsFront", - "devtools/shared/fronts/stylesheets", true); loader.lazyRequireGetter(this, "buildHarLog", "devtools/client/netmonitor/src/har/har-builder-utils", true); -loader.lazyRequireGetter(this, "getKnownDeviceFront", - "devtools/shared/fronts/device", true); loader.lazyRequireGetter(this, "NetMonitorAPI", "devtools/client/netmonitor/src/api", true); loader.lazyRequireGetter(this, "sortPanelDefinitions", @@ -125,7 +117,6 @@ function Toolbox(target, selectedTool, hostType, contentWindow, frameId, this._initInspector = null; this._inspector = null; - this._styleSheets = null; this._netMonitorAPI = null; // Map of frames (id => frame-info) and currently selected frame id. @@ -2220,16 +2211,7 @@ Toolbox.prototype = { * client. 
See the definition of the preference actor for more information. */ get preferenceFront() { - if (this._preferenceFront) { - return Promise.resolve(this._preferenceFront); - } - return this.isOpen.then(() => { - return this.target.root.then(rootForm => { - const front = getPreferenceFront(this.target.client, rootForm); - this._preferenceFront = front; - return front; - }); - }); + return this.target.client.mainRoot.getFront("preference"); }, // Is the disable auto-hide of pop-ups feature available in this context? @@ -2935,22 +2917,8 @@ Toolbox.prototype = { // Destroy the profiler connection outstanding.push(this.destroyPerformance()); - // Destroy the preference front - outstanding.push(this.destroyPreference()); - - // Destroy the style sheet front. - if (this._styleSheets) { - this._styleSheets.destroy(); - this._styleSheets = null; - } - - // Destroy the device front for the current client if any. - // A given DeviceFront instance can cached and shared between different panels, so - // destroying it is the responsibility of the toolbox. - const deviceFront = getKnownDeviceFront(this.target.client); - if (deviceFront) { - deviceFront.destroy(); - } + // Reset preferences set by the toolbox + outstanding.push(this.resetPreference()); // Detach the thread detachThread(this._threadClient); @@ -3104,7 +3072,7 @@ Toolbox.prototype = { resolvePerformance = resolve; }); - this._performance = createPerformanceFront(this._target); + this._performance = this.target.getFront("performance"); await this.performance.connect(); // Emit an event when connected, but don't wait on startup for this. @@ -3135,20 +3103,9 @@ Toolbox.prototype = { }, /** - * Return the style sheets front, creating it if necessary. If the - * style sheets front is not supported by the target, returns null. + * Reset preferences set by the toolbox. 
*/ - initStyleSheetsFront: function() { - if (!this._styleSheets && this.target.hasActor("styleSheets")) { - this._styleSheets = StyleSheetsFront(this.target.client, this.target.form); - } - return this._styleSheets; - }, - - /** - * Destroy the preferences actor when the toolbox is unloaded. - */ - async destroyPreference() { + async resetPreference() { if (!this._preferenceFront) { return; } @@ -3159,7 +3116,6 @@ Toolbox.prototype = { await this._preferenceFront.clearUserPref(DISABLE_AUTOHIDE_PREF); } - this._preferenceFront.destroy(); this._preferenceFront = null; }, diff --git a/devtools/client/performance-new/panel.js b/devtools/client/performance-new/panel.js index bc03c5d80f3c..5e1dfc3cf803 100644 --- a/devtools/client/performance-new/panel.js +++ b/devtools/client/performance-new/panel.js @@ -4,7 +4,6 @@ "use strict"; const { PerfFront } = require("devtools/shared/fronts/perf"); -const { getPreferenceFront } = require("devtools/shared/fronts/preference"); loader.lazyRequireGetter(this, "EventEmitter", "devtools/shared/event-emitter"); class PerformancePanel { @@ -32,7 +31,7 @@ class PerformancePanel { const rootForm = await this.target.root; const perfFront = new PerfFront(this.target.client, rootForm); - const preferenceFront = getPreferenceFront(this.target.client, rootForm); + const preferenceFront = this.target.client.mainRoot.getFront("preference"); this.isReady = true; this.emit("ready"); diff --git a/devtools/client/preferences/devtools-client.js b/devtools/client/preferences/devtools-client.js index 44220d721e94..d879802bcd38 100644 --- a/devtools/client/preferences/devtools-client.js +++ b/devtools/client/preferences/devtools-client.js @@ -335,8 +335,8 @@ pref("devtools.aboutdebugging.network-locations", "[]"); #endif // Map top-level await expressions in the console -#if defined(RELEASE_OR_BETA) -pref("devtools.debugger.features.map-await-expression", false); -#else +#if defined(NIGHTLY_BUILD) || defined(MOZ_DEV_EDITION) 
pref("devtools.debugger.features.map-await-expression", true); +#else +pref("devtools.debugger.features.map-await-expression", false); #endif diff --git a/devtools/client/styleeditor/panel.js b/devtools/client/styleeditor/panel.js index 9a9cccedf786..d94be9843408 100644 --- a/devtools/client/styleeditor/panel.js +++ b/devtools/client/styleeditor/panel.js @@ -47,7 +47,7 @@ StyleEditorPanel.prototype = { this.target.on("close", this.destroy); - this._debuggee = this._toolbox.initStyleSheetsFront(); + this._debuggee = await this._target.getFront("stylesheets"); // Initialize the CSS properties database. const {cssProperties} = await initCssProperties(this._toolbox); diff --git a/devtools/client/webide/modules/app-manager.js b/devtools/client/webide/modules/app-manager.js index a68cf22faca6..be5e62f97fbe 100644 --- a/devtools/client/webide/modules/app-manager.js +++ b/devtools/client/webide/modules/app-manager.js @@ -11,8 +11,6 @@ const {AppProjects} = require("devtools/client/webide/modules/app-projects"); const TabStore = require("devtools/client/webide/modules/tab-store"); const {AppValidator} = require("devtools/client/webide/modules/app-validator"); const {ConnectionManager, Connection} = require("devtools/shared/client/connection-manager"); -const {getDeviceFront} = require("devtools/shared/fronts/device"); -const {getPreferenceFront} = require("devtools/shared/fronts/preference"); const {RuntimeScanners} = require("devtools/client/webide/modules/runtimes"); const {RuntimeTypes} = require("devtools/client/webide/modules/runtime-types"); const {NetUtil} = require("resource://gre/modules/NetUtil.jsm"); @@ -141,7 +139,7 @@ var AppManager = exports.AppManager = { } }, - onConnectionChanged: function() { + onConnectionChanged: async function() { console.log("Connection status changed: " + this.connection.status); if (this.connection.status == Connection.Status.DISCONNECTED) { @@ -150,12 +148,21 @@ var AppManager = exports.AppManager = { if (!this.connected) { 
this._listTabsResponse = null; + this.deviceFront = null; + this.preferenceFront = null; } else { - this.connection.client.listTabs().then((response) => { - this._listTabsResponse = response; - this._recordRuntimeInfo(); - this.update("runtime-global-actors"); + const response = await this.connection.client.listTabs(); + // RootClient.getRoot request was introduced in FF59, but RootClient.getFront + // expects it to work. Override its root form with the listTabs results (which is + // an equivalent) in orfer to fix RootClient.getFront. + Object.defineProperty(this.connection.client.mainRoot, "rootForm", { + value: response }); + this._listTabsResponse = response; + this.deviceFront = await this.connection.client.mainRoot.getFront("device"); + this.preferenceFront = await this.connection.client.mainRoot.getFront("preference"); + this._recordRuntimeInfo(); + this.update("runtime-global-actors"); } this.update("connection"); @@ -509,20 +516,6 @@ var AppManager = exports.AppManager = { return this._listTabsResponse; }, - get deviceFront() { - if (!this._listTabsResponse) { - return null; - } - return getDeviceFront(this.connection.client, this._listTabsResponse); - }, - - get preferenceFront() { - if (!this._listTabsResponse) { - return null; - } - return getPreferenceFront(this.connection.client, this._listTabsResponse); - }, - disconnectRuntime: function() { if (!this.connected) { return Promise.resolve(); diff --git a/devtools/client/webide/test/test_fullscreenToolbox.html b/devtools/client/webide/test/test_fullscreenToolbox.html index 6a5c1a4c519d..1a51432be892 100644 --- a/devtools/client/webide/test/test_fullscreenToolbox.html +++ b/devtools/client/webide/test/test_fullscreenToolbox.html @@ -33,10 +33,13 @@ const docRuntime = getRuntimeDocument(win); win.AppManager.update("runtime-list"); + const onGlobalActors = waitForUpdate(win, "runtime-global-actors"); + const onRuntimeTargets = waitForUpdate(win, "runtime-targets"); connectToLocal(win, docRuntime); + await 
onGlobalActors; + await onRuntimeTargets; // Select main process - await waitForUpdate(win, "runtime-targets"); SimpleTest.executeSoon(() => { docProject.querySelectorAll("#project-panel-runtimeapps .panel-item")[0].click(); }); diff --git a/devtools/client/webide/test/test_runtime.html b/devtools/client/webide/test/test_runtime.html index 7f95879da84e..cf4d07d73353 100644 --- a/devtools/client/webide/test/test_runtime.html +++ b/devtools/client/webide/test/test_runtime.html @@ -136,11 +136,13 @@ ok(!isStopActive(), "stop button is disabled"); connectionsChanged = waitForConnectionChange("opened", 2); + const onGlobalActors = waitForUpdate(win, "runtime-global-actors"); + const onRuntimeTargets = waitForUpdate(win, "runtime-targets"); docRuntime.querySelectorAll(".runtime-panel-item-other")[1].click(); - - await waitForUpdate(win, "runtime-targets"); - await connectionsChanged; + await onGlobalActors; + await onRuntimeTargets; + is(Object.keys(DebuggerServer._connections).length, 2, "Locally connected"); ok(win.AppManager.isMainProcessDebuggable(), "Main process available"); diff --git a/devtools/server/tests/mochitest/test_device.html b/devtools/server/tests/mochitest/test_device.html index 3ada49f0c628..0c2b44c907d5 100644 --- a/devtools/server/tests/mochitest/test_device.html +++ b/devtools/server/tests/mochitest/test_device.html @@ -22,16 +22,14 @@ window.onload = function() { SimpleTest.waitForExplicitFinish(); - const {getDeviceFront} = require("devtools/shared/fronts/device"); - DebuggerServer.init(); DebuggerServer.registerAllActors(); const client = new DebuggerClient(DebuggerServer.connectPipe()); client.connect().then(function onConnect() { client.listTabs().then(function onListTabs(response) { - const d = getDeviceFront(client, response); - + return client.mainRoot.getFront("device"); + }).then(function(d) { let desc; const appInfo = Services.appinfo; const utils = window.windowUtils; diff --git a/devtools/server/tests/mochitest/test_preference.html 
b/devtools/server/tests/mochitest/test_preference.html index 1ae9ef0a3770..aec7ff0d52ff 100644 --- a/devtools/server/tests/mochitest/test_preference.html +++ b/devtools/server/tests/mochitest/test_preference.html @@ -22,16 +22,14 @@ function runTests() { SimpleTest.waitForExplicitFinish(); - const {getPreferenceFront} = require("devtools/shared/fronts/preference"); - DebuggerServer.init(); DebuggerServer.registerAllActors(); const client = new DebuggerClient(DebuggerServer.connectPipe()); client.connect().then(function onConnect() { client.listTabs().then(function onListTabs(response) { - const p = getPreferenceFront(client, response); - + return client.mainRoot.getFront("preference"); + }).then(function(p) { const prefs = {}; const localPref = { diff --git a/devtools/server/tests/unit/test_xpcshell_debugging.js b/devtools/server/tests/unit/test_xpcshell_debugging.js index 47121ed02591..c48b24e2dec6 100644 --- a/devtools/server/tests/unit/test_xpcshell_debugging.js +++ b/devtools/server/tests/unit/test_xpcshell_debugging.js @@ -7,8 +7,6 @@ // Test the xpcshell-test debug support. Ideally we should have this test // next to the xpcshell support code, but that's tricky... -const {getDeviceFront} = require("devtools/shared/fronts/device"); - add_task(async function() { const testFile = do_get_file("xpcshell_debugging_script.js"); @@ -23,8 +21,7 @@ add_task(async function() { await client.connect(); // Ensure that global actors are available. Just test the device actor. 
- const rootForm = await client.mainRoot.getRoot(); - const deviceFront = await getDeviceFront(client, rootForm); + const deviceFront = await client.mainRoot.getFront("device"); const desc = await deviceFront.getDescription(); equal(desc.geckobuildid, Services.appinfo.platformBuildID, "device actor works"); diff --git a/devtools/shared/client/debugger-client.js b/devtools/shared/client/debugger-client.js index 216a208c8d22..ef69ecf79d34 100644 --- a/devtools/shared/client/debugger-client.js +++ b/devtools/shared/client/debugger-client.js @@ -19,7 +19,6 @@ const { loader.lazyRequireGetter(this, "Authentication", "devtools/shared/security/auth"); loader.lazyRequireGetter(this, "DebuggerSocket", "devtools/shared/security/socket", true); loader.lazyRequireGetter(this, "EventEmitter", "devtools/shared/event-emitter"); -loader.lazyRequireGetter(this, "getDeviceFront", "devtools/shared/fronts/device", true); loader.lazyRequireGetter(this, "WebConsoleClient", "devtools/shared/webconsole/client", true); loader.lazyRequireGetter(this, "AddonClient", "devtools/shared/client/addon-client"); @@ -204,9 +203,7 @@ DebuggerClient.prototype = { async checkRuntimeVersion(listTabsForm) { let incompatible = null; - // Instead of requiring to pass `listTabsForm` here, - // we can call getRoot() instead, but only once Firefox ESR59 is released - const deviceFront = await getDeviceFront(this, listTabsForm); + const deviceFront = await this.mainRoot.getFront("device"); const desc = await deviceFront.getDescription(); // 1) Check for Firefox too recent on device. 
diff --git a/devtools/shared/client/root-client.js b/devtools/shared/client/root-client.js index 691c7b90bb3e..e3cd26e6b9f5 100644 --- a/devtools/shared/client/root-client.js +++ b/devtools/shared/client/root-client.js @@ -6,6 +6,7 @@ const { Ci } = require("chrome"); const { arg, DebuggerClient } = require("devtools/shared/client/debugger-client"); +loader.lazyRequireGetter(this, "getFront", "devtools/shared/protocol", true); /** * A RootClient object represents a root actor on the server. Each @@ -32,6 +33,23 @@ function RootClient(client, greeting) { this.actor = greeting.from; this.applicationType = greeting.applicationType; this.traits = greeting.traits; + + // Cache root form as this will always be the same value. + // + // Note that rootForm is overloaded by DebuggerClient.checkRuntimeVersion + // in order to support Front instance] + this.fronts = new Map(); } exports.RootClient = RootClient; @@ -43,7 +61,7 @@ RootClient.prototype = { * browser. This can replace usages of `listTabs` that only wanted the global actors * and didn't actually care about tabs. */ - getRoot: DebuggerClient.requester({ type: "getRoot" }), + _getRoot: DebuggerClient.requester({ type: "getRoot" }), /** * List the open tabs. @@ -259,6 +277,24 @@ RootClient.prototype = { return this.request(packet); }, + /* + * This function returns a protocol.js Front for any root actor. + * i.e. the one directly served from RootActor.listTabs or getRoot. + * + * @param String typeName + * The type name used in protocol.js's spec for this actor. + */ + async getFront(typeName) { + let front = this.fronts.get(typeName); + if (front) { + return front; + } + const rootForm = await this.rootForm; + front = getFront(this._client, typeName, rootForm); + this.fronts.set(typeName, front); + return front; + }, + /** * Description of protocol's actors and methods. 
* diff --git a/devtools/shared/fronts/device.js b/devtools/shared/fronts/device.js index ed40e59f20a2..b6b85d400a55 100644 --- a/devtools/shared/fronts/device.js +++ b/devtools/shared/fronts/device.js @@ -36,29 +36,4 @@ const DeviceFront = protocol.FrontClassWithSpec(deviceSpec, { }, }); -const _knownDeviceFronts = new WeakMap(); - -/** - * Retrieve the device front already created for the provided client, if available. - */ -exports.getKnownDeviceFront = function(client) { - return _knownDeviceFronts.get(client); -}; - -/** - * Only one DeviceFront is created for a given client, afterwards the instance is cached - * and returned immediately. - */ -exports.getDeviceFront = function(client, form) { - if (!form.deviceActor) { - return null; - } - - if (_knownDeviceFronts.has(client)) { - return _knownDeviceFronts.get(client); - } - - const front = new DeviceFront(client, form); - _knownDeviceFronts.set(client, front); - return front; -}; +exports.DeviceFront = DeviceFront; diff --git a/devtools/shared/fronts/performance.js b/devtools/shared/fronts/performance.js index 9ef9f444320b..f215edd14060 100644 --- a/devtools/shared/fronts/performance.js +++ b/devtools/shared/fronts/performance.js @@ -131,7 +131,3 @@ const PerformanceFront = FrontClassWithSpec(performanceSpec, { }); exports.PerformanceFront = PerformanceFront; - -exports.createPerformanceFront = function createPerformanceFront(target) { - return new PerformanceFront(target.client, target.form); -}; diff --git a/devtools/shared/fronts/preference.js b/devtools/shared/fronts/preference.js index 6fbed6513c73..dcf8f280e1c4 100644 --- a/devtools/shared/fronts/preference.js +++ b/devtools/shared/fronts/preference.js @@ -14,18 +14,4 @@ const PreferenceFront = protocol.FrontClassWithSpec(preferenceSpec, { }, }); -const _knownPreferenceFronts = new WeakMap(); - -exports.getPreferenceFront = function(client, form) { - if (!form.preferenceActor) { - return null; - } - - if (_knownPreferenceFronts.has(client)) { - return 
_knownPreferenceFronts.get(client); - } - - const front = new PreferenceFront(client, form); - _knownPreferenceFronts.set(client, front); - return front; -}; +exports.PreferenceFront = PreferenceFront; diff --git a/devtools/shared/protocol.js b/devtools/shared/protocol.js index 64a66950cf71..3eedc497c8b7 100644 --- a/devtools/shared/protocol.js +++ b/devtools/shared/protocol.js @@ -1642,3 +1642,15 @@ exports.dumpProtocolSpec = function() { return ret; }; + +function getFront(client, typeName, form) { + const type = types.getType(typeName); + if (!type) { + throw new Error(`No spec for front type '${typeName}'.`); + } + if (!type.frontClass) { + lazyLoadFront(typeName); + } + return type.frontClass(client, form); +} +exports.getFront = getFront; diff --git a/dom/indexedDB/IDBObjectStore.cpp b/dom/indexedDB/IDBObjectStore.cpp index f2ad610166d4..1149b4dbb6a5 100644 --- a/dom/indexedDB/IDBObjectStore.cpp +++ b/dom/indexedDB/IDBObjectStore.cpp @@ -906,12 +906,10 @@ CopyingStructuredCloneReadCallback(JSContext* aCx, if (aTag == SCTAG_DOM_BLOB) { MOZ_ASSERT(file.mType == StructuredCloneFile::eBlob); - - RefPtr blob = file.mBlob; - MOZ_ASSERT(!blob->IsFile()); + MOZ_ASSERT(!file.mBlob->IsFile()); JS::Rooted wrappedBlob(aCx); - if (NS_WARN_IF(!ToJSValue(aCx, blob, &wrappedBlob))) { + if (NS_WARN_IF(!ToJSValue(aCx, file.mBlob, &wrappedBlob))) { return nullptr; } @@ -923,19 +921,23 @@ CopyingStructuredCloneReadCallback(JSContext* aCx, if (aTag == SCTAG_DOM_FILE) { MOZ_ASSERT(file.mType == StructuredCloneFile::eBlob); - RefPtr blob = file.mBlob; - MOZ_ASSERT(blob->IsFile()); + { + // Create a scope so ~RefPtr fires before returning an unwrapped + // JS::Value. 
+ RefPtr blob = file.mBlob; + MOZ_ASSERT(blob->IsFile()); - RefPtr file = blob->ToFile(); - MOZ_ASSERT(file); + RefPtr file = blob->ToFile(); + MOZ_ASSERT(file); - JS::Rooted wrappedFile(aCx); - if (NS_WARN_IF(!ToJSValue(aCx, file, &wrappedFile))) { - return nullptr; + JS::Rooted wrappedFile(aCx); + if (NS_WARN_IF(!ToJSValue(aCx, file, &wrappedFile))) { + return nullptr; + } + + result.set(&wrappedFile.toObject()); } - result.set(&wrappedFile.toObject()); - return result; } diff --git a/dom/ipc/ContentChild.cpp b/dom/ipc/ContentChild.cpp index fc1d2e9c1d29..511ebf884c21 100644 --- a/dom/ipc/ContentChild.cpp +++ b/dom/ipc/ContentChild.cpp @@ -109,6 +109,8 @@ #include "CubebUtils.h" #elif defined(XP_MACOSX) #include "mozilla/Sandbox.h" +#elif defined(__OpenBSD__) +#include #endif #endif @@ -1783,6 +1785,8 @@ ContentChild::RecvSetProcessSandbox(const MaybeFileDesc& aBroker) mozilla::SandboxTarget::Instance()->StartSandbox(); #elif defined(XP_MACOSX) sandboxEnabled = StartMacOSContentSandbox(); +#elif defined(__OpenBSD__) + sandboxEnabled = StartOpenBSDSandbox(GeckoProcessType_Content); #endif CrashReporter::AnnotateCrashReport( @@ -3921,6 +3925,55 @@ ContentChild::OnMessageReceived(const Message& aMsg, Message*& aReply) } // namespace dom +#if defined(__OpenBSD__) && defined(MOZ_CONTENT_SANDBOX) +#include + +static LazyLogModule sPledgeLog("SandboxPledge"); + +bool +StartOpenBSDSandbox(GeckoProcessType type) +{ + nsAutoCString promisesString; + nsAutoCString processTypeString; + + switch (type) { + case GeckoProcessType_Default: + processTypeString = "main"; + Preferences::GetCString("security.sandbox.pledge.main", + promisesString); + break; + + case GeckoProcessType_Content: + processTypeString = "content"; + Preferences::GetCString("security.sandbox.pledge.content", + promisesString); + break; + + default: + MOZ_ASSERT(false, "unknown process type"); + return false; + }; + + if (pledge(promisesString.get(), NULL) == -1) { + if (errno == EINVAL) { + 
MOZ_LOG(sPledgeLog, LogLevel::Error, + ("pledge promises for %s process is a malformed string: '%s'\n", + processTypeString.get(), promisesString.get())); + } else if (errno == EPERM) { + MOZ_LOG(sPledgeLog, LogLevel::Error, + ("pledge promises for %s process can't elevate privileges: '%s'\n", + processTypeString.get(), promisesString.get())); + } + return false; + } else { + MOZ_LOG(sPledgeLog, LogLevel::Debug, + ("pledged %s process with promises: '%s'\n", + processTypeString.get(), promisesString.get())); + } + return true; +} +#endif + #if !defined(XP_WIN) bool IsDevelopmentBuild() { diff --git a/dom/tests/mochitest/general/test_datatransfer_disallowed.html b/dom/tests/mochitest/general/test_datatransfer_disallowed.html index 726db26a870c..bcf6915fa1b6 100644 --- a/dom/tests/mochitest/general/test_datatransfer_disallowed.html +++ b/dom/tests/mochitest/general/test_datatransfer_disallowed.html @@ -11,7 +11,7 @@ function run_test() { SpecialPowers.pushPrefEnv({"set": [ - ["dom.datatransfer.moz", false], + ["dom.datatransfer.mozAtAPIs", false], ]}, function() { let hiddenMethods = ["mozTypesAt", "mozClearDataAt", "mozGetDataAt", "mozSetDataAt", "mozItemCount"]; let exposedMethods = Object.getOwnPropertyNames(DataTransfer.prototype); diff --git a/editor/txmgr/TransactionManagerFactory.cpp b/editor/txmgr/TransactionManagerFactory.cpp deleted file mode 100644 index 94876e8462cc..000000000000 --- a/editor/txmgr/TransactionManagerFactory.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - -#include - -#include "mozilla/Module.h" -#include "mozilla/ModuleUtils.h" -#include "mozilla/TransactionManager.h" -#include "nsID.h" -#include "nsITransactionManager.h" - -using mozilla::TransactionManager; - -//////////////////////////////////////////////////////////////////////// -// Define the contructor function for the objects -// -// NOTE: This creates an instance of objects by using the default constructor -// -NS_GENERIC_FACTORY_CONSTRUCTOR(TransactionManager) -NS_DEFINE_NAMED_CID(NS_TRANSACTIONMANAGER_CID); - -static const mozilla::Module::CIDEntry kTxMgrCIDs[] = { - { &kNS_TRANSACTIONMANAGER_CID, false, nullptr, - TransactionManagerConstructor }, - { nullptr } -}; - -static const mozilla::Module::ContractIDEntry kTxMgrContracts[] = { - { NS_TRANSACTIONMANAGER_CONTRACTID, &kNS_TRANSACTIONMANAGER_CID }, - { nullptr } -}; - -static const mozilla::Module kTxMgrModule = { - mozilla::Module::kVersion, - kTxMgrCIDs, - kTxMgrContracts -}; - -NSMODULE_DEFN(nsTransactionManagerModule) = &kTxMgrModule; diff --git a/editor/txmgr/moz.build b/editor/txmgr/moz.build index 98957516202a..fe2ff825e383 100644 --- a/editor/txmgr/moz.build +++ b/editor/txmgr/moz.build @@ -26,7 +26,6 @@ EXPORTS.mozilla += [ UNIFIED_SOURCES += [ 'TransactionItem.cpp', 'TransactionManager.cpp', - 'TransactionManagerFactory.cpp', 'TransactionStack.cpp', ] diff --git a/editor/txmgr/nsITransactionManager.idl b/editor/txmgr/nsITransactionManager.idl index 1443eb8357b9..0baaf0423da6 100644 --- a/editor/txmgr/nsITransactionManager.idl +++ b/editor/txmgr/nsITransactionManager.idl @@ -160,14 +160,3 @@ interface nsITransactionManager : nsISupports inline mozilla::TransactionManager* AsTransactionManager(); %} }; - -%{ C++ - -#define NS_TRANSACTIONMANAGER_CONTRACTID "@mozilla.org/transactionmanager;1" - -// 9C8F9601-801A-11d2-98BA-00805F297D89 -#define NS_TRANSACTIONMANAGER_CID \ -{ 0x9c8f9601, 0x801a, 0x11d2, \ - { 0x98, 0xba, 0x0, 0x80, 0x5f, 0x29, 0x7d, 0x89 } } - -%} C++ diff --git 
a/editor/txmgr/tests/TestTXMgr.cpp b/editor/txmgr/tests/TestTXMgr.cpp index ccaca6c67b9a..361c9913d303 100644 --- a/editor/txmgr/tests/TestTXMgr.cpp +++ b/editor/txmgr/tests/TestTXMgr.cpp @@ -8,6 +8,9 @@ #include "nsITransactionManager.h" #include "nsComponentManagerUtils.h" #include "mozilla/Likely.h" +#include "mozilla/TransactionManager.h" + +using mozilla::TransactionManager; static int32_t sConstructorCount = 0; static int32_t sDoCount = 0; @@ -533,10 +536,7 @@ quick_test(TestTransactionFactory *factory) * *******************************************************************/ - nsresult rv; - nsCOMPtr mgr = - do_CreateInstance(NS_TRANSACTIONMANAGER_CONTRACTID, &rv); - ASSERT_TRUE(NS_SUCCEEDED(rv)); + nsCOMPtr mgr = new TransactionManager(); /******************************************************************* * @@ -544,7 +544,7 @@ quick_test(TestTransactionFactory *factory) * *******************************************************************/ - rv = mgr->DoTransaction(0); + nsresult rv = mgr->DoTransaction(0); EXPECT_EQ(rv, NS_ERROR_NULL_POINTER); /******************************************************************* @@ -1285,11 +1285,7 @@ quick_batch_test(TestTransactionFactory *factory) * *******************************************************************/ - nsresult rv; - nsCOMPtr mgr = - do_CreateInstance(NS_TRANSACTIONMANAGER_CONTRACTID, &rv); - ASSERT_TRUE(mgr); - ASSERT_TRUE(NS_SUCCEEDED(rv)); + nsCOMPtr mgr = new TransactionManager(); int32_t numitems; @@ -1300,7 +1296,7 @@ quick_batch_test(TestTransactionFactory *factory) * *******************************************************************/ - rv = mgr->GetNumberOfUndoItems(&numitems); + nsresult rv = mgr->GetNumberOfUndoItems(&numitems); EXPECT_TRUE(NS_SUCCEEDED(rv)); EXPECT_EQ(numitems, 0); @@ -1920,12 +1916,9 @@ stress_test(TestTransactionFactory *factory, int32_t iterations) * *******************************************************************/ - nsresult rv; - nsCOMPtr mgr = - 
do_CreateInstance(NS_TRANSACTIONMANAGER_CONTRACTID, &rv); - ASSERT_TRUE(NS_SUCCEEDED(rv)); - ASSERT_TRUE(mgr); + nsCOMPtr mgr = new TransactionManager(); + nsresult rv; int32_t i, j; for (i = 1; i <= iterations; i++) { diff --git a/gfx/thebes/gfxFcPlatformFontList.cpp b/gfx/thebes/gfxFcPlatformFontList.cpp index 1cb6cbfd6014..23e0d6e4c9b8 100644 --- a/gfx/thebes/gfxFcPlatformFontList.cpp +++ b/gfx/thebes/gfxFcPlatformFontList.cpp @@ -1543,7 +1543,7 @@ gfxFcPlatformFontList::AddFontSetFamilies(FcFontSet* aFontSet, continue; } -#ifdef MOZ_CONTENT_SANDBOX +#if defined(MOZ_CONTENT_SANDBOX) && defined (XP_LINUX) // Skip any fonts that will be blocked by the content-process sandbox // policy. if (aPolicy && !(aPolicy->Lookup(reinterpret_cast(path)) & @@ -1705,7 +1705,7 @@ gfxFcPlatformFontList::InitFontListForPlatform() UniquePtr policy; -#ifdef MOZ_CONTENT_SANDBOX +#if defined(MOZ_CONTENT_SANDBOX) && defined (XP_LINUX) // If read sandboxing is enabled, create a temporary SandboxPolicy to // check font paths; use a fake PID to avoid picking up any PID-specific // rules by accident. diff --git a/gfx/thebes/gfxFcPlatformFontList.h b/gfx/thebes/gfxFcPlatformFontList.h index 510a4a21b86a..886223134f17 100644 --- a/gfx/thebes/gfxFcPlatformFontList.h +++ b/gfx/thebes/gfxFcPlatformFontList.h @@ -340,7 +340,7 @@ public: protected: virtual ~gfxFcPlatformFontList(); -#ifdef MOZ_CONTENT_SANDBOX +#if defined(MOZ_CONTENT_SANDBOX) && defined(XP_LINUX) typedef mozilla::SandboxBroker::Policy SandboxPolicy; #else // Dummy type just so we can still have a SandboxPolicy* parameter. 
diff --git a/ipc/chromium/src/build/build_config.h b/ipc/chromium/src/build/build_config.h index dbbf0f3c7963..3965492aa467 100644 --- a/ipc/chromium/src/build/build_config.h +++ b/ipc/chromium/src/build/build_config.h @@ -117,7 +117,7 @@ #elif defined(__alpha__) #define ARCH_CPU_ALPHA 1 #define ARCH_CPU_64_BITS 1 -#elif defined(__aarch64__) +#elif defined(__aarch64__) || defined(_M_ARM64) #define ARCH_CPU_ARM_FAMILY 1 #define ARCH_CPU_ARM64 1 #define ARCH_CPU_64_BITS 1 diff --git a/js/public/AllocPolicy.h b/js/public/AllocPolicy.h index 7df6d24c426b..0ab9417989bb 100644 --- a/js/public/AllocPolicy.h +++ b/js/public/AllocPolicy.h @@ -128,40 +128,6 @@ class TempAllocPolicy : public AllocPolicyBase } }; -/* - * Allocation policy that uses Zone::pod_malloc and friends, so that memory - * pressure is accounted for on the zone. This is suitable for memory associated - * with GC things allocated in the zone. - * - * Since it doesn't hold a JSContext (those may not live long enough), it can't - * report out-of-memory conditions itself; the caller must check for OOM and - * take the appropriate action. - * - * FIXME bug 647103 - replace these *AllocPolicy names. - */ -class ZoneAllocPolicy -{ - JS::Zone* const zone; - - public: - MOZ_IMPLICIT ZoneAllocPolicy(JS::Zone* z) : zone(z) {} - - // These methods are defined in gc/Zone.h. 
- template inline T* maybe_pod_malloc(size_t numElems); - template inline T* maybe_pod_calloc(size_t numElems); - template inline T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize); - template inline T* pod_malloc(size_t numElems); - template inline T* pod_calloc(size_t numElems); - template inline T* pod_realloc(T* p, size_t oldSize, size_t newSize); - - template void free_(T* p, size_t numElems = 0) { js_free(p); } - void reportAllocOverflow() const {} - - MOZ_MUST_USE bool checkSimulatedOOM() const { - return !js::oom::ShouldFailWithOOM(); - } -}; - } /* namespace js */ #endif /* js_AllocPolicy_h */ diff --git a/js/public/GCAnnotations.h b/js/public/GCAnnotations.h index 8b47b24041e8..7b8f9becd43f 100644 --- a/js/public/GCAnnotations.h +++ b/js/public/GCAnnotations.h @@ -26,6 +26,12 @@ // is not itself a GC pointer. # define JS_HAZ_GC_INVALIDATED __attribute__((tag("Invalidated by GC"))) +// Mark a class as a base class of rooted types, eg CustomAutoRooter. All +// descendants of this class will be considered rooted, though classes that +// merely contain these as a field member will not be. "Inherited" by +// templatized types with MOZ_INHERIT_TYPE_ANNOTATIONS_FROM_TEMPLATE_ARGS +# define JS_HAZ_ROOTED_BASE __attribute__((tag("Rooted Base"))) + // Mark a type that would otherwise be considered a GC Pointer (eg because it // contains a JS::Value field) as a non-GC pointer. It is handled almost the // same in the analysis as a rooted pointer, except it will not be reported as @@ -52,6 +58,7 @@ # define JS_HAZ_GC_POINTER # define JS_HAZ_ROOTED # define JS_HAZ_GC_INVALIDATED +# define JS_HAZ_ROOTED_BASE # define JS_HAZ_NON_GC_POINTER # define JS_HAZ_GC_CALL # define JS_HAZ_GC_SUPPRESSED diff --git a/js/public/RootingAPI.h b/js/public/RootingAPI.h index c39c757b9508..b3acc89287d3 100644 --- a/js/public/RootingAPI.h +++ b/js/public/RootingAPI.h @@ -945,7 +945,7 @@ class JS_PUBLIC_API(AutoGCRooter) /* No copy or assignment semantics. 
*/ AutoGCRooter(AutoGCRooter& ida) = delete; void operator=(AutoGCRooter& ida) = delete; -}; +} JS_HAZ_ROOTED_BASE; namespace detail { diff --git a/js/src/builtin/AtomicsObject.cpp b/js/src/builtin/AtomicsObject.cpp index a421cbcf1572..3de47aa6fc75 100644 --- a/js/src/builtin/AtomicsObject.cpp +++ b/js/src/builtin/AtomicsObject.cpp @@ -689,10 +689,10 @@ js::atomics_wait(JSContext* cx, unsigned argc, Value* vp) } int64_t -js::atomics_wake_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t count) +js::atomics_notify_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t count) { // Validation should ensure this does not happen. - MOZ_ASSERT(sarb, "wake is only applicable to shared memory"); + MOZ_ASSERT(sarb, "notify is only applicable to shared memory"); AutoLockFutexAPI lock; @@ -706,7 +706,7 @@ js::atomics_wake_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t c iter = iter->lower_pri; if (c->offset != byteOffset || !c->cx->fx.isWaiting()) continue; - c->cx->fx.wake(FutexThread::WakeExplicit); + c->cx->fx.notify(FutexThread::NotifyExplicit); // Overflow will be a problem only in two cases: // (1) 128-bit systems with substantially more than 2^64 bytes of // memory per process, and a very lightweight @@ -723,7 +723,7 @@ js::atomics_wake_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t c } bool -js::atomics_wake(JSContext* cx, unsigned argc, Value* vp) +js::atomics_notify(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); HandleValue objv = args.get(0); @@ -758,7 +758,7 @@ js::atomics_wake(JSContext* cx, unsigned argc, Value* vp) (view->viewDataShared().cast().unwrap(/* arithmetic */) - sab->dataPointerShared().unwrap(/* arithmetic */)); - r.setNumber(double(atomics_wake_impl(sab->rawBufferObject(), byteOffset, count))); + r.setNumber(double(atomics_notify_impl(sab->rawBufferObject(), byteOffset, count))); return true; } @@ -830,7 +830,7 @@ js::FutexThread::isWaiting() // When a worker is 
awoken for an interrupt it goes into state // WaitingNotifiedForInterrupt for a short time before it actually // wakes up and goes into WaitingInterrupted. In those states the - // worker is still waiting, and if an explicit wake arrives the + // worker is still waiting, and if an explicit notify arrives the // worker transitions to Woken. See further comments in // FutexThread::wait(). return state_ == Waiting || state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt; @@ -909,14 +909,14 @@ js::FutexThread::wait(JSContext* cx, js::UniqueLock& locked, // should be woken when the interrupt handler returns. // To that end, we flag the thread as interrupted around // the interrupt and check state_ when the interrupt - // handler returns. A wake() call that reaches the + // handler returns. A notify() call that reaches the // runtime during the interrupt sets state_ to Woken. // // - It is in principle possible for wait() to be // reentered on the same thread/runtime and waiting on the // same location and to yet again be interrupted and enter // the interrupt handler. In this case, it is important - // that when another agent wakes waiters, all waiters using + // that when another agent notifies waiters, all waiters using // the same runtime on the same location are woken in LIFO // order; FIFO may be the required order, but FIFO would // fail to wake up the innermost call. 
Interrupts are @@ -947,25 +947,25 @@ js::FutexThread::wait(JSContext* cx, js::UniqueLock& locked, } void -js::FutexThread::wake(WakeReason reason) +js::FutexThread::notify(NotifyReason reason) { MOZ_ASSERT(isWaiting()); - if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) && reason == WakeExplicit) { + if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) && reason == NotifyExplicit) { state_ = Woken; return; } switch (reason) { - case WakeExplicit: + case NotifyExplicit: state_ = Woken; break; - case WakeForJSInterrupt: + case NotifyForJSInterrupt: if (state_ == WaitingNotifiedForInterrupt) return; state_ = WaitingNotifiedForInterrupt; break; default: - MOZ_CRASH("bad WakeReason in FutexThread::wake()"); + MOZ_CRASH("bad NotifyReason in FutexThread::notify()"); } cond_->notify_all(); } @@ -982,7 +982,8 @@ const JSFunctionSpec AtomicsMethods[] = { JS_INLINABLE_FN("xor", atomics_xor, 3,0, AtomicsXor), JS_INLINABLE_FN("isLockFree", atomics_isLockFree, 1,0, AtomicsIsLockFree), JS_FN("wait", atomics_wait, 4,0), - JS_FN("wake", atomics_wake, 3,0), + JS_FN("notify", atomics_notify, 3,0), + JS_FN("wake", atomics_notify, 3,0), // Legacy name JS_FS_END }; diff --git a/js/src/builtin/AtomicsObject.h b/js/src/builtin/AtomicsObject.h index 42ed57cc819d..e1078a158e0c 100644 --- a/js/src/builtin/AtomicsObject.h +++ b/js/src/builtin/AtomicsObject.h @@ -39,7 +39,7 @@ MOZ_MUST_USE bool atomics_or(JSContext* cx, unsigned argc, Value* vp); MOZ_MUST_USE bool atomics_xor(JSContext* cx, unsigned argc, Value* vp); MOZ_MUST_USE bool atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp); MOZ_MUST_USE bool atomics_wait(JSContext* cx, unsigned argc, Value* vp); -MOZ_MUST_USE bool atomics_wake(JSContext* cx, unsigned argc, Value* vp); +MOZ_MUST_USE bool atomics_notify(JSContext* cx, unsigned argc, Value* vp); class FutexThread { @@ -56,10 +56,10 @@ public: MOZ_MUST_USE bool initInstance(); void destroyInstance(); - // Parameters to wake(). 
- enum WakeReason { - WakeExplicit, // Being asked to wake up by another thread - WakeForJSInterrupt // Interrupt requested + // Parameters to notify(). + enum NotifyReason { + NotifyExplicit, // Being asked to wake up by another thread + NotifyForJSInterrupt // Interrupt requested }; // Result codes from wait() and atomics_wait_impl(). @@ -78,29 +78,27 @@ public: // times allowed; specify mozilla::Nothing() for an indefinite // wait. // - // wait() will not wake up spuriously. It will return true and - // set *result to a return code appropriate for - // Atomics.wait() on success, and return false on error. + // wait() will not wake up spuriously. MOZ_MUST_USE WaitResult wait(JSContext* cx, js::UniqueLock& locked, const mozilla::Maybe& timeout); - // Wake the thread this is associated with. + // Notify the thread this is associated with. // // The futex lock must be held around this call. (The sleeping - // thread will not wake up until the caller of Atomics.wake() + // thread will not wake up until the caller of Atomics.notify() // releases the lock.) // // If the thread is not waiting then this method does nothing. // // If the thread is waiting in a call to wait() and the - // reason is WakeExplicit then the wait() call will return + // reason is NotifyExplicit then the wait() call will return // with Woken. // // If the thread is waiting in a call to wait() and the - // reason is WakeForJSInterrupt then the wait() will return + // reason is NotifyForJSInterrupt then the wait() will return // with WaitingNotifiedForInterrupt; in the latter case the caller // of wait() must handle the interrupt. 
- void wake(WakeReason reason); + void notify(NotifyReason reason); bool isWaiting(); @@ -123,7 +121,7 @@ public: // interrupt handler WaitingInterrupted, // We are waiting, but have been interrupted // and are running the interrupt handler - Woken // Woken by a script call to Atomics.wake + Woken // Woken by a script call to Atomics.notify }; // Condition variable that this runtime will wait on. @@ -157,12 +155,12 @@ MOZ_MUST_USE FutexThread::WaitResult atomics_wait_impl(JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t value, const mozilla::Maybe& timeout); -// Wake some waiters on the given address. If `count` is negative then wake +// Notify some waiters on the given address. If `count` is negative then notify // all. The return value is nonnegative and is the number of waiters woken. If // the number of waiters woken exceeds INT64_MAX then this never returns. If // `count` is nonnegative then the return value is never greater than `count`. MOZ_MUST_USE int64_t -atomics_wake_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t count); +atomics_notify_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset, int64_t count); } /* namespace js */ diff --git a/js/src/builtin/DataViewObject.cpp b/js/src/builtin/DataViewObject.cpp index e4797163954e..d9f6174523a6 100644 --- a/js/src/builtin/DataViewObject.cpp +++ b/js/src/builtin/DataViewObject.cpp @@ -171,7 +171,7 @@ bool DataViewObject::constructSameCompartment(JSContext* cx, HandleObject bufobj, const CallArgs& args) { MOZ_ASSERT(args.isConstructing()); - assertSameCompartment(cx, bufobj); + cx->check(bufobj); uint32_t byteOffset, byteLength; if (!getAndCheckConstructorArgs(cx, bufobj, args, &byteOffset, &byteLength)) diff --git a/js/src/builtin/Eval.cpp b/js/src/builtin/Eval.cpp index db96a837bf12..f78aac37b23e 100644 --- a/js/src/builtin/Eval.cpp +++ b/js/src/builtin/Eval.cpp @@ -440,7 +440,7 @@ static bool ExecuteInExtensibleLexicalEnvironment(JSContext* cx, HandleScript scriptArg, 
HandleObject env) { CHECK_REQUEST(cx); - assertSameCompartment(cx, env); + cx->check(env); MOZ_ASSERT(IsExtensibleLexicalEnvironment(env)); MOZ_RELEASE_ASSERT(scriptArg->hasNonSyntacticScope()); @@ -518,7 +518,7 @@ JS_FRIEND_API(bool) js::ExecuteInJSMEnvironment(JSContext* cx, HandleScript scriptArg, HandleObject varEnv, AutoObjectVector& targetObj) { - assertSameCompartment(cx, varEnv); + cx->check(varEnv); MOZ_ASSERT(ObjectRealm::get(varEnv).getNonSyntacticLexicalEnvironment(varEnv)); MOZ_DIAGNOSTIC_ASSERT(scriptArg->noScriptRval()); diff --git a/js/src/builtin/MapObject.cpp b/js/src/builtin/MapObject.cpp index 5b1e4c4571ba..ef4845fcc382 100644 --- a/js/src/builtin/MapObject.cpp +++ b/js/src/builtin/MapObject.cpp @@ -1711,7 +1711,7 @@ RetT CallObjFunc(RetT(*ObjFunc)(JSContext*, HandleObject), JSContext* cx, HandleObject obj) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); // Always unwrap, in case this is an xray or cross-compartment wrapper. RootedObject unwrappedObj(cx); @@ -1729,7 +1729,7 @@ CallObjFunc(bool(*ObjFunc)(JSContext *cx, HandleObject obj, HandleValue key, boo JSContext *cx, HandleObject obj, HandleValue key, bool *rval) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, key); + cx->check(obj, key); // Always unwrap, in case this is an xray or cross-compartment wrapper. RootedObject unwrappedObj(cx); @@ -1754,7 +1754,7 @@ CallObjFunc(bool(*ObjFunc)(JSContext* cx, Iter kind, JSContext *cx, Iter iterType, HandleObject obj, MutableHandleValue rval) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); // Always unwrap, in case this is an xray or cross-compartment wrapper. RootedObject unwrappedObj(cx); @@ -1794,7 +1794,7 @@ JS_PUBLIC_API(bool) JS::MapGet(JSContext* cx, HandleObject obj, HandleValue key, MutableHandleValue rval) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, key, rval); + cx->check(obj, key, rval); // Unwrap the object, and enter its realm. 
If object isn't wrapped, // this is essentially a noop. @@ -1825,7 +1825,7 @@ JS_PUBLIC_API(bool) JS::MapSet(JSContext *cx, HandleObject obj, HandleValue key, HandleValue val) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, key, val); + cx->check(obj, key, val); // Unwrap the object, and enter its compartment. If object isn't wrapped, // this is essentially a noop. @@ -1906,7 +1906,7 @@ JS_PUBLIC_API(bool) JS::SetAdd(JSContext *cx, HandleObject obj, HandleValue key) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, key); + cx->check(obj, key); // Unwrap the object, and enter its compartment. If object isn't wrapped, // this is essentially a noop. diff --git a/js/src/builtin/ModuleObject.cpp b/js/src/builtin/ModuleObject.cpp index 1c39b38bb2da..cadfe92c7410 100644 --- a/js/src/builtin/ModuleObject.cpp +++ b/js/src/builtin/ModuleObject.cpp @@ -1192,8 +1192,6 @@ GlobalObject::initModuleProto(JSContext* cx, Handle global) static const JSFunctionSpec protoFunctions[] = { JS_SELF_HOSTED_FN("getExportedNames", "ModuleGetExportedNames", 1, 0), JS_SELF_HOSTED_FN("resolveExport", "ModuleResolveExport", 2, 0), - JS_SELF_HOSTED_FN("declarationInstantiation", "ModuleInstantiate", 0, 0), - JS_SELF_HOSTED_FN("evaluation", "ModuleEvaluate", 0, 0), JS_FS_END }; diff --git a/js/src/builtin/ModuleObject.h b/js/src/builtin/ModuleObject.h index 68b9607ff08c..b3e7090c45c9 100644 --- a/js/src/builtin/ModuleObject.h +++ b/js/src/builtin/ModuleObject.h @@ -12,6 +12,7 @@ #include "jsapi.h" #include "builtin/SelfHostingDefines.h" +#include "gc/Zone.h" #include "js/GCVector.h" #include "js/Id.h" #include "js/UniquePtr.h" diff --git a/js/src/builtin/Profilers.cpp b/js/src/builtin/Profilers.cpp index 7507c4925d2e..843d7849320e 100644 --- a/js/src/builtin/Profilers.cpp +++ b/js/src/builtin/Profilers.cpp @@ -400,7 +400,7 @@ static const JSFunctionSpec profiling_functions[] = { JS_PUBLIC_API(bool) JS_DefineProfilingFunctions(JSContext* cx, HandleObject obj) { - assertSameCompartment(cx, 
obj); + cx->check(obj); #ifdef MOZ_PROFILING return JS_DefineFunctions(cx, obj, profiling_functions); #else diff --git a/js/src/builtin/Promise.cpp b/js/src/builtin/Promise.cpp index 407dd01a7581..f52a99082339 100644 --- a/js/src/builtin/Promise.cpp +++ b/js/src/builtin/Promise.cpp @@ -232,9 +232,9 @@ NewPromiseAllDataHolder(JSContext* cx, HandleObject resultPromise, HandleValue v if (!dataHolder) return nullptr; - assertSameCompartment(cx, resultPromise); - assertSameCompartment(cx, valuesArray); - assertSameCompartment(cx, resolve); + cx->check(resultPromise); + cx->check(valuesArray); + cx->check(resolve); dataHolder->setFixedSlot(PromiseAllDataHolderSlot_Promise, ObjectValue(*resultPromise)); dataHolder->setFixedSlot(PromiseAllDataHolderSlot_RemainingElements, Int32Value(1)); @@ -772,7 +772,7 @@ static bool Promise_then_impl(JSContext* cx, HandleValue promiseVal, HandleValue static MOZ_MUST_USE bool ResolvePromiseInternal(JSContext* cx, HandleObject promise, HandleValue resolutionVal) { - assertSameCompartment(cx, promise, resolutionVal); + cx->check(promise, resolutionVal); MOZ_ASSERT(!IsSettledMaybeWrappedPromise(promise)); // Step 7 (reordered). @@ -935,7 +935,7 @@ EnqueuePromiseReactionJob(JSContext* cx, HandleObject reactionObj, // Must not enqueue a reaction job more than once. 
MOZ_ASSERT(reaction->targetState() == JS::PromiseState::Pending); - assertSameCompartment(cx, handlerArg); + cx->check(handlerArg); reaction->setTargetStateAndHandlerArg(targetState, handlerArg); RootedValue reactionVal(cx, ObjectValue(*reaction)); @@ -1637,7 +1637,7 @@ PromiseResolveBuiltinThenableJob(JSContext* cx, unsigned argc, Value* vp) RootedObject promise(cx, &job->getExtendedSlot(BuiltinThenableJobSlot_Promise).toObject()); RootedObject thenable(cx, &job->getExtendedSlot(BuiltinThenableJobSlot_Thenable).toObject()); - assertSameCompartment(cx, promise, thenable); + cx->check(promise, thenable); MOZ_ASSERT(promise->is()); MOZ_ASSERT(thenable->is()); @@ -1740,7 +1740,7 @@ static MOZ_MUST_USE bool EnqueuePromiseResolveThenableBuiltinJob(JSContext* cx, HandleObject promiseToResolve, HandleObject thenable) { - assertSameCompartment(cx, promiseToResolve, thenable); + cx->check(promiseToResolve, thenable); MOZ_ASSERT(promiseToResolve->is()); MOZ_ASSERT(thenable->is()); @@ -2156,7 +2156,7 @@ js::GetWaitForAllPromise(JSContext* cx, const JS::AutoObjectVector& promises) #ifdef DEBUG for (size_t i = 0, len = promises.length(); i < len; i++) { JSObject* obj = promises[i]; - assertSameCompartment(cx, obj); + cx->check(obj); MOZ_ASSERT(UncheckedUnwrap(obj)->is()); } #endif @@ -2285,9 +2285,9 @@ RunResolutionFunction(JSContext *cx, HandleObject resolutionFun, HandleValue res // subclass constructor passes null/undefined to `super()`.) // There are also reactions where the Promise itself is missing. For // those, there's nothing left to do here. 
- assertSameCompartment(cx, resolutionFun); - assertSameCompartment(cx, result); - assertSameCompartment(cx, promiseObj); + cx->check(resolutionFun); + cx->check(result); + cx->check(promiseObj); if (resolutionFun) { RootedValue calleeOrRval(cx, ObjectValue(*resolutionFun)); return Call(cx, calleeOrRval, UndefinedHandleValue, result, &calleeOrRval); @@ -3080,12 +3080,12 @@ NewReactionRecord(JSContext* cx, Handle resultCapability, if (!reaction) return nullptr; - assertSameCompartment(cx, resultCapability.promise()); - assertSameCompartment(cx, onFulfilled); - assertSameCompartment(cx, onRejected); - assertSameCompartment(cx, resultCapability.resolve()); - assertSameCompartment(cx, resultCapability.reject()); - assertSameCompartment(cx, incumbentGlobalObject); + cx->check(resultCapability.promise()); + cx->check(onFulfilled); + cx->check(onRejected); + cx->check(resultCapability.resolve()); + cx->check(resultCapability.reject()); + cx->check(incumbentGlobalObject); reaction->setFixedSlot(ReactionRecordSlot_Promise, ObjectOrNullValue(resultCapability.promise())); @@ -3160,7 +3160,7 @@ static MOZ_MUST_USE bool OriginalPromiseThenWithoutSettleHandlers(JSContext* cx, Handle promise, Handle promiseToResolve) { - assertSameCompartment(cx, promise); + cx->check(promise); // Steps 3-4. 
Rooted resultCapability(cx); @@ -3187,7 +3187,7 @@ static bool OriginalPromiseThenBuiltin(JSContext* cx, HandleValue promiseVal, HandleValue onFulfilled, HandleValue onRejected, MutableHandleValue rval, bool rvalUsed) { - assertSameCompartment(cx, promiseVal, onFulfilled, onRejected); + cx->check(promiseVal, onFulfilled, onRejected); MOZ_ASSERT(CanCallOriginalPromiseThenBuiltin(cx, promiseVal)); Rooted promise(cx, &promiseVal.toObject().as()); diff --git a/js/src/builtin/RegExp.cpp b/js/src/builtin/RegExp.cpp index e6a44c4af1f4..79696d3d21e4 100644 --- a/js/src/builtin/RegExp.cpp +++ b/js/src/builtin/RegExp.cpp @@ -186,7 +186,7 @@ CheckPatternSyntax(JSContext* cx, HandleAtom pattern, RegExpFlag flags) // If we already have a RegExpShared for this pattern/flags, we can // avoid the much slower CheckPatternSyntaxSlow call. - if (RegExpShared* shared = cx->zone()->regExps.maybeGet(pattern, flags)) { + if (RegExpShared* shared = cx->zone()->regExps().maybeGet(pattern, flags)) { #ifdef DEBUG // Assert the pattern is valid. if (!CheckPatternSyntaxSlow(cx, pattern, flags)) { @@ -202,7 +202,7 @@ CheckPatternSyntax(JSContext* cx, HandleAtom pattern, RegExpFlag flags) // Allocate and return a new RegExpShared so we will hit the fast path // next time. 
- return cx->zone()->regExps.get(cx, pattern, flags); + return cx->zone()->regExps().get(cx, pattern, flags); } /* diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp index f9ba87cffbd4..1db760b8c4fc 100644 --- a/js/src/builtin/TestingFunctions.cpp +++ b/js/src/builtin/TestingFunctions.cpp @@ -205,6 +205,22 @@ GetBuildConfiguration(JSContext* cx, unsigned argc, Value* vp) if (!JS_SetProperty(cx, info, "arm64-simulator", value)) return false; +#ifdef JS_SIMULATOR_MIPS32 + value = BooleanValue(true); +#else + value = BooleanValue(false); +#endif + if (!JS_SetProperty(cx, info, "mips32-simulator", value)) + return false; + +#ifdef JS_SIMULATOR_MIPS64 + value = BooleanValue(true); +#else + value = BooleanValue(false); +#endif + if (!JS_SetProperty(cx, info, "mips64-simulator", value)) + return false; + #ifdef MOZ_ASAN value = BooleanValue(true); #else @@ -4397,96 +4413,6 @@ SetRNGState(JSContext* cx, unsigned argc, Value* vp) } #endif -static ModuleEnvironmentObject* -GetModuleEnvironment(JSContext* cx, HandleModuleObject module) -{ - // Use the initial environment so that tests can check bindings exists - // before they have been instantiated. 
- RootedModuleEnvironmentObject env(cx, &module->initialEnvironment()); - MOZ_ASSERT(env); - return env; -} - -static bool -GetModuleEnvironmentNames(JSContext* cx, unsigned argc, Value* vp) -{ - CallArgs args = CallArgsFromVp(argc, vp); - if (args.length() != 1) { - JS_ReportErrorASCII(cx, "Wrong number of arguments"); - return false; - } - - if (!args[0].isObject() || !args[0].toObject().is()) { - JS_ReportErrorASCII(cx, "First argument should be a ModuleObject"); - return false; - } - - RootedModuleObject module(cx, &args[0].toObject().as()); - if (module->hadEvaluationError()) { - JS_ReportErrorASCII(cx, "Module environment unavailable"); - return false; - } - - RootedModuleEnvironmentObject env(cx, GetModuleEnvironment(cx, module)); - Rooted ids(cx, IdVector(cx)); - if (!JS_Enumerate(cx, env, &ids)) - return false; - - uint32_t length = ids.length(); - RootedArrayObject array(cx, NewDenseFullyAllocatedArray(cx, length)); - if (!array) - return false; - - array->setDenseInitializedLength(length); - for (uint32_t i = 0; i < length; i++) - array->initDenseElement(i, StringValue(JSID_TO_STRING(ids[i]))); - - args.rval().setObject(*array); - return true; -} - -static bool -GetModuleEnvironmentValue(JSContext* cx, unsigned argc, Value* vp) -{ - CallArgs args = CallArgsFromVp(argc, vp); - if (args.length() != 2) { - JS_ReportErrorASCII(cx, "Wrong number of arguments"); - return false; - } - - if (!args[0].isObject() || !args[0].toObject().is()) { - JS_ReportErrorASCII(cx, "First argument should be a ModuleObject"); - return false; - } - - if (!args[1].isString()) { - JS_ReportErrorASCII(cx, "Second argument should be a string"); - return false; - } - - RootedModuleObject module(cx, &args[0].toObject().as()); - if (module->hadEvaluationError()) { - JS_ReportErrorASCII(cx, "Module environment unavailable"); - return false; - } - - RootedModuleEnvironmentObject env(cx, GetModuleEnvironment(cx, module)); - RootedString name(cx, args[1].toString()); - RootedId id(cx); - 
if (!JS_StringToId(cx, name, &id)) - return false; - - if (!GetProperty(cx, env, env, id, args.rval())) - return false; - - if (args.rval().isMagic(JS_UNINITIALIZED_LEXICAL)) { - ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, id); - return false; - } - - return true; -} - #ifdef DEBUG static const char* AssertionTypeToString(irregexp::RegExpAssertion::AssertionType type) @@ -6002,14 +5928,6 @@ gc::ZealModeHelpText), " Set this compartment's RNG state.\n"), #endif - JS_FN_HELP("getModuleEnvironmentNames", GetModuleEnvironmentNames, 1, 0, -"getModuleEnvironmentNames(module)", -" Get the list of a module environment's bound names for a specified module.\n"), - - JS_FN_HELP("getModuleEnvironmentValue", GetModuleEnvironmentValue, 2, 0, -"getModuleEnvironmentValue(module, name)", -" Get the value of a bound name in a module environment.\n"), - #if defined(FUZZING) && defined(__AFL_COMPILER) JS_FN_HELP("aflloop", AflLoop, 1, 0, "aflloop(max_cnt)", diff --git a/js/src/builtin/WeakMapObject.cpp b/js/src/builtin/WeakMapObject.cpp index b1d74bb50dd2..490484df85c4 100644 --- a/js/src/builtin/WeakMapObject.cpp +++ b/js/src/builtin/WeakMapObject.cpp @@ -204,7 +204,7 @@ JS::GetWeakMapEntry(JSContext* cx, HandleObject mapObj, HandleObject key, MutableHandleValue rval) { CHECK_REQUEST(cx); - assertSameCompartment(cx, key); + cx->check(key); rval.setUndefined(); ObjectValueMap* map = mapObj->as().getMap(); if (!map) @@ -223,7 +223,7 @@ JS::SetWeakMapEntry(JSContext* cx, HandleObject mapObj, HandleObject key, HandleValue val) { CHECK_REQUEST(cx); - assertSameCompartment(cx, key, val); + cx->check(key, val); Handle rootedMap = mapObj.as(); return WeakCollectionPutEntryInternal(cx, rootedMap, key, val); } diff --git a/js/src/devtools/rootAnalysis/analyzeRoots.js b/js/src/devtools/rootAnalysis/analyzeRoots.js index 7797febb3051..b46fed1e0595 100644 --- a/js/src/devtools/rootAnalysis/analyzeRoots.js +++ b/js/src/devtools/rootAnalysis/analyzeRoots.js @@ -222,6 +222,9 @@ 
function edgeUsesVariable(edge, variable, body) case "Loop": return 0; + case "Assembly": + return 0; + default: assert(false); } diff --git a/js/src/devtools/rootAnalysis/computeCallgraph.js b/js/src/devtools/rootAnalysis/computeCallgraph.js index 8d27601de45d..0e0ae47b16d3 100644 --- a/js/src/devtools/rootAnalysis/computeCallgraph.js +++ b/js/src/devtools/rootAnalysis/computeCallgraph.js @@ -176,20 +176,9 @@ function process(functionName, functionBodies) if (markerPos > 0) { var inChargeXTor = functionName.replace(internalMarker, ""); print("D " + memo(inChargeXTor) + " " + memo(functionName)); - - // Bug 1056410: Oh joy. GCC does something even funkier internally, - // where it generates calls to ~Foo() but a body for ~Foo(int32) even - // though it uses the same mangled name for both. So we need to add a - // synthetic edge from ~Foo() -> ~Foo(int32). - // - // inChargeXTor will have the (int32). - if (functionName.indexOf("::~") > 0) { - var calledDestructor = inChargeXTor.replace("(int32)", "()"); - print("D " + memo(calledDestructor) + " " + memo(inChargeXTor)); - } } - // Further note: from http://mentorembedded.github.io/cxx-abi/abi.html the + // Further note: from https://itanium-cxx-abi.github.io/cxx-abi/abi.html the // different kinds of constructors/destructors are: // C1 # complete object constructor // C2 # base object constructor @@ -210,18 +199,35 @@ function process(functionName, functionBodies) // inject an edge to it from C1, C2, and C3 (or D1, D2, and D3). (Note that // C3 isn't even used in current GCC, but add the edge anyway just in // case.) 
- if (functionName.indexOf("C4E") != -1 || functionName.indexOf("D4Ev") != -1) { + // + // from gcc/cp/mangle.c: + // + // ::= D0 # deleting (in-charge) destructor + // ::= D1 # complete object (in-charge) destructor + // ::= D2 # base object (not-in-charge) destructor + // ::= C1 # complete object constructor + // ::= C2 # base object constructor + // ::= C3 # complete object allocating constructor + // + // Currently, allocating constructors are never used. + // + if (functionName.indexOf("C4") != -1) { var [ mangled, unmangled ] = splitFunction(functionName); // E terminates the method name (and precedes the method parameters). // If eg "C4E" shows up in the mangled name for another reason, this - // will create bogus edges in the callgraph. But will affect little and - // is somewhat difficult to avoid, so we will live with it. - for (let [synthetic, variant] of [['C4E', 'C1E'], - ['C4E', 'C2E'], - ['C4E', 'C3E'], - ['D4Ev', 'D1Ev'], - ['D4Ev', 'D2Ev'], - ['D4Ev', 'D3Ev']]) + // will create bogus edges in the callgraph. But it will affect little + // and is somewhat difficult to avoid, so we will live with it. + // + // Another possibility! A templatized constructor will contain C4I...E + // for template arguments. + // + for (let [synthetic, variant] of [ + ['C4E', 'C1E'], + ['C4E', 'C2E'], + ['C4E', 'C3E'], + ['C4I', 'C1I'], + ['C4I', 'C2I'], + ['C4I', 'C3I']]) { if (mangled.indexOf(synthetic) == -1) continue; @@ -231,6 +237,30 @@ function process(functionName, functionBodies) print("D " + memo(variant_full) + " " + memo(functionName)); } } + + // For destructors: + // + // I've never seen D4Ev() + D4Ev(int32), only one or the other. So + // for a D4Ev of any sort, create: + // + // D0() -> D1() # deleting destructor calls complete destructor, then deletes + // D1() -> D2() # complete destructor calls base destructor, then destroys virtual bases + // D2() -> D4(?) 
# base destructor might be aliased to unified destructor + // # use whichever one is defined, in-charge or not. + // # ('?') means either () or (int32). + // + // Note that this doesn't actually make sense -- D0 and D1 should be + // in-charge, but gcc doesn't seem to give them the in-charge parameter?! + // + if (functionName.indexOf("D4Ev") != -1 && functionName.indexOf("::~") != -1) { + const not_in_charge_dtor = functionName.replace("(int32)", "()"); + const D0 = not_in_charge_dtor.replace("D4Ev", "D0Ev"); + const D1 = not_in_charge_dtor.replace("D4Ev", "D1Ev"); + const D2 = not_in_charge_dtor.replace("D4Ev", "D2Ev"); + print("D " + memo(D0) + " " + memo(D1)); + print("D " + memo(D1) + " " + memo(D2)); + print("D " + memo(D2) + " " + memo(functionName)); + } } for (var nameIndex = minStream; nameIndex <= maxStream; nameIndex++) { diff --git a/js/src/devtools/rootAnalysis/computeGCTypes.js b/js/src/devtools/rootAnalysis/computeGCTypes.js index 337adfabfed9..db4419a941fa 100644 --- a/js/src/devtools/rootAnalysis/computeGCTypes.js +++ b/js/src/devtools/rootAnalysis/computeGCTypes.js @@ -15,6 +15,7 @@ var typeInfo = { 'NonGCPointers': {}, 'RootedGCThings': {}, 'RootedPointers': {}, + 'RootedBases': {'JS::AutoGCRooter': true}, // RAII types within which we should assume GC is suppressed, eg // AutoSuppressGC. 
@@ -36,6 +37,26 @@ var rootedPointers = {}; function processCSU(csu, body) { + for (let { 'Name': [ annType, tag ] } of (body.Annotation || [])) { + if (annType != 'Tag') + continue; + + if (tag == 'GC Pointer') + typeInfo.GCPointers.push(csu); + else if (tag == 'Invalidated by GC') + typeInfo.GCPointers.push(csu); + else if (tag == 'GC Thing') + typeInfo.GCThings.push(csu); + else if (tag == 'Suppressed GC Pointer') + typeInfo.NonGCPointers[csu] = true; + else if (tag == 'Rooted Pointer') + typeInfo.RootedPointers[csu] = true; + else if (tag == 'Rooted Base') + typeInfo.RootedBases[csu] = true; + else if (tag == 'Suppress GC') + typeInfo.GCSuppressors[csu] = true; + } + for (let { 'Base': base } of (body.CSUBaseClass || [])) addBaseClass(csu, base); @@ -52,31 +73,8 @@ function processCSU(csu, body) if (target.Kind == "CSU") addNestedStructure(csu, target.Name, fieldName); } - if (type.Kind == "CSU") { - // Ignore nesting in classes which are AutoGCRooters. We only consider - // types with fields that may not be properly rooted. 
- if (type.Name == "JS::AutoGCRooter" || type.Name == "JS::CustomAutoRooter") - return; + if (type.Kind == "CSU") addNestedStructure(csu, type.Name, fieldName); - } - } - - for (let { 'Name': [ annType, tag ] } of (body.Annotation || [])) { - if (annType != 'Tag') - continue; - - if (tag == 'GC Pointer') - typeInfo.GCPointers.push(csu); - else if (tag == 'Invalidated by GC') - typeInfo.GCPointers.push(csu); - else if (tag == 'GC Thing') - typeInfo.GCThings.push(csu); - else if (tag == 'Suppressed GC Pointer') - typeInfo.NonGCPointers[csu] = true; - else if (tag == 'Rooted Pointer') - typeInfo.RootedPointers[csu] = true; - else if (tag == 'Suppress GC') - typeInfo.GCSuppressors[csu] = true; } } @@ -86,6 +84,8 @@ function addNestedStructure(csu, inner, field) if (!(inner in structureParents)) structureParents[inner] = []; + // Skip fields that are really base classes, to avoid duplicating the base + // fields; addBaseClass already added a "base-N" name. if (field.match(/^field:\d+$/) && (csu in baseClasses) && (baseClasses[csu].indexOf(inner) != -1)) return; @@ -140,6 +140,16 @@ for (const csu of typeInfo.GCThings) for (const csu of typeInfo.GCPointers) addGCPointer(csu); +// Everything that inherits from a "Rooted Base" is considered to be rooted. +// This is for things like CustomAutoRooter and its subclasses. +var basework = Object.keys(typeInfo.RootedBases); +while (basework.length) { + const base = basework.pop(); + typeInfo.RootedPointers[base] = true; + if (base in subClasses) + basework.push(...subClasses[base]); +} + // "typeName is a (pointer to a)^'typePtrLevel' GC type because it contains a field // named 'child' of type 'why' (or pointer to 'why' if fieldPtrLevel == 1), which is // itself a GCThing or GCPointer." 
@@ -230,7 +240,7 @@ function addGCPointer(typeName) // Call a function for a type and every type that contains the type in a field // or as a base class (which internally is pretty much the same thing -- -// sublcasses are structs beginning with the base class and adding on their +// subclasses are structs beginning with the base class and adding on their // local fields.) function foreachContainingStruct(typeName, func, seen = new Set()) { diff --git a/js/src/ds/LifoAlloc.cpp b/js/src/ds/LifoAlloc.cpp index 9a4780fe88b8..a6edc9a5e9d2 100644 --- a/js/src/ds/LifoAlloc.cpp +++ b/js/src/ds/LifoAlloc.cpp @@ -41,15 +41,6 @@ BumpChunk::newWithCapacity(size_t size, bool protect) return result; } -bool -BumpChunk::canAlloc(size_t n) -{ - uint8_t* aligned = AlignPtr(bump_); - uint8_t* newBump = aligned + n; - // bump_ <= newBump, is necessary to catch overflow. - return bump_ <= newBump && newBump <= capacity_; -} - #ifdef LIFO_CHUNK_PROTECT static const uint8_t* @@ -186,25 +177,25 @@ LifoAlloc::newChunkWithCapacity(size_t n) MOZ_ASSERT(fallibleScope_, "[OOM] Cannot allocate a new chunk in an infallible scope."); // Compute the size which should be requested in order to be able to fit |n| - // bytes in the newly allocated chunk, or default the |defaultChunkSize_|. - size_t defaultChunkFreeSpace = defaultChunkSize_ - detail::BumpChunkReservedSpace; - size_t chunkSize; - if (n > defaultChunkFreeSpace) { - MOZ_ASSERT(defaultChunkFreeSpace < defaultChunkSize_); - size_t allocSizeWithCanaries = n + (defaultChunkSize_ - defaultChunkFreeSpace); + // bytes in a newly allocated chunk, or default to |defaultChunkSize_|. + uint8_t* u8begin = nullptr; + uint8_t* u8end = u8begin + detail::BumpChunkReservedSpace; + u8end = detail::BumpChunk::nextAllocEnd(detail::BumpChunk::nextAllocBase(u8end), n); + size_t allocSizeWithCanaries = u8end - u8begin; - // Guard for overflow. 
- if (allocSizeWithCanaries < n || - (allocSizeWithCanaries & (size_t(1) << (BitSize::value - 1)))) - { - return nullptr; - } - - chunkSize = RoundUpPow2(allocSizeWithCanaries); - } else { - chunkSize = defaultChunkSize_; + // Guard for overflow. + if (allocSizeWithCanaries < n || + (allocSizeWithCanaries & (size_t(1) << (BitSize::value - 1)))) + { + return nullptr; } + size_t chunkSize; + if (allocSizeWithCanaries > defaultChunkSize_) + chunkSize = RoundUpPow2(allocSizeWithCanaries); + else + chunkSize = defaultChunkSize_; + bool protect = false; #ifdef LIFO_CHUNK_PROTECT protect = protect_; diff --git a/js/src/ds/LifoAlloc.h b/js/src/ds/LifoAlloc.h index 739e646563e0..6a9fc416c025 100644 --- a/js/src/ds/LifoAlloc.h +++ b/js/src/ds/LifoAlloc.h @@ -224,7 +224,7 @@ class BumpChunk : public SingleLinkedListElement static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966); #endif -#if defined(DEBUG) || defined(MOZ_ASAN) +#if defined(DEBUG) # define LIFO_CHUNK_PROTECT 1 #endif @@ -250,6 +250,7 @@ class BumpChunk : public SingleLinkedListElement do { \ uint8_t* base = (addr); \ size_t sz = (size); \ + MOZ_MAKE_MEM_UNDEFINED(base, sz); \ memset(base, undefinedChunkMemory, sz); \ MOZ_MAKE_MEM_NOACCESS(base, sz); \ } while (0) @@ -269,6 +270,13 @@ class BumpChunk : public SingleLinkedListElement # define LIFO_MAKE_MEM_UNDEFINED(addr, size) MOZ_MAKE_MEM_UNDEFINED((addr), (size)) #endif +#ifdef LIFO_HAVE_MEM_CHECKS + // Red zone reserved after each allocation. + static constexpr size_t RedZoneSize = 16; +#else + static constexpr size_t RedZoneSize = 0; +#endif + void assertInvariants() { MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber); MOZ_ASSERT(begin() <= end()); @@ -319,10 +327,15 @@ class BumpChunk : public SingleLinkedListElement MOZ_ASSERT(newBump <= capacity_); #if defined(LIFO_HAVE_MEM_CHECKS) // Poison/Unpoison memory that we just free'd/allocated. 
- if (bump_ > newBump) + if (bump_ > newBump) { LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump); - else if (newBump > bump_) - LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - bump_); + } else if (newBump > bump_) { + MOZ_ASSERT(newBump - RedZoneSize >= bump_); + LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_); + // The area [newBump - RedZoneSize .. newBump[ is already flagged as + // no-access either with the previous if-branch or with the + // BumpChunk constructor. No need to mark it twice. + } #endif bump_ = newBump; } @@ -417,13 +430,27 @@ class BumpChunk : public SingleLinkedListElement setBump(m.bump_); } + // Given a bump chunk pointer, find the next base/end pointers. This is + // useful for having consistent allocations, and iterating over known size + // allocations. + static uint8_t* nextAllocBase(uint8_t* e) { + return detail::AlignPtr(e); + } + static uint8_t* nextAllocEnd(uint8_t* b, size_t n) { + return b + n + RedZoneSize; + } + // Returns true, if the unused space is large enough for an allocation of // |n| bytes. - bool canAlloc(size_t n); + bool canAlloc(size_t n) const { + uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n); + // bump_ <= newBump, is necessary to catch overflow. + return bump_ <= newBump && newBump <= capacity_; + } // Space remaining in the current chunk. size_t unused() const { - uint8_t* aligned = AlignPtr(end()); + uint8_t* aligned = nextAllocBase(end()); if (aligned < capacity_) return capacity_ - aligned; return 0; @@ -432,8 +459,8 @@ class BumpChunk : public SingleLinkedListElement // Try to perform an allocation of size |n|, returns nullptr if not possible. 
MOZ_ALWAYS_INLINE void* tryAlloc(size_t n) { - uint8_t* aligned = AlignPtr(end()); - uint8_t* newBump = aligned + n; + uint8_t* aligned = nextAllocBase(end()); + uint8_t* newBump = nextAllocEnd(aligned, n); if (newBump > capacity_) return nullptr; @@ -903,15 +930,15 @@ class LifoAlloc uint8_t* seekBaseAndAdvanceBy(size_t size) { MOZ_ASSERT(!empty()); - uint8_t* aligned = detail::AlignPtr(head_); - if (aligned + size > chunkIt_->end()) { + uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_); + if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) { ++chunkIt_; aligned = chunkIt_->begin(); // The current code assumes that if we have a chunk, then we // have allocated something it in. MOZ_ASSERT(!chunkIt_->empty()); } - head_ = aligned + size; + head_ = detail::BumpChunk::nextAllocEnd(aligned, size); MOZ_ASSERT(head_ <= chunkIt_->end()); return aligned; } diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp index 83dc5799a744..0bf06defc8a7 100644 --- a/js/src/gc/Marking.cpp +++ b/js/src/gc/Marking.cpp @@ -26,7 +26,6 @@ #endif #include "vm/Debugger.h" #include "vm/EnvironmentObject.h" -#include "vm/RegExpObject.h" #include "vm/RegExpShared.h" #include "vm/Scope.h" #include "vm/Shape.h" diff --git a/js/src/gc/RootMarking.cpp b/js/src/gc/RootMarking.cpp index 934eb15651d3..8334df462662 100644 --- a/js/src/gc/RootMarking.cpp +++ b/js/src/gc/RootMarking.cpp @@ -116,6 +116,10 @@ JS_FOR_EACH_TRACEKIND(TRACE_ROOTS) #undef TRACE_ROOTS TracePersistentRootedList(trc, heapRoots.ref()[JS::RootKind::Id], "persistent-id"); TracePersistentRootedList(trc, heapRoots.ref()[JS::RootKind::Value], "persistent-value"); + + // ConcreteTraceable calls through a function pointer. 
+ JS::AutoSuppressGCAnalysis nogc; + TracePersistentRootedList( trc, heapRoots.ref()[JS::RootKind::Traceable], "persistent-traceable"); } @@ -391,6 +395,9 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM if (!JS::RuntimeHeapIsMinorCollecting()) { gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_EMBEDDING); + // The analysis doesn't like the function pointers below. + JS::AutoSuppressGCAnalysis nogc; + /* * The embedding can register additional roots here. * diff --git a/js/src/gc/WeakMap.h b/js/src/gc/WeakMap.h index 176482fb0e88..f253985799a5 100644 --- a/js/src/gc/WeakMap.h +++ b/js/src/gc/WeakMap.h @@ -11,6 +11,7 @@ #include "gc/Barrier.h" #include "gc/DeletePolicy.h" +#include "gc/Zone.h" #include "js/HashTable.h" namespace JS { diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp index 6f30f049c639..725f73f0d09d 100644 --- a/js/src/gc/Zone.cpp +++ b/js/src/gc/Zone.cpp @@ -43,7 +43,6 @@ JS::Zone::Zone(JSRuntime* rt) weakCaches_(this), gcWeakKeys_(this, SystemAllocPolicy(), rt->randomHashCodeScrambler()), typeDescrObjects_(this, this), - regExps(this), markedAtoms_(this), atomCache_(this), externalStringCache_(this), @@ -97,7 +96,7 @@ Zone::~Zone() // if the embedding leaked GC things. 
if (!rt->gc.shutdownCollectedEverything()) { gcWeakMapList().clear(); - regExps.clear(); + regExps().clear(); } #endif } @@ -106,7 +105,8 @@ bool Zone::init(bool isSystemArg) { isSystem = isSystemArg; - return gcWeakKeys().init(); + regExps_.ref() = make_unique(this); + return regExps_.ref() && gcWeakKeys().init(); } void @@ -357,7 +357,7 @@ Zone::nextZone() const void Zone::clearTables() { - MOZ_ASSERT(regExps.empty()); + MOZ_ASSERT(regExps().empty()); baseShapes().clear(); initialShapes().clear(); diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h index c6afaf7f4d5e..61f03330934b 100644 --- a/js/src/gc/Zone.h +++ b/js/src/gc/Zone.h @@ -14,12 +14,12 @@ #include "gc/GCRuntime.h" #include "js/GCHashTable.h" #include "vm/MallocProvider.h" -#include "vm/RegExpShared.h" #include "vm/Runtime.h" namespace js { class Debugger; +class RegExpZone; namespace jit { class JitZone; @@ -459,8 +459,10 @@ class Zone : public JS::shadow::Zone, counter.recordTrigger(trigger); } + js::MainThreadData> regExps_; + public: - js::RegExpZone regExps; + js::RegExpZone& regExps() { return *regExps_.ref(); } JS::WeakCache& typeDescrObjects() { return typeDescrObjects_.ref(); } @@ -755,47 +757,50 @@ class Zone : public JS::shadow::Zone, namespace js { -template -inline T* -ZoneAllocPolicy::maybe_pod_malloc(size_t numElems) +/* + * Allocation policy that uses Zone::pod_malloc and friends, so that memory + * pressure is accounted for on the zone. This is suitable for memory associated + * with GC things allocated in the zone. + * + * Since it doesn't hold a JSContext (those may not live long enough), it can't + * report out-of-memory conditions itself; the caller must check for OOM and + * take the appropriate action. + * + * FIXME bug 647103 - replace these *AllocPolicy names. 
+ */ +class ZoneAllocPolicy { - return zone->maybe_pod_malloc(numElems); -} + JS::Zone* const zone; -template -inline T* -ZoneAllocPolicy::maybe_pod_calloc(size_t numElems) -{ - return zone->maybe_pod_calloc(numElems); -} + public: + MOZ_IMPLICIT ZoneAllocPolicy(JS::Zone* z) : zone(z) {} -template -inline T* -ZoneAllocPolicy::maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) -{ - return zone->maybe_pod_realloc(p, oldSize, newSize); -} + template T* maybe_pod_malloc(size_t numElems) { + return zone->maybe_pod_malloc(numElems); + } + template T* maybe_pod_calloc(size_t numElems) { + return zone->maybe_pod_calloc(numElems); + } + template T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) { + return zone->maybe_pod_realloc(p, oldSize, newSize); + } + template T* pod_malloc(size_t numElems) { + return zone->pod_malloc(numElems); + } + template T* pod_calloc(size_t numElems) { + return zone->pod_calloc(numElems); + } + template T* pod_realloc(T* p, size_t oldSize, size_t newSize) { + return zone->pod_realloc(p, oldSize, newSize); + } -template -inline T* -ZoneAllocPolicy::pod_malloc(size_t numElems) -{ - return zone->pod_malloc(numElems); -} + template void free_(T* p, size_t numElems = 0) { js_free(p); } + void reportAllocOverflow() const {} -template -inline T* -ZoneAllocPolicy::pod_calloc(size_t numElems) -{ - return zone->pod_calloc(numElems); -} - -template -inline T* -ZoneAllocPolicy::pod_realloc(T* p, size_t oldSize, size_t newSize) -{ - return zone->pod_realloc(p, oldSize, newSize); -} + MOZ_MUST_USE bool checkSimulatedOOM() const { + return !js::oom::ShouldFailWithOOM(); + } +}; } // namespace js diff --git a/js/src/jit-test/tests/basic/bug1470732.js b/js/src/jit-test/tests/basic/bug1470732.js index 05fdbd1e87c8..378cac012942 100644 --- a/js/src/jit-test/tests/basic/bug1470732.js +++ b/js/src/jit-test/tests/basic/bug1470732.js @@ -7,6 +7,6 @@ while(i++ < 500) { assertFloat32(0x23456789 | 0, false); `); let m = parseModule(""); - 
m.declarationInstantiation(); + instantiateModule(m); } diff --git a/js/src/jit-test/tests/basic/write-frozen-dense-strict-inlinecache.js b/js/src/jit-test/tests/basic/write-frozen-dense-strict-inlinecache.js index bdd8d055fd25..abb620c6eb9f 100644 --- a/js/src/jit-test/tests/basic/write-frozen-dense-strict-inlinecache.js +++ b/js/src/jit-test/tests/basic/write-frozen-dense-strict-inlinecache.js @@ -1,4 +1,4 @@ -// |jit-test| --no-threads; --ion-eager; --ion-shared-stubs=off +// |jit-test| --no-threads; --ion-eager; setJitCompilerOption('ion.forceinlineCaches', 1); function foo(t) { "use strict"; diff --git a/js/src/jit-test/tests/debug/RematerializedFrame-retval.js b/js/src/jit-test/tests/debug/RematerializedFrame-retval.js index ce16404d1f6c..764f91b12bdf 100644 --- a/js/src/jit-test/tests/debug/RematerializedFrame-retval.js +++ b/js/src/jit-test/tests/debug/RematerializedFrame-retval.js @@ -32,8 +32,8 @@ loadFile(lfLogBuffer); function loadFile(lfVarx) { try { let m = parseModule(lfVarx); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); } catch (lfVare) {} } diff --git a/js/src/jit-test/tests/debug/bug1304553.js b/js/src/jit-test/tests/debug/bug1304553.js index 08a97d909705..42c445e12b4b 100644 --- a/js/src/jit-test/tests/debug/bug1304553.js +++ b/js/src/jit-test/tests/debug/bug1304553.js @@ -17,5 +17,5 @@ let m = parseModule(` f(i); actual; `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/debug/bug1363233.js b/js/src/jit-test/tests/debug/bug1363233.js new file mode 100644 index 000000000000..5db1ef6931e0 --- /dev/null +++ b/js/src/jit-test/tests/debug/bug1363233.js @@ -0,0 +1,14 @@ +// |jit-test| error: SyntaxError; +g = newGlobal(); +dbg = new Debugger; +setInterruptCallback(function () { + dbg.addDebuggee(g); + dbg.getNewestFrame(); + return true; +}); +g.eval("" + function f() { + for (var i = 0; i < 1; evaluate("class h { 
constructor() {} }")) { + interruptIf(1); + } +}); +g.f(); diff --git a/js/src/jit-test/tests/for-of/bug-1341339.js b/js/src/jit-test/tests/for-of/bug-1341339.js index 1f88acdafaa5..2a0cf1f49840 100644 --- a/js/src/jit-test/tests/for-of/bug-1341339.js +++ b/js/src/jit-test/tests/for-of/bug-1341339.js @@ -5,5 +5,5 @@ for (var i=0; i < 10000; ++i) { for (var x of iterator) {} } `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/gc/bug-1282986.js b/js/src/jit-test/tests/gc/bug-1282986.js index 5c2c3ab67ce7..879731ece47b 100644 --- a/js/src/jit-test/tests/gc/bug-1282986.js +++ b/js/src/jit-test/tests/gc/bug-1282986.js @@ -11,7 +11,7 @@ loadFile(lfLogBuffer); function loadFile(lfVarx) { oomTest(function() { let m = parseModule(lfVarx); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); }); } diff --git a/js/src/jit-test/tests/ion/bug1269756.js b/js/src/jit-test/tests/ion/bug1269756.js index 5555e5f14d68..d93ce58f4d13 100644 --- a/js/src/jit-test/tests/ion/bug1269756.js +++ b/js/src/jit-test/tests/ion/bug1269756.js @@ -3,6 +3,6 @@ if (!('oomTest' in this)) oomTest(function() { m = parseModule(`while (x && NaN) prototype; let x`); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); }) diff --git a/js/src/jit-test/tests/modules/ambiguous-star-export.js b/js/src/jit-test/tests/modules/ambiguous-star-export.js index 94aa7ac4af2c..cc6b796cd6e1 100644 --- a/js/src/jit-test/tests/modules/ambiguous-star-export.js +++ b/js/src/jit-test/tests/modules/ambiguous-star-export.js @@ -7,21 +7,21 @@ load(libdir + "dummyModuleResolveHook.js"); function checkModuleEval(source) { let m = parseModule(source); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); return m; } function checkModuleSyntaxError(source) { let m = parseModule(source); - assertThrowsInstanceOf(() => 
m.declarationInstantiation(), SyntaxError); + assertThrowsInstanceOf(() => instantiateModule(m), SyntaxError); } let a = moduleRepo['a'] = parseModule("export var a = 1; export var b = 2;"); let b = moduleRepo['b'] = parseModule("export var b = 3; export var c = 4;"); let c = moduleRepo['c'] = parseModule("export * from 'a'; export * from 'b';"); -c.declarationInstantiation(); -c.evaluation(); +instantiateModule(c); +evaluateModule(c); // Check importing/exporting non-ambiguous name works. let d = checkModuleEval("import { a } from 'c';"); @@ -34,9 +34,9 @@ checkModuleSyntaxError("export { b } from 'c';"); // Check that namespace objects include only non-ambiguous names. let m = parseModule("import * as ns from 'c';"); -m.declarationInstantiation(); -m.evaluation(); -let ns = c.namespace; +instantiateModule(m); +evaluateModule(m); +let ns = getModuleObject(c).namespace; let names = Object.keys(ns); assertEq(names.length, 2); assertEq('a' in ns, true); diff --git a/js/src/jit-test/tests/modules/bad-namespace-created.js b/js/src/jit-test/tests/modules/bad-namespace-created.js index 7d17546a4079..6a323a7ff3a1 100644 --- a/js/src/jit-test/tests/modules/bad-namespace-created.js +++ b/js/src/jit-test/tests/modules/bad-namespace-created.js @@ -14,4 +14,4 @@ moduleRepo['D'] = parseModule('export let x'); moduleRepo['E'] = parseModule('export let x'); let m = moduleRepo['A']; -assertThrowsInstanceOf(() => m.declarationInstantiation(), SyntaxError); +assertThrowsInstanceOf(() => instantiateModule(m), SyntaxError); diff --git a/js/src/jit-test/tests/modules/bug-1233915.js b/js/src/jit-test/tests/modules/bug-1233915.js index cef016f5da4f..2280aa66c0f3 100644 --- a/js/src/jit-test/tests/modules/bug-1233915.js +++ b/js/src/jit-test/tests/modules/bug-1233915.js @@ -7,5 +7,5 @@ g.eval("(" + function() { }; } + ")()"); m = parseModule(` s1 `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git 
a/js/src/jit-test/tests/modules/bug-1236875.js b/js/src/jit-test/tests/modules/bug-1236875.js index 41751f947623..c11fb1e5e939 100644 --- a/js/src/jit-test/tests/modules/bug-1236875.js +++ b/js/src/jit-test/tests/modules/bug-1236875.js @@ -1,2 +1,2 @@ let m = parseModule(`{ function x() {} }`); -m.declarationInstantiation(); +instantiateModule(m); diff --git a/js/src/jit-test/tests/modules/bug-1245518.js b/js/src/jit-test/tests/modules/bug-1245518.js index 0621d68c2162..d9324c9909ba 100644 --- a/js/src/jit-test/tests/modules/bug-1245518.js +++ b/js/src/jit-test/tests/modules/bug-1245518.js @@ -11,5 +11,5 @@ m = parseModule(` function g() { return this.hours = 0; } evalInFrame.call(0, 0, "g()") `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/modules/bug-1247934.js b/js/src/jit-test/tests/modules/bug-1247934.js index a58d78fe3cc0..4ecc5bcc5a05 100644 --- a/js/src/jit-test/tests/modules/bug-1247934.js +++ b/js/src/jit-test/tests/modules/bug-1247934.js @@ -6,4 +6,4 @@ setJitCompilerOption("ion.warmup.trigger", 50); s = ""; for (i = 0; i < 1024; i++) s += "export let e" + i + "\n"; moduleRepo['a'] = parseModule(s); -parseModule("import * as ns from 'a'").declarationInstantiation(); +instantiateModule(parseModule("import * as ns from 'a'")); diff --git a/js/src/jit-test/tests/modules/bug-1283448.js b/js/src/jit-test/tests/modules/bug-1283448.js index 9ee6217abf79..175711478ee4 100644 --- a/js/src/jit-test/tests/modules/bug-1283448.js +++ b/js/src/jit-test/tests/modules/bug-1283448.js @@ -6,5 +6,5 @@ setModuleResolveHook(function(module, specifier) { }); let a = moduleRepo['a'] = parseModule("var x = 1; export { x };"); let b = moduleRepo['b'] = parseModule("import { x as y } from 'a';"); -a.__proto__ = {15: 1337}; -b.declarationInstantiation(); +getModuleObject(a).__proto__ = {15: 1337}; +instantiateModule(b); diff --git a/js/src/jit-test/tests/modules/bug-1284486-2.js 
b/js/src/jit-test/tests/modules/bug-1284486-2.js index 62d765fd08e1..e9eb252fad28 100644 --- a/js/src/jit-test/tests/modules/bug-1284486-2.js +++ b/js/src/jit-test/tests/modules/bug-1284486-2.js @@ -15,7 +15,7 @@ let c = moduleRepo['c'] = parseModule("export * from 'a'; export * from 'b';"); let e1; let threw = false; try { - c.declarationInstantiation(); + instantiateModule(c); } catch (exc) { threw = true; e1 = exc; @@ -26,7 +26,7 @@ assertEq(typeof e1 === "undefined", false); threw = false; let e2; try { - c.declarationInstantiation(); + instantiateModule(c); } catch (exc) { threw = true; e2 = exc; diff --git a/js/src/jit-test/tests/modules/bug-1284486.js b/js/src/jit-test/tests/modules/bug-1284486.js index 61c7cafaa2fe..3a520bbcbbf6 100644 --- a/js/src/jit-test/tests/modules/bug-1284486.js +++ b/js/src/jit-test/tests/modules/bug-1284486.js @@ -15,7 +15,7 @@ let c = moduleRepo['c'] = parseModule("export * from 'a'; export * from 'b';"); let e1; let threw = false; try { - c.declarationInstantiation(); + instantiateModule(c); } catch (exc) { threw = true; e1 = exc; @@ -28,7 +28,7 @@ let d = moduleRepo['d'] = parseModule("import { a } from 'c'; a;"); threw = false; try { - d.declarationInstantiation(); + instantiateModule(d); } catch (exc) { threw = true; } diff --git a/js/src/jit-test/tests/modules/bug-1287410.js b/js/src/jit-test/tests/modules/bug-1287410.js index 0d36fe444ade..4460b5054100 100644 --- a/js/src/jit-test/tests/modules/bug-1287410.js +++ b/js/src/jit-test/tests/modules/bug-1287410.js @@ -9,7 +9,7 @@ setModuleResolveHook(function(module, specifier) { let a = moduleRepo['a'] = parseModule("export var a = 1; export var b = 2;"); let b = moduleRepo['b'] = parseModule("export var b = 3; export var c = 4;"); let c = moduleRepo['c'] = parseModule("export * from 'a'; export * from 'b';"); -c.declarationInstantiation(); +instantiateModule(c); // Module 'a' is replaced with another module that has not been instantiated. 
// This should not happen and would be a bug in the module loader. @@ -19,5 +19,5 @@ let d = moduleRepo['d'] = parseModule("import { a } from 'c'; a;"); // Attempting to instantiate 'd' throws an error because depdency 'a' of // instantiated module 'c' is not instantiated. -d.declarationInstantiation(); -d.evaluation(); +instantiateModule(d); +evaluateModule(d); diff --git a/js/src/jit-test/tests/modules/bug-1372258.js b/js/src/jit-test/tests/modules/bug-1372258.js index ba7dcf2dec0f..16750340a82d 100644 --- a/js/src/jit-test/tests/modules/bug-1372258.js +++ b/js/src/jit-test/tests/modules/bug-1372258.js @@ -24,5 +24,5 @@ for (let i = 0; i < count; i++) { } let b = moduleRepo['b'] = parseModule(s); -b.declarationInstantiation(); -b.evaluation(); +instantiateModule(b); +evaluateModule(b); diff --git a/js/src/jit-test/tests/modules/bug-1402535.js b/js/src/jit-test/tests/modules/bug-1402535.js index 5ac2b24031bb..4332f9cf48d3 100644 --- a/js/src/jit-test/tests/modules/bug-1402535.js +++ b/js/src/jit-test/tests/modules/bug-1402535.js @@ -3,5 +3,5 @@ if (!('stackTest' in this)) stackTest(function() { let m = parseModule(``); - m.declarationInstantiation(); + instantiateModule(m); }); diff --git a/js/src/jit-test/tests/modules/bug-1402649.js b/js/src/jit-test/tests/modules/bug-1402649.js index 2e5487210b51..fec05c1bd107 100644 --- a/js/src/jit-test/tests/modules/bug-1402649.js +++ b/js/src/jit-test/tests/modules/bug-1402649.js @@ -4,7 +4,7 @@ if (!('oomTest' in this)) loadFile(` function parseAndEvaluate(source) { let m = parseModule(source); - m.declarationInstantiation(); + instantiateModule(m); } parseAndEvaluate("async function a() { await 2 + 3; }") `); diff --git a/js/src/jit-test/tests/modules/bug-1406452.js b/js/src/jit-test/tests/modules/bug-1406452.js index 7b8325aad723..9be4b7accf4a 100644 --- a/js/src/jit-test/tests/modules/bug-1406452.js +++ b/js/src/jit-test/tests/modules/bug-1406452.js @@ -1,5 +1,5 @@ // |jit-test| error: Error let m = parseModule(`for 
(var x of iterator) {}`); -m.declarationInstantiation(); -try { m.evaluation(); } catch (e) {} +instantiateModule(m); +try { evaluateModule(m); } catch (e) {} getModuleEnvironmentValue(m, "r"); diff --git a/js/src/jit-test/tests/modules/bug-1420420-2.js b/js/src/jit-test/tests/modules/bug-1420420-2.js index e7de4eff4c5b..1f27aef34486 100644 --- a/js/src/jit-test/tests/modules/bug-1420420-2.js +++ b/js/src/jit-test/tests/modules/bug-1420420-2.js @@ -14,5 +14,5 @@ moduleRepo["a"] = parseModule(`import* as ns from "good"; import {y} from "bad"; let b = moduleRepo["b"] = parseModule(`import "a";`); let c = moduleRepo["c"] = parseModule(`import "a";`); -assertThrowsInstanceOf(() => b.declarationInstantiation(), SyntaxError); -assertThrowsInstanceOf(() => c.declarationInstantiation(), SyntaxError); +assertThrowsInstanceOf(() => instantiateModule(b), SyntaxError); +assertThrowsInstanceOf(() => instantiateModule(c), SyntaxError); diff --git a/js/src/jit-test/tests/modules/bug-1420420-3.js b/js/src/jit-test/tests/modules/bug-1420420-3.js index 5fec62d0caf5..af72a5ff7d7c 100644 --- a/js/src/jit-test/tests/modules/bug-1420420-3.js +++ b/js/src/jit-test/tests/modules/bug-1420420-3.js @@ -2,7 +2,7 @@ if (!('stackTest' in this)) quit(); let a = parseModule(`throw new Error`); -a.declarationInstantiation(); +instantiateModule(a); stackTest(function() { - a.evaluation(); + evaluateModule(a); }); diff --git a/js/src/jit-test/tests/modules/bug-1420420-4.js b/js/src/jit-test/tests/modules/bug-1420420-4.js index f6ae8f2f6922..57cfd9ac4905 100644 --- a/js/src/jit-test/tests/modules/bug-1420420-4.js +++ b/js/src/jit-test/tests/modules/bug-1420420-4.js @@ -6,10 +6,10 @@ moduleRepo["a"] = parseModule(`throw undefined`); let b = moduleRepo["b"] = parseModule(`import "a";`); let c = moduleRepo["c"] = parseModule(`import "a";`); -b.declarationInstantiation(); -c.declarationInstantiation(); +instantiateModule(b); +instantiateModule(c); let count = 0; -try { b.evaluation() } catch (e) { 
count++; } -try { c.evaluation() } catch (e) { count++; } +try { evaluateModule(b) } catch (e) { count++; } +try { evaluateModule(c) } catch (e) { count++; } assertEq(count, 2); diff --git a/js/src/jit-test/tests/modules/bug-1420420.js b/js/src/jit-test/tests/modules/bug-1420420.js index b1eda3aead1f..1d513b34f931 100644 --- a/js/src/jit-test/tests/modules/bug-1420420.js +++ b/js/src/jit-test/tests/modules/bug-1420420.js @@ -14,5 +14,5 @@ moduleRepo["a"] = parseModule(`import {x} from "good"; import {y} from "bad";`); let b = moduleRepo["b"] = parseModule(`import "a";`); let c = moduleRepo["c"] = parseModule(`import "a";`); -assertThrowsInstanceOf(() => b.declarationInstantiation(), SyntaxError); -assertThrowsInstanceOf(() => c.declarationInstantiation(), SyntaxError); +assertThrowsInstanceOf(() => instantiateModule(b), SyntaxError); +assertThrowsInstanceOf(() => instantiateModule(c), SyntaxError); diff --git a/js/src/jit-test/tests/modules/bug-1435327.js b/js/src/jit-test/tests/modules/bug-1435327.js index 577e9e4ca526..fe59fc19c0fa 100644 --- a/js/src/jit-test/tests/modules/bug-1435327.js +++ b/js/src/jit-test/tests/modules/bug-1435327.js @@ -8,7 +8,7 @@ lfLogBuffer = ` }); let c = moduleRepo['c'] = parseModule(""); let d = moduleRepo['d'] = parseModule("import { a } from 'c'; a;"); - d.declarationInstantiation(); + instantiateModule(d); `; lfLogBuffer = lfLogBuffer.split('\n'); var lfCodeBuffer = ""; @@ -25,8 +25,8 @@ function loadFile(lfVarx) { try { oomTest(function() { let m = parseModule(lfVarx); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); }); } catch (lfVare) {} } diff --git a/js/src/jit-test/tests/modules/bug-1443555.js b/js/src/jit-test/tests/modules/bug-1443555.js index c9ee40668504..9de864179a49 100644 --- a/js/src/jit-test/tests/modules/bug-1443555.js +++ b/js/src/jit-test/tests/modules/bug-1443555.js @@ -32,5 +32,5 @@ export default 1; moduleRepo['A'] = parseModule(ASrc); let m = parseModule(mainSrc); 
-m.declarationInstantiation() -m.evaluation(); +instantiateModule(m) +evaluateModule(m); diff --git a/js/src/jit-test/tests/modules/bug-1462286.js b/js/src/jit-test/tests/modules/bug-1462286.js index b1da5727c2c2..d42bdc50d193 100644 --- a/js/src/jit-test/tests/modules/bug-1462286.js +++ b/js/src/jit-test/tests/modules/bug-1462286.js @@ -5,6 +5,6 @@ let a = moduleRepo['a'] = parseModule(` `); let m = parseModule("import { get } from 'a'; export { get };"); -m.declarationInstantiation(); -m.evaluation() +instantiateModule(m); +evaluateModule(m) assertEq(getModuleEnvironmentValue(m, "get").x, "foo"); diff --git a/js/src/jit-test/tests/modules/bug-1462326.js b/js/src/jit-test/tests/modules/bug-1462326.js index 1c43f160be36..19b011b05631 100644 --- a/js/src/jit-test/tests/modules/bug-1462326.js +++ b/js/src/jit-test/tests/modules/bug-1462326.js @@ -3,4 +3,4 @@ let m = parseModule(` import A from "A"; `); -m.declarationInstantiation(); +instantiateModule(m); diff --git a/js/src/jit-test/tests/modules/bug-1463371.js b/js/src/jit-test/tests/modules/bug-1463371.js index f0e8782e8ec4..1e509072f8c1 100644 --- a/js/src/jit-test/tests/modules/bug-1463371.js +++ b/js/src/jit-test/tests/modules/bug-1463371.js @@ -7,4 +7,4 @@ g.eval(` let m = parseModule(` import {} from './foo.js'; `); -m.declarationInstantiation(); +instantiateModule(m); diff --git a/js/src/jit-test/tests/modules/bug-1463373.js b/js/src/jit-test/tests/modules/bug-1463373.js index 2319091660b5..255c14a7bbb3 100644 --- a/js/src/jit-test/tests/modules/bug-1463373.js +++ b/js/src/jit-test/tests/modules/bug-1463373.js @@ -4,8 +4,8 @@ let m = parseModule(` let c = parseModule(\` import "a"; \`); - c.declarationInstantiation(); + instantiateModule(c); `); setModuleResolveHook(function(module, specifier) { return m; }); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/modules/bug-1466487.js b/js/src/jit-test/tests/modules/bug-1466487.js index 
1d5ce7bcf3d0..2ba9762ddbb6 100644 --- a/js/src/jit-test/tests/modules/bug-1466487.js +++ b/js/src/jit-test/tests/modules/bug-1466487.js @@ -2,6 +2,6 @@ if (helperThreadCount() === 0) quit(); evalInWorker(` let m = parseModule("import.meta;"); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); `); diff --git a/js/src/jit-test/tests/modules/bug1210391.js b/js/src/jit-test/tests/modules/bug1210391.js index 78874a3c1f60..326eb48c28b3 100644 --- a/js/src/jit-test/tests/modules/bug1210391.js +++ b/js/src/jit-test/tests/modules/bug1210391.js @@ -3,6 +3,6 @@ let a = moduleRepo['a'] = parseModule("export var a = 1; export var b = 2;"); let b = moduleRepo['b'] = parseModule("export var b = 3; export var c = 4;"); let c = moduleRepo['c'] = parseModule("export * from 'a'; export * from 'b';"); let d = moduleRepo['d'] = parseModule("import { a } from 'c'; a;"); -d.declarationInstantiation(); -d.evaluation(); +instantiateModule(d); +evaluateModule(d); diff --git a/js/src/jit-test/tests/modules/bug1394492.js b/js/src/jit-test/tests/modules/bug1394492.js index a0e5d2ac39d4..b05146d0eb2b 100644 --- a/js/src/jit-test/tests/modules/bug1394492.js +++ b/js/src/jit-test/tests/modules/bug1394492.js @@ -1,6 +1,6 @@ -// |jit-test| error: NaN +// |jit-test| error: TypeError let m = parseModule(` throw i => { return 5; }, m-1; `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/modules/bug1429031.js b/js/src/jit-test/tests/modules/bug1429031.js index 20079428ef15..1b9bb3b42583 100644 --- a/js/src/jit-test/tests/modules/bug1429031.js +++ b/js/src/jit-test/tests/modules/bug1429031.js @@ -13,7 +13,7 @@ let b = moduleRepo['b'] = parseModule( \`import * as ns from 'a'; export var x = ns.a + ns.b;\` ); -b.declarationInstantiation(); +instantiateModule(b); let ns = getModuleEnvironmentValue(b, "ns"); assertEq(ns.a, 1); while ( t.ArrayType() ) 1; diff --git 
a/js/src/jit-test/tests/modules/bug1449153.js b/js/src/jit-test/tests/modules/bug1449153.js index bf8a2251799a..50009c856c54 100644 --- a/js/src/jit-test/tests/modules/bug1449153.js +++ b/js/src/jit-test/tests/modules/bug1449153.js @@ -26,10 +26,10 @@ moduleRepo["a"] = parseModule(` let c = moduleRepo["c"] = parseModule(` import "a"; `); -c.declarationInstantiation(); -assertThrowsMyError(() => c.evaluation()); +instantiateModule(c); +assertThrowsMyError(() => evaluateModule(c)); let b = moduleRepo['b'] = parseModule(` import * as ns0 from 'a' `); -assertThrowsMyError(() => b.declarationInstantiation()); +assertThrowsMyError(() => instantiateModule(b)); diff --git a/js/src/jit-test/tests/modules/debugger-frames.js b/js/src/jit-test/tests/modules/debugger-frames.js index ba7a1471c4df..61d01d39526e 100644 --- a/js/src/jit-test/tests/modules/debugger-frames.js +++ b/js/src/jit-test/tests/modules/debugger-frames.js @@ -72,8 +72,8 @@ f = g2.eval( export const c = 3; export function f(x) { return x + 1; } \`); - a.declarationInstantiation(); - a.evaluation(); + instantiateModule(a); + evaluateModule(a); let m = parseModule( \` @@ -84,6 +84,6 @@ f = g2.eval( debugger; \`); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); `); diff --git a/js/src/jit-test/tests/modules/debugger-vars-function.js b/js/src/jit-test/tests/modules/debugger-vars-function.js index 75dc023749a1..11feaabb56fc 100644 --- a/js/src/jit-test/tests/modules/debugger-vars-function.js +++ b/js/src/jit-test/tests/modules/debugger-vars-function.js @@ -31,7 +31,7 @@ g.eval( let e = 5; function f() { debugger; return e; } \`); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); `); diff --git a/js/src/jit-test/tests/modules/debugger-vars-toplevel.js b/js/src/jit-test/tests/modules/debugger-vars-toplevel.js index 8be8f4079a04..46b7db1642f5 100644 --- a/js/src/jit-test/tests/modules/debugger-vars-toplevel.js +++ 
b/js/src/jit-test/tests/modules/debugger-vars-toplevel.js @@ -32,7 +32,7 @@ g.eval( function f() { return e; } debugger; \`); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); `); diff --git a/js/src/jit-test/tests/modules/eval-module-oom.js b/js/src/jit-test/tests/modules/eval-module-oom.js index a1bd9db2a9ec..b2e318ded77e 100644 --- a/js/src/jit-test/tests/modules/eval-module-oom.js +++ b/js/src/jit-test/tests/modules/eval-module-oom.js @@ -21,6 +21,6 @@ const sb = oomTest(() => { let a = moduleRepo['a'] = parseModule(sa); let b = moduleRepo['b'] = parseModule(sb); - b.declarationInstantiation(); - assertEq(b.evaluation(), 42); + instantiateModule(b); + assertEq(evaluateModule(b), 42); }); diff --git a/js/src/jit-test/tests/modules/export-destructuring.js b/js/src/jit-test/tests/modules/export-destructuring.js index 5e26b9c640f0..fa4c21719306 100644 --- a/js/src/jit-test/tests/modules/export-destructuring.js +++ b/js/src/jit-test/tests/modules/export-destructuring.js @@ -40,8 +40,8 @@ m = parseModule(` assertArrayEq(a.n, [22]); `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); moduleRepo['o'] = parseModule(` export const {} = {}; @@ -66,8 +66,8 @@ m = parseModule(` assertEq(o.h, 6); `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); moduleRepo['ao'] = parseModule(` export const [{x: a}, {x: b}] = [{x: 1}, {x: 2}]; @@ -102,5 +102,5 @@ m = parseModule(` assertEq(ao.p, 21); `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/modules/export-entries.js b/js/src/jit-test/tests/modules/export-entries.js index b383d9e1e988..feb5c0632244 100644 --- a/js/src/jit-test/tests/modules/export-entries.js +++ b/js/src/jit-test/tests/modules/export-entries.js @@ -10,7 +10,7 @@ function testArrayContents(actual, expected) { } function testLocalExportEntries(source, 
expected) { - var module = parseModule(source); + var module = getModuleObject(parseModule(source)); testArrayContents(module.localExportEntries, expected); } @@ -69,7 +69,7 @@ testLocalExportEntries( // Test indirectExportEntries property function testIndirectExportEntries(source, expected) { - var module = parseModule(source); + var module = getModuleObject(parseModule(source)); testArrayContents(module.indirectExportEntries, expected); } @@ -100,7 +100,7 @@ testIndirectExportEntries( // Test starExportEntries property function testStarExportEntries(source, expected) { - var module = parseModule(source); + var module = getModuleObject(parseModule(source)); testArrayContents(module.starExportEntries, expected); } diff --git a/js/src/jit-test/tests/modules/global-scope.js b/js/src/jit-test/tests/modules/global-scope.js index b99019fa86b9..4f38a5e49902 100644 --- a/js/src/jit-test/tests/modules/global-scope.js +++ b/js/src/jit-test/tests/modules/global-scope.js @@ -2,8 +2,8 @@ function evalModuleAndCheck(source, expected) { let m = parseModule(source); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); assertEq(getModuleEnvironmentValue(m, "r"), expected); } @@ -22,8 +22,8 @@ function offThreadEvalModuleAndCheck(source, expected) { offThreadCompileModule(source); let m = finishOffThreadModule(); print("compiled"); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); assertEq(getModuleEnvironmentValue(m, "r"), expected); } diff --git a/js/src/jit-test/tests/modules/import-entries.js b/js/src/jit-test/tests/modules/import-entries.js index e5f7066f10e6..6ff37d6da591 100644 --- a/js/src/jit-test/tests/modules/import-entries.js +++ b/js/src/jit-test/tests/modules/import-entries.js @@ -17,7 +17,7 @@ function findImportEntry(array, target) } function testImportEntries(source, expected) { - var module = parseModule(source); + var module = getModuleObject(parseModule(source)); var actual = 
module.importEntries.slice(0); assertEq(actual.length, expected.length); for (var i = 0; i < expected.length; i++) { diff --git a/js/src/jit-test/tests/modules/import-namespace.js b/js/src/jit-test/tests/modules/import-namespace.js index 0287f7a60e4a..6e18c7e9ec5a 100644 --- a/js/src/jit-test/tests/modules/import-namespace.js +++ b/js/src/jit-test/tests/modules/import-namespace.js @@ -8,8 +8,8 @@ load(libdir + "dummyModuleResolveHook.js"); function parseAndEvaluate(source) { let m = parseModule(source); - m.declarationInstantiation(); - return m.evaluation(); + instantiateModule(m); + return evaluateModule(m); } function testHasNames(names, expected) { @@ -40,8 +40,8 @@ let b = moduleRepo['b'] = parseModule( export var x = ns.a + ns.b;` ); -b.declarationInstantiation(); -b.evaluation(); +instantiateModule(b); +evaluateModule(b); testHasNames(getModuleEnvironmentNames(b), ["ns", "x"]); let ns = getModuleEnvironmentValue(b, "ns"); testHasNames(Object.keys(ns), ["a", "b"]); @@ -89,18 +89,18 @@ let c = moduleRepo['c'] = parseModule("export let c = 1; import * as ns from 'd'; let d = ns.d;"); let d = moduleRepo['d'] = parseModule("export let d = 2; import * as ns from 'c'; let c = ns.c;"); -c.declarationInstantiation(); -d.declarationInstantiation(); -assertThrowsInstanceOf(() => c.evaluation(), ReferenceError); +instantiateModule(c); +instantiateModule(d); +assertThrowsInstanceOf(() => evaluateModule(c), ReferenceError); // Test cyclic namespace import. 
let e = moduleRepo['e'] = parseModule("export let e = 1; import * as ns from 'f'; export function f() { return ns.f }"); let f = moduleRepo['f'] = parseModule("export let f = 2; import * as ns from 'e'; export function e() { return ns.e }"); -e.declarationInstantiation(); -f.declarationInstantiation(); -e.evaluation(); -f.evaluation(); -assertEq(e.namespace.f(), 2); -assertEq(f.namespace.e(), 1); +instantiateModule(e); +instantiateModule(f); +evaluateModule(e); +evaluateModule(f); +assertEq(getModuleObject(e).namespace.f(), 2); +assertEq(getModuleObject(f).namespace.e(), 1); diff --git a/js/src/jit-test/tests/modules/many-exports.js b/js/src/jit-test/tests/modules/many-exports.js index 8e32d34fc177..7928242c0c6e 100644 --- a/js/src/jit-test/tests/modules/many-exports.js +++ b/js/src/jit-test/tests/modules/many-exports.js @@ -11,9 +11,9 @@ let a = moduleRepo['a'] = parseModule(s); let b = moduleRepo['b'] = parseModule("import * as ns from 'a'"); -b.declarationInstantiation(); -b.evaluation(); +instantiateModule(b); +evaluateModule(b); -let ns = a.namespace; +let ns = getModuleObject(a).namespace; for (let i = 0; i < count; i++) assertEq(ns["e" + i], i * i); diff --git a/js/src/jit-test/tests/modules/many-imports.js b/js/src/jit-test/tests/modules/many-imports.js index 14ed6b810742..acfb292b1cdd 100644 --- a/js/src/jit-test/tests/modules/many-imports.js +++ b/js/src/jit-test/tests/modules/many-imports.js @@ -13,5 +13,5 @@ for (let i = 0; i < count; i++) { } let b = moduleRepo['b'] = parseModule(s); -b.declarationInstantiation(); -b.evaluation(); +instantiateModule(b); +evaluateModule(b); diff --git a/js/src/jit-test/tests/modules/many-namespace-imports.js b/js/src/jit-test/tests/modules/many-namespace-imports.js index bfcac8eef47b..3d21b99d492e 100644 --- a/js/src/jit-test/tests/modules/many-namespace-imports.js +++ b/js/src/jit-test/tests/modules/many-namespace-imports.js @@ -13,5 +13,5 @@ for (let i = 0; i < count; i++) { } let b = moduleRepo['b'] = parseModule(s); 
-b.declarationInstantiation(); -b.evaluation(); +instantiateModule(b); +evaluateModule(b); diff --git a/js/src/jit-test/tests/modules/module-declaration-instantiation.js b/js/src/jit-test/tests/modules/module-declaration-instantiation.js index de820ae5b70d..f1484c677090 100644 --- a/js/src/jit-test/tests/modules/module-declaration-instantiation.js +++ b/js/src/jit-test/tests/modules/module-declaration-instantiation.js @@ -12,14 +12,14 @@ function testModuleEnvironment(module, expected) { // Check the environment of an empty module. let m = parseModule(""); -m.declarationInstantiation(); +instantiateModule(m); testModuleEnvironment(m, []); let a = moduleRepo['a'] = parseModule("var x = 1; export { x };"); let b = moduleRepo['b'] = parseModule("import { x as y } from 'a';"); -a.declarationInstantiation(); -b.declarationInstantiation(); +instantiateModule(a); +instantiateModule(b); testModuleEnvironment(a, ['x']); testModuleEnvironment(b, ['y']); @@ -32,7 +32,7 @@ let c = parseModule(`function a(x) { return x; } const names = ['a', 'b', 'c', 'd']; testModuleEnvironment(c, names); names.forEach((n) => assertEq(typeof getModuleEnvironmentValue(c, n), "undefined")); -c.declarationInstantiation(); +instantiateModule(c); for (let i = 0; i < names.length; i++) { let f = getModuleEnvironmentValue(c, names[i]); assertEq(f(21), 21 + i); diff --git a/js/src/jit-test/tests/modules/module-evaluation.js b/js/src/jit-test/tests/modules/module-evaluation.js index 1b2f2c9990f9..0b008f95b380 100644 --- a/js/src/jit-test/tests/modules/module-evaluation.js +++ b/js/src/jit-test/tests/modules/module-evaluation.js @@ -5,8 +5,8 @@ load(libdir + "dummyModuleResolveHook.js"); function parseAndEvaluate(source) { let m = parseModule(source); - m.declarationInstantiation(); - m.evaluation(); + instantiateModule(m); + evaluateModule(m); return m; } @@ -15,20 +15,20 @@ parseAndEvaluate(""); // Check evaluation returns evaluation result the first time, then undefined. 
let m = parseModule("1"); -m.declarationInstantiation(); -assertEq(m.evaluation(), undefined); -assertEq(typeof m.evaluation(), "undefined"); +instantiateModule(m); +assertEq(evaluateModule(m), undefined); +assertEq(typeof evaluateModule(m), "undefined"); // Check top level variables are initialized by evaluation. m = parseModule("export var x = 2 + 2;"); assertEq(typeof getModuleEnvironmentValue(m, "x"), "undefined"); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); assertEq(getModuleEnvironmentValue(m, "x"), 4); m = parseModule("export let x = 2 * 3;"); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); assertEq(getModuleEnvironmentValue(m, "x"), 6); // Set up a module to import from. @@ -62,20 +62,20 @@ parseAndEvaluate("export default class foo { constructor() {} };"); // Test default import m = parseModule("import a from 'a'; export { a };") -m.declarationInstantiation(); -m.evaluation() +instantiateModule(m); +evaluateModule(m) assertEq(getModuleEnvironmentValue(m, "a"), 2); // Test named import m = parseModule("import { x as y } from 'a'; export { y };") -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); assertEq(getModuleEnvironmentValue(m, "y"), 1); // Call exported function m = parseModule("import { f } from 'a'; export let x = f(3);") -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); assertEq(getModuleEnvironmentValue(m, "x"), 4); // Test importing an indirect export @@ -93,7 +93,7 @@ assertDeepEq(getModuleEnvironmentValue(m, "z"), [1, 2, 1, 2]); // Import access in functions m = parseModule("import { x } from 'a'; function f() { return x; }") -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); let f = getModuleEnvironmentValue(m, "f"); assertEq(f(), 1); diff --git a/js/src/jit-test/tests/modules/module-this.js 
b/js/src/jit-test/tests/modules/module-this.js index 10a3241aa756..c11f92657ee5 100644 --- a/js/src/jit-test/tests/modules/module-this.js +++ b/js/src/jit-test/tests/modules/module-this.js @@ -2,14 +2,14 @@ function parseAndEvaluate(source) { let m = parseModule(source); - m.declarationInstantiation(); - return m.evaluation(); + instantiateModule(m); + return evaluateModule(m); } assertEq(typeof(parseAndEvaluate("this")), "undefined"); let m = parseModule("export function getThis() { return this; }"); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); let f = getModuleEnvironmentValue(m, "getThis"); assertEq(typeof(f()), "undefined"); diff --git a/js/src/jit-test/tests/modules/off-thread-compile.js b/js/src/jit-test/tests/modules/off-thread-compile.js index 8d17d4a749fe..fe260abd1d1b 100644 --- a/js/src/jit-test/tests/modules/off-thread-compile.js +++ b/js/src/jit-test/tests/modules/off-thread-compile.js @@ -9,8 +9,8 @@ load(libdir + "dummyModuleResolveHook.js"); function offThreadParseAndEvaluate(source) { offThreadCompileModule(source); let m = finishOffThreadModule(); - m.declarationInstantiation(); - return m.evaluation(); + instantiateModule(m); + return evaluateModule(m); } offThreadParseAndEvaluate("export let x = 2 * 3;"); diff --git a/js/src/jit-test/tests/modules/requested-modules.js b/js/src/jit-test/tests/modules/requested-modules.js index acf58e5a6ee3..ff74f001d143 100644 --- a/js/src/jit-test/tests/modules/requested-modules.js +++ b/js/src/jit-test/tests/modules/requested-modules.js @@ -1,7 +1,7 @@ // Test requestedModules property function testRequestedModules(source, expected) { - var module = parseModule(source); + var module = getModuleObject(parseModule(source)); var actual = module.requestedModules; assertEq(actual.length, expected.length); for (var i = 0; i < actual.length; i++) { diff --git a/js/src/jit-test/tests/parser/bug-1263355-16.js b/js/src/jit-test/tests/parser/bug-1263355-16.js index 
df66d9593b94..89a76f5160f4 100644 --- a/js/src/jit-test/tests/parser/bug-1263355-16.js +++ b/js/src/jit-test/tests/parser/bug-1263355-16.js @@ -7,5 +7,5 @@ function addThis() { return statusmessages[i] = Number; } `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/parser/bug-1263355-23.js b/js/src/jit-test/tests/parser/bug-1263355-23.js index 690a6c728f67..5eaa1f40d5a7 100644 --- a/js/src/jit-test/tests/parser/bug-1263355-23.js +++ b/js/src/jit-test/tests/parser/bug-1263355-23.js @@ -3,5 +3,5 @@ let m = parseModule(` minorgc(); root.eval(); `); -m.declarationInstantiation(); -m.evaluation(); +instantiateModule(m); +evaluateModule(m); diff --git a/js/src/jit-test/tests/parser/bug-1263355-48.js b/js/src/jit-test/tests/parser/bug-1263355-48.js index 2345b422edf1..88384a34fb65 100644 --- a/js/src/jit-test/tests/parser/bug-1263355-48.js +++ b/js/src/jit-test/tests/parser/bug-1263355-48.js @@ -4,8 +4,8 @@ if (helperThreadCount() == 0) function eval(source) { offThreadCompileModule(source); let m = finishOffThreadModule(); - m.declarationInstantiation(); - return m.evaluation(); + instantiateModule(m); + return evaluateModule(m); } function runTestCase(testcase) { if (testcase() !== true) {} diff --git a/js/src/jit-test/tests/wasm/atomicity.js b/js/src/jit-test/tests/wasm/atomicity.js new file mode 100644 index 000000000000..8e6a6f85ad11 --- /dev/null +++ b/js/src/jit-test/tests/wasm/atomicity.js @@ -0,0 +1,373 @@ +// |jit-test| slow; +// +// Temporarily marked as slow - they time out on the build systems even with +// reduced iteration count. +// +// Test that wasm atomic operations implement correct mutual exclusion. +// +// We have several agents that attempt to hammer on a shared location with rmw +// operations in such a way that failed atomicity will lead to an incorrect +// result. Each agent attempts to clear or set specific bits in a shared datum. 
+ +// 1 for a little bit, 2 for a lot, 3 to quit before running tests +const DEBUG = 0; + +// The longer we run, the better, really, but we don't want to time out. +const ITERATIONS = 100000; + +// If you change NUMWORKERS you must also change the tables for INIT, VAL, and +// RESULT for all the operations, below, by adding or removing bits. +const NUMWORKERS = 2; +const NUMAGENTS = NUMWORKERS + 1; + +if (!wasmThreadsSupported() || helperThreadCount() < NUMWORKERS) { + if (DEBUG > 0) + print("Threads not supported"); + quit(0); +} + +// Most of the simulators have poor support for mutual exclusion and are anyway +// too slow; avoid intermittent failures and timeouts. + +let conf = getBuildConfiguration(); +if (conf["arm-simulator"] || conf["arm64-simulator"] || + conf["mips-simulator"] || conf["mips64-simulator"]) +{ + if (DEBUG > 0) + print("Atomicity test disabled on simulator"); + quit(0); +} + +//////////////////////////////////////////////////////////////////////// +// +// Coordination code for bootstrapping workers - use spawn() to create a worker, +// send() to send an item to a worker. send() will send to anyone, so only one +// worker should be receiving at a time. spawn() will block until the worker is +// running; send() will block until the message has been received. 
+ +var COORD_BUSY = 0; +var COORD_NUMLOC = 1; + +var coord = new Int32Array(new SharedArrayBuffer(COORD_NUMLOC*4)); + +function spawn(text) { + text = ` +var _coord = new Int32Array(getSharedObject()); +Atomics.store(_coord, ${COORD_BUSY}, 0); +function receive() { + while (!Atomics.load(_coord, ${COORD_BUSY})) + ; + let x = getSharedObject(); + Atomics.store(_coord, ${COORD_BUSY}, 0); + return x; +} +` + text; + setSharedObject(coord.buffer); + Atomics.store(coord, COORD_BUSY, 1); + evalInWorker(text); + while (Atomics.load(coord, COORD_BUSY)) + ; +} + +function send(x) { + while(Atomics.load(coord, COORD_BUSY)) + ; + setSharedObject(x); + Atomics.store(coord, COORD_BUSY, 1); + while(Atomics.load(coord, COORD_BUSY)) + ; +} + +///////////////////////////////////////////////////////////////////////////////// +// +// The "agents" comprise one master and one or more additional workers. We make +// a separate module for each agent so that test values can be inlined as +// constants. +// +// The master initially sets a shared location LOC to a value START. +// +// Each agent then operates atomically on LOC with an operation OP and a value +// VAL. The operation OP is the same for all agents but each agent `i` has a +// different VAL_i. +// +// To make this more interesting, the value START is distributed as many times +// through the value at LOC as there is space for, and we perform several +// operations back-to-back, with the VAL_i appropriately shifted. +// +// Each agent then spin-waits for LOC to contain a particular RESULT, which is +// always (START OP VAL_0 OP VAL_1 ... VAL_k), again repeated throughout the +// RESULT as appropriate. +// +// The process then starts over, and we repeat the process many times. If we +// fail to have atomicity at any point the program will hang (LOC will never +// attain the desired value) and the test should therefore time out. +// +// (Barriers are needed to make this all work out.) 
+// +// The general principle for the values is that each VAL should add (or clear) a +// bit of the stored value. +// +// OP START VAL0 VAL1 VAL2 RESULT +// +// ADD[*] 0 1 2 4 7 +// SUB 7 1 2 4 0 +// AND 7 3 6 5 0 +// OR 0 1 2 4 7 +// XOR 0 1 2 4 7 // or start with 7 and end with 0 +// CMPXCHG 0 1 2 4 7 // use nonatomic "or" to add the bit +// +// [*] Running the tests actually assumes that ADD works reasonably well. +// +// TODO - more variants we could test: +// +// - tests that do not drop the values of the atomic ops but accumulate them: +// uses different code generation on x86/x64 +// +// - Xchg needs a different method, since here the atomic thing is that we read +// the "previous value" and set the next value atomically. How can we observe +// that that fails? If we run three agents, which all set the value to X, +// X+1, ..., X+n, with the initial value being (say) X-1, each can record the +// value it observed in a table, and we should be able to predict the counts +// in that table once postprocessed. eg, the counts should all be the same. +// If atomicity fails then a value is read twice when it shouldn't be, and +// some other value is not read at all, and the counts will be off. +// +// - the different rmw operations can usually be combined so that we can test +// the atomicity of operations that may be implemented differently. +// +// - the same tests, with test values as variables instead of constants. + +function makeModule(id) { + let isMaster = id == 0; + let VALSHIFT = NUMAGENTS; // 1 bit per agent + + function makeLoop(bits, name, op, loc, initial, val, expected) { + // Exclude high bit to avoid messing with the sign. + let NUMVALS32 = Math.floor(31/VALSHIFT); + let NUMVALS = bits == 64 ? 
2 * NUMVALS32 : Math.floor(Math.min(bits,31)/VALSHIFT); + let BARRIER = "(i32.const 0)"; + let barrier = ` + ;; Barrier + (set_local $barrierValue (i32.add (get_local $barrierValue) (i32.const ${NUMAGENTS}))) + (drop (i32.atomic.rmw.add ${BARRIER} (i32.const 1))) + (loop $c1 + (if (i32.lt_s (i32.atomic.load ${BARRIER}) (get_local $barrierValue)) + (br $c1))) + ;; End barrier +`; + + // Distribute a value `v` across a word NUMVALs times + + function distribute(v) { + if (bits <= 32) + return '0x' + dist32(v); + return '0x' + dist32(v) + dist32(v); + } + + function dist32(v) { + let n = 0; + for (let i=0; i < Math.min(NUMVALS, NUMVALS32); i++) + n = n | (v << (i*VALSHIFT)); + assertEq(n >= 0, true); + return (n + 0x100000000).toString(16).substring(1); + } + + // Position a value `v` at position `pos` in a word + + function format(val, pos) { + if (bits <= 32) + return '0x' + format32(val, pos); + if (pos < NUMVALS32) + return '0x' + '00000000' + format32(val, pos); + return '0x' + format32(val, pos - NUMVALS32) + '00000000'; + } + + function format32(val, pos) { + return ((val << (pos * VALSHIFT)) + 0x100000000).toString(16).substring(1); + } + + let tag = bits < 32 ? bits + '_u' : ''; + let prefix = bits == 64 ? 'i64' : 'i32'; + return ` + (func ${name} (param $barrierValue i32) (result i32) + (local $n i32) + (local $tmp ${prefix}) + (set_local $n (i32.const ${ITERATIONS})) + (loop $outer + (if (get_local $n) + (block + ${isMaster ? `;; Init +(${prefix}.atomic.store${tag} ${loc} (${prefix}.const ${distribute(initial)}))` : ``} + ${barrier} + +${(() => { + let s = `;; Do\n`; + for (let i=0; i < NUMVALS; i++) { + let bitval = `(${prefix}.const ${format(val, i)})` + // The load must be atomic though it would be better if it were relaxed, + // we would avoid fences in that case. 
+ if (op.match(/cmpxchg/)) { + s += `(loop $doit + (set_local $tmp (${prefix}.atomic.load${tag} ${loc})) + (br_if $doit (i32.eqz + (${prefix}.eq + (get_local $tmp) + (${op} ${loc} (get_local $tmp) (${prefix}.or (get_local $tmp) ${bitval})))))) + `; + } else { + s += `(drop (${op} ${loc} ${bitval})) + `; + } + } + return s +})()} + (loop $wait_done + (br_if $wait_done (${prefix}.ne (${prefix}.atomic.load${tag} ${loc}) (${prefix}.const ${distribute(expected)})))) + ${barrier} + (set_local $n (i32.sub (get_local $n) (i32.const 1))) + (br $outer)))) + (get_local $barrierValue))`; + } + + const ADDLOC = "(i32.const 256)"; + const ADDINIT = 0; + const ADDVAL = [1, 2, 4]; + const ADDRESULT = 7; + + const SUBLOC = "(i32.const 512)"; + const SUBINIT = 7; + const SUBVAL = [1, 2, 4]; + const SUBRESULT = 0; + + const ANDLOC = "(i32.const 768)"; + const ANDINIT = 7; + const ANDVAL = [3, 6, 5]; + const ANDRESULT = 0; + + const ORLOC = "(i32.const 1024)"; + const ORINIT = 0; + const ORVAL = [1, 2, 4]; + const ORRESULT = 7; + + const XORLOC = "(i32.const 1280)"; + const XORINIT = 0; + const XORVAL = [1, 2, 4]; + const XORRESULT = 7; + + const CMPXCHGLOC = "(i32.const 1536)"; + const CMPXCHGINIT = 0; + const CMPXCHGVAL = [1, 2, 4]; + const CMPXCHGRESULT = 7; + + return ` +(module + (import "" "memory" (memory 1 1 shared)) + (import $print "" "print" (param i32)) + + ${makeLoop(8, "$test_add8", "i32.atomic.rmw8_u.add", ADDLOC, ADDINIT, ADDVAL[id], ADDRESULT)} + ${makeLoop(8, "$test_sub8", "i32.atomic.rmw8_u.sub", SUBLOC, SUBINIT, SUBVAL[id], SUBRESULT)} + ${makeLoop(8, "$test_and8", "i32.atomic.rmw8_u.and", ANDLOC, ANDINIT, ANDVAL[id], ANDRESULT)} + ${makeLoop(8, "$test_or8", "i32.atomic.rmw8_u.or", ORLOC, ORINIT, ORVAL[id], ORRESULT)} + ${makeLoop(8, "$test_xor8", "i32.atomic.rmw8_u.xor", XORLOC, XORINIT, XORVAL[id], XORRESULT)} + ${makeLoop(8, "$test_cmpxchg8", "i32.atomic.rmw8_u.cmpxchg", CMPXCHGLOC, CMPXCHGINIT, CMPXCHGVAL[id], CMPXCHGRESULT)} + + ${makeLoop(16, "$test_add16", 
"i32.atomic.rmw16_u.add", ADDLOC, ADDINIT, ADDVAL[id], ADDRESULT)} + ${makeLoop(16, "$test_sub16", "i32.atomic.rmw16_u.sub", SUBLOC, SUBINIT, SUBVAL[id], SUBRESULT)} + ${makeLoop(16, "$test_and16", "i32.atomic.rmw16_u.and", ANDLOC, ANDINIT, ANDVAL[id], ANDRESULT)} + ${makeLoop(16, "$test_or16", "i32.atomic.rmw16_u.or", ORLOC, ORINIT, ORVAL[id], ORRESULT)} + ${makeLoop(16, "$test_xor16", "i32.atomic.rmw16_u.xor", XORLOC, XORINIT, XORVAL[id], XORRESULT)} + ${makeLoop(16, "$test_cmpxchg16", "i32.atomic.rmw16_u.cmpxchg", CMPXCHGLOC, CMPXCHGINIT, CMPXCHGVAL[id], CMPXCHGRESULT)} + + ${makeLoop(32, "$test_add", "i32.atomic.rmw.add", ADDLOC, ADDINIT, ADDVAL[id], ADDRESULT)} + ${makeLoop(32, "$test_sub", "i32.atomic.rmw.sub", SUBLOC, SUBINIT, SUBVAL[id], SUBRESULT)} + ${makeLoop(32, "$test_and", "i32.atomic.rmw.and", ANDLOC, ANDINIT, ANDVAL[id], ANDRESULT)} + ${makeLoop(32, "$test_or", "i32.atomic.rmw.or", ORLOC, ORINIT, ORVAL[id], ORRESULT)} + ${makeLoop(32, "$test_xor", "i32.atomic.rmw.xor", XORLOC, XORINIT, XORVAL[id], XORRESULT)} + ${makeLoop(32, "$test_cmpxchg", "i32.atomic.rmw.cmpxchg", CMPXCHGLOC, CMPXCHGINIT, CMPXCHGVAL[id], CMPXCHGRESULT)} + + ${makeLoop(64, "$test_add64", "i64.atomic.rmw.add", ADDLOC, ADDINIT, ADDVAL[id], ADDRESULT)} + ${makeLoop(64, "$test_sub64", "i64.atomic.rmw.sub", SUBLOC, SUBINIT, SUBVAL[id], SUBRESULT)} + ${makeLoop(64, "$test_and64", "i64.atomic.rmw.and", ANDLOC, ANDINIT, ANDVAL[id], ANDRESULT)} + ${makeLoop(64, "$test_or64", "i64.atomic.rmw.or", ORLOC, ORINIT, ORVAL[id], ORRESULT)} + ${makeLoop(64, "$test_xor64", "i64.atomic.rmw.xor", XORLOC, XORINIT, XORVAL[id], XORRESULT)} + ${makeLoop(64, "$test_cmpxchg64", "i64.atomic.rmw.cmpxchg", CMPXCHGLOC, CMPXCHGINIT, CMPXCHGVAL[id], CMPXCHGRESULT)} + + (func (export "test") + (local $barrierValue i32) + (call $print (i32.const ${10 + id})) + (set_local $barrierValue (call $test_add8 (get_local $barrierValue))) + (set_local $barrierValue (call $test_sub8 (get_local $barrierValue))) + (set_local 
$barrierValue (call $test_and8 (get_local $barrierValue))) + (set_local $barrierValue (call $test_or8 (get_local $barrierValue))) + (set_local $barrierValue (call $test_xor8 (get_local $barrierValue))) + (set_local $barrierValue (call $test_cmpxchg8 (get_local $barrierValue))) + (call $print (i32.const ${20 + id})) + (set_local $barrierValue (call $test_add16 (get_local $barrierValue))) + (set_local $barrierValue (call $test_sub16 (get_local $barrierValue))) + (set_local $barrierValue (call $test_and16 (get_local $barrierValue))) + (set_local $barrierValue (call $test_or16 (get_local $barrierValue))) + (set_local $barrierValue (call $test_xor16 (get_local $barrierValue))) + (set_local $barrierValue (call $test_cmpxchg16 (get_local $barrierValue))) + (call $print (i32.const ${30 + id})) + (set_local $barrierValue (call $test_add (get_local $barrierValue))) + (set_local $barrierValue (call $test_sub (get_local $barrierValue))) + (set_local $barrierValue (call $test_and (get_local $barrierValue))) + (set_local $barrierValue (call $test_or (get_local $barrierValue))) + (set_local $barrierValue (call $test_xor (get_local $barrierValue))) + (set_local $barrierValue (call $test_cmpxchg (get_local $barrierValue))) + (call $print (i32.const ${40 + id})) + (set_local $barrierValue (call $test_add64 (get_local $barrierValue))) + (set_local $barrierValue (call $test_sub64 (get_local $barrierValue))) + (set_local $barrierValue (call $test_and64 (get_local $barrierValue))) + (set_local $barrierValue (call $test_or64 (get_local $barrierValue))) + (set_local $barrierValue (call $test_xor64 (get_local $barrierValue))) + (set_local $barrierValue (call $test_cmpxchg64 (get_local $barrierValue))) + )) +`; +} + +function makeModule2(id) { + let text = makeModule(id); + if (DEBUG > 1) + print(text); + return new WebAssembly.Module(wasmTextToBinary(text)); +} + +var mods = []; +mods.push(makeModule2(0)); +for ( let i=0; i < NUMWORKERS; i++ ) + mods.push(makeModule2(i+1)); +if (DEBUG > 2) 
+ quit(0); +var mem = new WebAssembly.Memory({initial: 1, maximum: 1, shared: true}); + +//////////////////////////////////////////////////////////////////////// +// +// Worker code + +function startWorkers() { + for ( let i=0; i < NUMWORKERS; i++ ) { + spawn(` +var mem = receive(); +var mod = receive(); +function pr(n) { if (${DEBUG}) print(n); } +var ins = new WebAssembly.Instance(mod, {"":{memory: mem, print:pr}}); +if (${DEBUG} > 0) + print("Running ${i}"); +ins.exports.test(); + `); + send(mem); + send(mods[i+1]); + } +} + +//////////////////////////////////////////////////////////////////////// +// +// Main thread code + +startWorkers(); +function pr(n) { if (DEBUG) print(n); } +var ins = new WebAssembly.Instance(mods[0], {"":{memory: mem, print:pr}}); +if (DEBUG > 0) + print("Running master"); +ins.exports.test(); diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp index 84a638894ded..29e3eecf266f 100644 --- a/js/src/jit/BaselineBailouts.cpp +++ b/js/src/jit/BaselineBailouts.cpp @@ -1065,7 +1065,7 @@ InitFromBailout(JSContext* cx, size_t frameNo, // Not every monitored op has a monitored fallback stub, e.g. // JSOP_NEWOBJECT, which always returns the same type for a // particular script/pc location. 
- BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); + ICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); ICFallbackStub* fallbackStub = icEntry.firstStub()->getChainFallback(); if (fallbackStub->isMonitoredFallback()) enterMonitorChain = true; @@ -1080,7 +1080,7 @@ InitFromBailout(JSContext* cx, size_t frameNo, builder.setResumeFramePtr(prevFramePtr); if (enterMonitorChain) { - BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); + ICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); ICFallbackStub* fallbackStub = icEntry.firstStub()->getChainFallback(); MOZ_ASSERT(fallbackStub->isMonitoredFallback()); JitSpew(JitSpew_BaselineBailouts, " [TYPE-MONITOR CHAIN]"); @@ -1247,7 +1247,7 @@ InitFromBailout(JSContext* cx, size_t frameNo, // Calculate and write out return address. // The icEntry in question MUST have an inlinable fallback stub. - BaselineICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); + ICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff); MOZ_ASSERT(IsInlinableFallback(icEntry.firstStub()->getChainFallback())); if (!builder.writePtr(baselineScript->returnAddressForIC(icEntry), "ReturnAddr")) return false; diff --git a/js/src/jit/BaselineCacheIRCompiler.cpp b/js/src/jit/BaselineCacheIRCompiler.cpp index 23e971dff479..cb6beab5886a 100644 --- a/js/src/jit/BaselineCacheIRCompiler.cpp +++ b/js/src/jit/BaselineCacheIRCompiler.cpp @@ -35,13 +35,6 @@ CacheRegisterAllocator::addressOf(MacroAssembler& masm, BaselineFrameSlot slot) // BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code. class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler { -#ifdef DEBUG - // Some Baseline IC stubs can be used in IonMonkey through SharedStubs. - // Those stubs have different machine code, so we need to track whether - // we're compiling for Baseline or Ion. 
- ICStubEngine engine_; -#endif - bool inStubFrame_; bool makesGCCalls_; @@ -58,12 +51,9 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler public: friend class AutoStubFrame; - BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, ICStubEngine engine, + BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, uint32_t stubDataOffset) : CacheIRCompiler(cx, writer, stubDataOffset, Mode::Baseline, StubFieldPolicy::Address), -#ifdef DEBUG - engine_(engine), -#endif inStubFrame_(false), makesGCCalls_(false) {} @@ -113,7 +103,6 @@ class MOZ_RAII AutoStubFrame void enter(MacroAssembler& masm, Register scratch, CallCanGC canGC = CallCanGC::CanGC) { MOZ_ASSERT(compiler.allocator.stackPushed() == 0); - MOZ_ASSERT(compiler.engine_ == ICStubEngine::Baseline); EmitBaselineEnterStubFrame(masm, scratch); @@ -153,7 +142,6 @@ BaselineCacheIRCompiler::callVM(MacroAssembler& masm, const VMFunction& fun) TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(fun); MOZ_ASSERT(fun.expectTailCall == NonTailCall); - MOZ_ASSERT(engine_ == ICStubEngine::Baseline); EmitBaselineCallVM(code, masm); return true; @@ -166,7 +154,6 @@ BaselineCacheIRCompiler::tailCallVM(MacroAssembler& masm, const VMFunction& fun) TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(fun); MOZ_ASSERT(fun.expectTailCall == TailCall); - MOZ_ASSERT(engine_ == ICStubEngine::Baseline); size_t argSize = fun.explicitStackSlots() * sizeof(void*); EmitBaselineTailCallVM(code, masm, argSize); @@ -557,8 +544,6 @@ BaselineCacheIRCompiler::emitGuardHasGetterSetter() bool BaselineCacheIRCompiler::emitCallScriptedGetterResult() { - MOZ_ASSERT(engine_ == ICStubEngine::Baseline); - Register obj = allocator.useRegister(masm, reader.objOperandId()); Address getterAddr(stubAddress(reader.stubOffset())); bool isCrossRealm = reader.readBool(); @@ -2121,8 +2106,8 @@ static const size_t MaxOptimizedCacheIRStubs = 16; ICStub* js::jit::AttachBaselineCacheIRStub(JSContext* cx, 
const CacheIRWriter& writer, CacheKind kind, BaselineCacheIRStubKind stubKind, - ICStubEngine engine, JSScript* outerScript, - ICFallbackStub* stub, bool* attached) + JSScript* outerScript, ICFallbackStub* stub, + bool* attached) { // We shouldn't GC or report OOM (or any other exception) here. AutoAssertNoPendingException aanpe(cx); @@ -2154,12 +2139,13 @@ js::jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, // Check if we already have JitCode for this stub. CacheIRStubInfo* stubInfo; - CacheIRStubKey::Lookup lookup(kind, engine, writer.codeStart(), writer.codeLength()); + CacheIRStubKey::Lookup lookup(kind, ICStubEngine::Baseline, writer.codeStart(), + writer.codeLength()); JitCode* code = jitZone->getBaselineCacheIRStubCode(lookup, &stubInfo); if (!code) { // We have to generate stub code. JitContext jctx(cx, nullptr); - BaselineCacheIRCompiler comp(cx, writer, engine, stubDataOffset); + BaselineCacheIRCompiler comp(cx, writer, stubDataOffset); if (!comp.init(kind)) return nullptr; @@ -2172,7 +2158,8 @@ js::jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, // to the stub code HashMap, so we don't have to worry about freeing // it below. 
MOZ_ASSERT(!stubInfo); - stubInfo = CacheIRStubInfo::New(kind, engine, comp.makesGCCalls(), stubDataOffset, writer); + stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::Baseline, comp.makesGCCalls(), + stubDataOffset, writer); if (!stubInfo) return nullptr; @@ -2236,7 +2223,7 @@ js::jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize(); ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(stubInfo->makesGCCalls(), - outerScript, engine); + outerScript); void* newStubMem = stubSpace->alloc(bytesNeeded); if (!newStubMem) return nullptr; diff --git a/js/src/jit/BaselineCacheIRCompiler.h b/js/src/jit/BaselineCacheIRCompiler.h index fdef63187505..aa61c66825ae 100644 --- a/js/src/jit/BaselineCacheIRCompiler.h +++ b/js/src/jit/BaselineCacheIRCompiler.h @@ -21,8 +21,8 @@ enum class BaselineCacheIRStubKind { Regular, Monitored, Updated }; ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind, BaselineCacheIRStubKind stubKind, - ICStubEngine engine, JSScript* outerScript, - ICFallbackStub* stub, bool* attached); + JSScript* outerScript, ICFallbackStub* stub, + bool* attached); } // namespace jit } // namespace js diff --git a/js/src/jit/BaselineCompiler.cpp b/js/src/jit/BaselineCompiler.cpp index 0f5414099732..2a621afd9506 100644 --- a/js/src/jit/BaselineCompiler.cpp +++ b/js/src/jit/BaselineCompiler.cpp @@ -247,7 +247,7 @@ BaselineCompiler::compile() for (size_t i = 0; i < icLoadLabels_.length(); i++) { CodeOffset label = icLoadLabels_[i].label; size_t icEntry = icLoadLabels_[i].icEntry; - BaselineICEntry* entryAddr = &(baselineScript->icEntry(icEntry)); + ICEntry* entryAddr = &(baselineScript->icEntry(icEntry)); Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label), ImmPtr(entryAddr), ImmPtr((void*)-1)); @@ -512,7 +512,7 @@ BaselineCompiler::emitOutOfLinePostBarrierSlot() bool BaselineCompiler::emitIC(ICStub* stub, 
ICEntry::Kind kind) { - BaselineICEntry* entry = allocateICEntry(stub, kind); + ICEntry* entry = allocateICEntry(stub, kind); if (!entry) return false; @@ -1926,7 +1926,7 @@ BaselineCompiler::emitBinaryArith() frame.popRegsAndSync(2); // Call IC - ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + ICBinaryArith_Fallback::Compiler stubCompiler(cx); if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) return false; @@ -2008,7 +2008,7 @@ BaselineCompiler::emitCompare() frame.popRegsAndSync(2); // Call IC. - ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + ICCompare_Fallback::Compiler stubCompiler(cx); if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) return false; @@ -2043,7 +2043,7 @@ BaselineCompiler::emit_JSOP_CASE() frame.syncStack(0); // Call IC. - ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + ICCompare_Fallback::Compiler stubCompiler(cx); if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) return false; @@ -2091,7 +2091,7 @@ BaselineCompiler::emit_JSOP_NEWARRAY() if (!group) return false; - ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::Baseline); + ICNewArray_Fallback::Compiler stubCompiler(cx, group); if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) return false; @@ -2155,7 +2155,7 @@ BaselineCompiler::emit_JSOP_NEWOBJECT() { frame.syncStack(0); - ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + ICNewObject_Fallback::Compiler stubCompiler(cx); if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) return false; @@ -2168,7 +2168,7 @@ BaselineCompiler::emit_JSOP_NEWINIT() { frame.syncStack(0); - ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::Baseline); + ICNewObject_Fallback::Compiler stubCompiler(cx); if (!emitOpIC(stubCompiler.getStub(&stubSpace_))) return false; @@ -2616,7 +2616,7 @@ BaselineCompiler::emit_JSOP_GETPROP() frame.popRegsAndSync(1); // Call IC. 
- ICGetProp_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline); + ICGetProp_Fallback::Compiler compiler(cx); if (!emitOpIC(compiler.getStub(&stubSpace_))) return false; @@ -2651,8 +2651,7 @@ BaselineCompiler::emit_JSOP_GETPROP_SUPER() masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R1); frame.pop(); - ICGetProp_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline, - /* hasReceiver = */ true); + ICGetProp_Fallback::Compiler compiler(cx, /* hasReceiver = */ true); if (!emitOpIC(compiler.getStub(&stubSpace_))) return false; diff --git a/js/src/jit/BaselineDebugModeOSR.cpp b/js/src/jit/BaselineDebugModeOSR.cpp index 674ee3237c28..da4a2cf607c1 100644 --- a/js/src/jit/BaselineDebugModeOSR.cpp +++ b/js/src/jit/BaselineDebugModeOSR.cpp @@ -218,7 +218,7 @@ CollectJitStackScripts(JSContext* cx, const Debugger::ExecutionObservableSet& ob } else { // The frame must be settled on a pc with an ICEntry. uint8_t* retAddr = frame.returnAddressToFp(); - BaselineICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr); + ICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr); if (!entries.append(DebugModeOSREntry(script, icEntry))) return false; } @@ -498,7 +498,7 @@ PatchBaselineFramesForDebugMode(JSContext* cx, // callVMs which can trigger debug mode OSR are the *only* // callVMs generated for their respective pc locations in the // baseline JIT code. - BaselineICEntry& callVMEntry = bl->callVMEntryFromPCOffset(pcOffset); + ICEntry& callVMEntry = bl->callVMEntryFromPCOffset(pcOffset); recompInfo->resumeAddr = bl->returnAddressForIC(callVMEntry); popFrameReg = false; break; @@ -510,7 +510,7 @@ PatchBaselineFramesForDebugMode(JSContext* cx, // Patching mechanism is identical to a CallVM. This is // handled especially only because the warmup counter VM call is // part of the prologue, and not tied an opcode. 
- BaselineICEntry& warmupCountEntry = bl->warmupCountICEntry(); + ICEntry& warmupCountEntry = bl->warmupCountICEntry(); recompInfo->resumeAddr = bl->returnAddressForIC(warmupCountEntry); popFrameReg = false; break; @@ -524,7 +524,7 @@ PatchBaselineFramesForDebugMode(JSContext* cx, // handled especially only because the stack check VM call is // part of the prologue, and not tied an opcode. bool earlyCheck = kind == ICEntry::Kind_EarlyStackCheck; - BaselineICEntry& stackCheckEntry = bl->stackCheckICEntry(earlyCheck); + ICEntry& stackCheckEntry = bl->stackCheckICEntry(earlyCheck); recompInfo->resumeAddr = bl->returnAddressForIC(stackCheckEntry); popFrameReg = false; break; @@ -782,8 +782,8 @@ CloneOldBaselineStub(JSContext* cx, DebugModeOSREntryVector& entries, size_t ent } } - ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(oldStub->makesGCCalls(), entry.script, - ICStubCompiler::Engine::Baseline); + ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(oldStub->makesGCCalls(), + entry.script); // Clone the existing stub into the recompiled IC. 
// diff --git a/js/src/jit/BaselineDebugModeOSR.h b/js/src/jit/BaselineDebugModeOSR.h index 0854dbe9b3db..0feb2df5d96e 100644 --- a/js/src/jit/BaselineDebugModeOSR.h +++ b/js/src/jit/BaselineDebugModeOSR.h @@ -41,30 +41,18 @@ namespace jit { template class DebugModeOSRVolatileStub { - ICStubCompiler::Engine engine_; T stub_; BaselineFrame* frame_; uint32_t pcOffset_; public: - DebugModeOSRVolatileStub(ICStubCompiler::Engine engine, BaselineFrame* frame, - ICFallbackStub* stub) - : engine_(engine), - stub_(static_cast(stub)), - frame_(frame), - pcOffset_(stub->icEntry()->pcOffset()) - { } - DebugModeOSRVolatileStub(BaselineFrame* frame, ICFallbackStub* stub) - : engine_(ICStubCompiler::Engine::Baseline), - stub_(static_cast(stub)), + : stub_(static_cast(stub)), frame_(frame), pcOffset_(stub->icEntry()->pcOffset()) { } bool invalid() const { - if (engine_ == ICStubCompiler::Engine::IonSharedIC) - return stub_->invalid(); MOZ_ASSERT(!frame_->isHandlingException()); ICEntry& entry = frame_->script()->baselineScript()->icEntryFromPCOffset(pcOffset_); return stub_ != entry.fallbackStub(); diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp index 701d72c4ef65..4e301cea8294 100644 --- a/js/src/jit/BaselineIC.cpp +++ b/js/src/jit/BaselineIC.cpp @@ -6,7 +6,10 @@ #include "jit/BaselineIC.h" +#include "mozilla/Casting.h" #include "mozilla/DebugOnly.h" +#include "mozilla/IntegerPrintfMacros.h" +#include "mozilla/Sprintf.h" #include "mozilla/TemplateLib.h" #include "jsfriendapi.h" @@ -51,6 +54,285 @@ using mozilla::DebugOnly; namespace js { namespace jit { + +#ifdef JS_JITSPEW +void +FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...) 
+{ + if (JitSpewEnabled(JitSpew_BaselineICFallback)) { + RootedScript script(cx, GetTopJitJSScript(cx)); + jsbytecode* pc = stub->icEntry()->pc(script); + + char fmtbuf[100]; + va_list args; + va_start(args, fmt); + (void) VsprintfLiteral(fmtbuf, fmt, args); + va_end(args); + + JitSpew(JitSpew_BaselineICFallback, + "Fallback hit for (%s:%u) (pc=%zu,line=%d,uses=%d,stubs=%zu): %s", + script->filename(), + script->lineno(), + script->pcToOffset(pc), + PCToLineNumber(script, pc), + script->getWarmUpCount(), + stub->numOptimizedStubs(), + fmtbuf); + } +} + +void +TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...) +{ + if (JitSpewEnabled(JitSpew_BaselineICFallback)) { + RootedScript script(cx, GetTopJitJSScript(cx)); + jsbytecode* pc = stub->icEntry()->pc(script); + + char fmtbuf[100]; + va_list args; + va_start(args, fmt); + (void) VsprintfLiteral(fmtbuf, fmt, args); + va_end(args); + + JitSpew(JitSpew_BaselineICFallback, + "Type monitor fallback hit for (%s:%u) (pc=%zu,line=%d,uses=%d,stubs=%d): %s", + script->filename(), + script->lineno(), + script->pcToOffset(pc), + PCToLineNumber(script, pc), + script->getWarmUpCount(), + (int) stub->numOptimizedMonitorStubs(), + fmtbuf); + } +} +#endif // JS_JITSPEW + +ICFallbackStub* +ICEntry::fallbackStub() const +{ + return firstStub()->getChainFallback(); +} + +void +ICEntry::trace(JSTracer* trc) +{ + if (!hasStub()) + return; + for (ICStub* stub = firstStub(); stub; stub = stub->next()) + stub->trace(trc); +} + +ICStubConstIterator& +ICStubConstIterator::operator++() +{ + MOZ_ASSERT(currentStub_ != nullptr); + currentStub_ = currentStub_->next(); + return *this; +} + + +ICStubIterator::ICStubIterator(ICFallbackStub* fallbackStub, bool end) + : icEntry_(fallbackStub->icEntry()), + fallbackStub_(fallbackStub), + previousStub_(nullptr), + currentStub_(end ? 
fallbackStub : icEntry_->firstStub()), + unlinked_(false) +{ } + +ICStubIterator& +ICStubIterator::operator++() +{ + MOZ_ASSERT(currentStub_->next() != nullptr); + if (!unlinked_) + previousStub_ = currentStub_; + currentStub_ = currentStub_->next(); + unlinked_ = false; + return *this; +} + +void +ICStubIterator::unlink(JSContext* cx) +{ + MOZ_ASSERT(currentStub_->next() != nullptr); + MOZ_ASSERT(currentStub_ != fallbackStub_); + MOZ_ASSERT(!unlinked_); + + fallbackStub_->unlinkStub(cx->zone(), previousStub_, currentStub_); + + // Mark the current iterator position as unlinked, so operator++ works properly. + unlinked_ = true; +} + +/* static */ bool +ICStub::NonCacheIRStubMakesGCCalls(Kind kind) +{ + MOZ_ASSERT(IsValidKind(kind)); + MOZ_ASSERT(!IsCacheIRKind(kind)); + + switch (kind) { + case Call_Fallback: + case Call_Scripted: + case Call_AnyScripted: + case Call_Native: + case Call_ClassHook: + case Call_ScriptedApplyArray: + case Call_ScriptedApplyArguments: + case Call_ScriptedFunCall: + case Call_ConstStringSplit: + case WarmUpCounter_Fallback: + case RetSub_Fallback: + // These two fallback stubs don't actually make non-tail calls, + // but the fallback code for the bailout path needs to pop the stub frame + // pushed during the bailout. + case GetProp_Fallback: + case SetProp_Fallback: + return true; + default: + return false; + } +} + +bool +ICStub::makesGCCalls() const +{ + switch (kind()) { + case CacheIR_Regular: + return toCacheIR_Regular()->stubInfo()->makesGCCalls(); + case CacheIR_Monitored: + return toCacheIR_Monitored()->stubInfo()->makesGCCalls(); + case CacheIR_Updated: + return toCacheIR_Updated()->stubInfo()->makesGCCalls(); + default: + return NonCacheIRStubMakesGCCalls(kind()); + } +} + +void +ICStub::traceCode(JSTracer* trc, const char* name) +{ + JitCode* stubJitCode = jitCode(); + TraceManuallyBarrieredEdge(trc, &stubJitCode, name); +} + +void +ICStub::updateCode(JitCode* code) +{ + // Write barrier on the old code. 
+ JitCode::writeBarrierPre(jitCode()); + stubCode_ = code->raw(); +} + +/* static */ void +ICStub::trace(JSTracer* trc) +{ + traceCode(trc, "shared-stub-jitcode"); + + // If the stub is a monitored fallback stub, then trace the monitor ICs hanging + // off of that stub. We don't need to worry about the regular monitored stubs, + // because the regular monitored stubs will always have a monitored fallback stub + // that references the same stub chain. + if (isMonitoredFallback()) { + ICTypeMonitor_Fallback* lastMonStub = + toMonitoredFallbackStub()->maybeFallbackMonitorStub(); + if (lastMonStub) { + for (ICStubConstIterator iter(lastMonStub->firstMonitorStub()); + !iter.atEnd(); + iter++) + { + MOZ_ASSERT_IF(iter->next() == nullptr, *iter == lastMonStub); + iter->trace(trc); + } + } + } + + if (isUpdated()) { + for (ICStubConstIterator iter(toUpdatedStub()->firstUpdateStub()); !iter.atEnd(); iter++) { + MOZ_ASSERT_IF(iter->next() == nullptr, iter->isTypeUpdate_Fallback()); + iter->trace(trc); + } + } + + switch (kind()) { + case ICStub::Call_Scripted: { + ICCall_Scripted* callStub = toCall_Scripted(); + TraceEdge(trc, &callStub->callee(), "baseline-callscripted-callee"); + TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callscripted-template"); + break; + } + case ICStub::Call_Native: { + ICCall_Native* callStub = toCall_Native(); + TraceEdge(trc, &callStub->callee(), "baseline-callnative-callee"); + TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callnative-template"); + break; + } + case ICStub::Call_ClassHook: { + ICCall_ClassHook* callStub = toCall_ClassHook(); + TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callclasshook-template"); + break; + } + case ICStub::Call_ConstStringSplit: { + ICCall_ConstStringSplit* callStub = toCall_ConstStringSplit(); + TraceEdge(trc, &callStub->templateObject(), "baseline-callstringsplit-template"); + TraceEdge(trc, &callStub->expectedSep(), "baseline-callstringsplit-sep"); + 
TraceEdge(trc, &callStub->expectedStr(), "baseline-callstringsplit-str"); + break; + } + case ICStub::TypeMonitor_SingleObject: { + ICTypeMonitor_SingleObject* monitorStub = toTypeMonitor_SingleObject(); + TraceEdge(trc, &monitorStub->object(), "baseline-monitor-singleton"); + break; + } + case ICStub::TypeMonitor_ObjectGroup: { + ICTypeMonitor_ObjectGroup* monitorStub = toTypeMonitor_ObjectGroup(); + TraceEdge(trc, &monitorStub->group(), "baseline-monitor-group"); + break; + } + case ICStub::TypeUpdate_SingleObject: { + ICTypeUpdate_SingleObject* updateStub = toTypeUpdate_SingleObject(); + TraceEdge(trc, &updateStub->object(), "baseline-update-singleton"); + break; + } + case ICStub::TypeUpdate_ObjectGroup: { + ICTypeUpdate_ObjectGroup* updateStub = toTypeUpdate_ObjectGroup(); + TraceEdge(trc, &updateStub->group(), "baseline-update-group"); + break; + } + case ICStub::NewArray_Fallback: { + ICNewArray_Fallback* stub = toNewArray_Fallback(); + TraceNullableEdge(trc, &stub->templateObject(), "baseline-newarray-template"); + TraceEdge(trc, &stub->templateGroup(), "baseline-newarray-template-group"); + break; + } + case ICStub::NewObject_Fallback: { + ICNewObject_Fallback* stub = toNewObject_Fallback(); + TraceNullableEdge(trc, &stub->templateObject(), "baseline-newobject-template"); + break; + } + case ICStub::Rest_Fallback: { + ICRest_Fallback* stub = toRest_Fallback(); + TraceEdge(trc, &stub->templateObject(), "baseline-rest-template"); + break; + } + case ICStub::CacheIR_Regular: + TraceCacheIRStub(trc, this, toCacheIR_Regular()->stubInfo()); + break; + case ICStub::CacheIR_Monitored: + TraceCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo()); + break; + case ICStub::CacheIR_Updated: { + ICCacheIR_Updated* stub = toCacheIR_Updated(); + TraceNullableEdge(trc, &stub->updateStubGroup(), "baseline-update-stub-group"); + TraceEdge(trc, &stub->updateStubId(), "baseline-update-stub-id"); + TraceCacheIRStub(trc, this, stub->stubInfo()); + break; + } + default: + 
break; + } +} + + + // // WarmUpCounter_Fallback // @@ -178,8 +460,6 @@ static const VMFunction DoWarmUpCounterFallbackOSRInfo = bool ICWarmUpCounter_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - // Push a stub frame so that we can perform a non-tail call. enterStubFrame(masm, R1.scratchReg()); @@ -264,6 +544,804 @@ ICWarmUpCounter_Fallback::Compiler::generateStubCode(MacroAssembler& masm) return true; } + +void +ICFallbackStub::unlinkStub(Zone* zone, ICStub* prev, ICStub* stub) +{ + MOZ_ASSERT(stub->next()); + + // If stub is the last optimized stub, update lastStubPtrAddr. + if (stub->next() == this) { + MOZ_ASSERT(lastStubPtrAddr_ == stub->addressOfNext()); + if (prev) + lastStubPtrAddr_ = prev->addressOfNext(); + else + lastStubPtrAddr_ = icEntry()->addressOfFirstStub(); + *lastStubPtrAddr_ = this; + } else { + if (prev) { + MOZ_ASSERT(prev->next() == stub); + prev->setNext(stub->next()); + } else { + MOZ_ASSERT(icEntry()->firstStub() == stub); + icEntry()->setFirstStub(stub->next()); + } + } + + state_.trackUnlinkedStub(); + + if (zone->needsIncrementalBarrier()) { + // We are removing edges from ICStub to gcthings. Perform one final trace + // of the stub for incremental GC, as it must know about those edges. + stub->trace(zone->barrierTracer()); + } + + if (stub->makesGCCalls() && stub->isMonitored()) { + // This stub can make calls so we can return to it if it's on the stack. + // We just have to reset its firstMonitorStub_ field to avoid a stale + // pointer when purgeOptimizedStubs destroys all optimized monitor + // stubs (unlinked stubs won't be updated). + ICTypeMonitor_Fallback* monitorFallback = + toMonitoredFallbackStub()->maybeFallbackMonitorStub(); + MOZ_ASSERT(monitorFallback); + stub->toMonitoredStub()->resetFirstMonitorStub(monitorFallback); + } + +#ifdef DEBUG + // Poison stub code to ensure we don't call this stub again. 
However, if + // this stub can make calls, a pointer to it may be stored in a stub frame + // on the stack, so we can't touch the stubCode_ or GC will crash when + // tracing this pointer. + if (!stub->makesGCCalls()) + stub->stubCode_ = (uint8_t*)0xbad; +#endif +} + +void +ICFallbackStub::unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind) +{ + for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) { + if (iter->kind() == kind) + iter.unlink(cx); + } +} + +void +ICFallbackStub::discardStubs(JSContext* cx) +{ + for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) + iter.unlink(cx); +} + +void +ICTypeMonitor_Fallback::resetMonitorStubChain(Zone* zone) +{ + if (zone->needsIncrementalBarrier()) { + // We are removing edges from monitored stubs to gcthings (JitCode). + // Perform one final trace of all monitor stubs for incremental GC, + // as it must know about those edges. + for (ICStub* s = firstMonitorStub_; !s->isTypeMonitor_Fallback(); s = s->next()) + s->trace(zone->barrierTracer()); + } + + firstMonitorStub_ = this; + numOptimizedMonitorStubs_ = 0; + + if (hasFallbackStub_) { + lastMonitorStubPtrAddr_ = nullptr; + + // Reset firstMonitorStub_ field of all monitored stubs. + for (ICStubConstIterator iter = mainFallbackStub_->beginChainConst(); + !iter.atEnd(); iter++) + { + if (!iter->isMonitored()) + continue; + iter->toMonitoredStub()->resetFirstMonitorStub(this); + } + } else { + icEntry_->setFirstStub(this); + lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub(); + } +} + +void +ICUpdatedStub::resetUpdateStubChain(Zone* zone) +{ + while (!firstUpdateStub_->isTypeUpdate_Fallback()) { + if (zone->needsIncrementalBarrier()) { + // We are removing edges from update stubs to gcthings (JitCode). + // Perform one final trace of all update stubs for incremental GC, + // as it must know about those edges. 
+ firstUpdateStub_->trace(zone->barrierTracer()); + } + firstUpdateStub_ = firstUpdateStub_->next(); + } + + numOptimizedStubs_ = 0; +} + +ICMonitoredStub::ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub) + : ICStub(kind, ICStub::Monitored, stubCode), + firstMonitorStub_(firstMonitorStub) +{ + // In order to silence Coverity - null pointer dereference checker + MOZ_ASSERT(firstMonitorStub_); + // If the first monitored stub is a ICTypeMonitor_Fallback stub, then + // double check that _its_ firstMonitorStub is the same as this one. + MOZ_ASSERT_IF(firstMonitorStub_->isTypeMonitor_Fallback(), + firstMonitorStub_->toTypeMonitor_Fallback()->firstMonitorStub() == + firstMonitorStub_); +} + +bool +ICMonitoredFallbackStub::initMonitoringChain(JSContext* cx, JSScript* script) +{ + MOZ_ASSERT(fallbackMonitorStub_ == nullptr); + + ICTypeMonitor_Fallback::Compiler compiler(cx, this); + ICStubSpace* space = script->baselineScript()->fallbackStubSpace(); + ICTypeMonitor_Fallback* stub = compiler.getStub(space); + if (!stub) + return false; + fallbackMonitorStub_ = stub; + return true; +} + +bool +ICMonitoredFallbackStub::addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, + StackTypeSet* types, HandleValue val) +{ + ICTypeMonitor_Fallback* typeMonitorFallback = getFallbackMonitorStub(cx, frame->script()); + if (!typeMonitorFallback) + return false; + return typeMonitorFallback->addMonitorStubForValue(cx, frame, types, val); +} + +bool +ICUpdatedStub::initUpdatingChain(JSContext* cx, ICStubSpace* space) +{ + MOZ_ASSERT(firstUpdateStub_ == nullptr); + + ICTypeUpdate_Fallback::Compiler compiler(cx); + ICTypeUpdate_Fallback* stub = compiler.getStub(space); + if (!stub) + return false; + + firstUpdateStub_ = stub; + return true; +} + +JitCode* +ICStubCompiler::getStubCode() +{ + JitRealm* realm = cx->realm()->jitRealm(); + + // Check for existing cached stubcode. 
+ uint32_t stubKey = getKey(); + JitCode* stubCode = realm->getStubCode(stubKey); + if (stubCode) + return stubCode; + + // Compile new stubcode. + JitContext jctx(cx, nullptr); + StackMacroAssembler masm; +#ifndef JS_USE_LINK_REGISTER + // The first value contains the return addres, + // which we pull into ICTailCallReg for tail calls. + masm.adjustFrame(sizeof(intptr_t)); +#endif +#ifdef JS_CODEGEN_ARM + masm.setSecondScratchReg(BaselineSecondScratchReg); +#endif + + if (!generateStubCode(masm)) + return nullptr; + Linker linker(masm); + AutoFlushICache afc("getStubCode"); + Rooted newStubCode(cx, linker.newCode(cx, CodeKind::Baseline)); + if (!newStubCode) + return nullptr; + + // Cache newly compiled stubcode. + if (!realm->putStubCode(cx, stubKey, newStubCode)) + return nullptr; + + // After generating code, run postGenerateStubCode(). We must not fail + // after this point. + postGenerateStubCode(masm, newStubCode); + + MOZ_ASSERT(entersStubFrame_ == ICStub::NonCacheIRStubMakesGCCalls(kind)); + MOZ_ASSERT(!inStubFrame_); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(newStubCode, "BaselineIC"); +#endif + + return newStubCode; +} + +bool +ICStubCompiler::tailCallVM(const VMFunction& fun, MacroAssembler& masm) +{ + TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(fun); + MOZ_ASSERT(fun.expectTailCall == TailCall); + uint32_t argSize = fun.explicitStackSlots() * sizeof(void*); + EmitBaselineTailCallVM(code, masm, argSize); + return true; +} + +bool +ICStubCompiler::callVM(const VMFunction& fun, MacroAssembler& masm) +{ + MOZ_ASSERT(inStubFrame_); + + TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(fun); + MOZ_ASSERT(fun.expectTailCall == NonTailCall); + + EmitBaselineCallVM(code, masm); + return true; +} + +void +ICStubCompiler::enterStubFrame(MacroAssembler& masm, Register scratch) +{ + EmitBaselineEnterStubFrame(masm, scratch); +#ifdef DEBUG + framePushedAtEnterStubFrame_ = masm.framePushed(); +#endif + + 
MOZ_ASSERT(!inStubFrame_); + inStubFrame_ = true; + +#ifdef DEBUG + entersStubFrame_ = true; +#endif +} + +void +ICStubCompiler::assumeStubFrame() +{ + MOZ_ASSERT(!inStubFrame_); + inStubFrame_ = true; + +#ifdef DEBUG + entersStubFrame_ = true; + + // |framePushed| isn't tracked precisely in ICStubs, so simply assume it to + // be STUB_FRAME_SIZE so that assertions don't fail in leaveStubFrame. + framePushedAtEnterStubFrame_ = STUB_FRAME_SIZE; +#endif +} + +void +ICStubCompiler::leaveStubFrame(MacroAssembler& masm, bool calledIntoIon) +{ + MOZ_ASSERT(entersStubFrame_ && inStubFrame_); + inStubFrame_ = false; + +#ifdef DEBUG + masm.setFramePushed(framePushedAtEnterStubFrame_); + if (calledIntoIon) + masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra. +#endif + EmitBaselineLeaveStubFrame(masm, calledIntoIon); +} + +void +ICStubCompiler::pushStubPayload(MacroAssembler& masm, Register scratch) +{ + if (inStubFrame_) { + masm.loadPtr(Address(BaselineFrameReg, 0), scratch); + masm.pushBaselineFramePtr(scratch, scratch); + } else { + masm.pushBaselineFramePtr(BaselineFrameReg, scratch); + } +} + +void +ICStubCompiler::PushStubPayload(MacroAssembler& masm, Register scratch) +{ + pushStubPayload(masm, scratch); + masm.adjustFrame(sizeof(intptr_t)); +} + +// +void +BaselineScript::noteAccessedGetter(uint32_t pcOffset) +{ + ICEntry& entry = icEntryFromPCOffset(pcOffset); + ICFallbackStub* stub = entry.fallbackStub(); + + if (stub->isGetProp_Fallback()) + stub->toGetProp_Fallback()->noteAccessedGetter(); +} + +// TypeMonitor_Fallback +// + +bool +ICTypeMonitor_Fallback::addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, + StackTypeSet* types, HandleValue val) +{ + MOZ_ASSERT(types); + + // Don't attach too many SingleObject/ObjectGroup stubs. If the value is a + // primitive or if we will attach an any-object stub, we can handle this + // with a single PrimitiveSet or AnyValue stub so we always optimize. 
+ if (numOptimizedMonitorStubs_ >= MAX_OPTIMIZED_STUBS && + val.isObject() && + !types->unknownObject()) + { + return true; + } + + bool wasDetachedMonitorChain = lastMonitorStubPtrAddr_ == nullptr; + MOZ_ASSERT_IF(wasDetachedMonitorChain, numOptimizedMonitorStubs_ == 0); + + if (types->unknown()) { + // The TypeSet got marked as unknown so attach a stub that always + // succeeds. + + // Check for existing TypeMonitor_AnyValue stubs. + for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { + if (iter->isTypeMonitor_AnyValue()) + return true; + } + + // Discard existing stubs. + resetMonitorStubChain(cx->zone()); + wasDetachedMonitorChain = (lastMonitorStubPtrAddr_ == nullptr); + + ICTypeMonitor_AnyValue::Compiler compiler(cx); + ICStub* stub = compiler.getStub(compiler.getStubSpace(frame->script())); + if (!stub) { + ReportOutOfMemory(cx); + return false; + } + + JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for any value", stub); + addOptimizedMonitorStub(stub); + + } else if (val.isPrimitive() || types->unknownObject()) { + if (val.isMagic(JS_UNINITIALIZED_LEXICAL)) + return true; + MOZ_ASSERT(!val.isMagic()); + JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType(); + + // Check for existing TypeMonitor stub. + ICTypeMonitor_PrimitiveSet* existingStub = nullptr; + for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { + if (iter->isTypeMonitor_PrimitiveSet()) { + existingStub = iter->toTypeMonitor_PrimitiveSet(); + if (existingStub->containsType(type)) + return true; + } + } + + if (val.isObject()) { + // Check for existing SingleObject/ObjectGroup stubs and discard + // stubs if we find one. Ideally we would discard just these stubs, + // but unlinking individual type monitor stubs is somewhat + // complicated. 
+ MOZ_ASSERT(types->unknownObject()); + bool hasObjectStubs = false; + for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { + if (iter->isTypeMonitor_SingleObject() || iter->isTypeMonitor_ObjectGroup()) { + hasObjectStubs = true; + break; + } + } + if (hasObjectStubs) { + resetMonitorStubChain(cx->zone()); + wasDetachedMonitorChain = (lastMonitorStubPtrAddr_ == nullptr); + existingStub = nullptr; + } + } + + ICTypeMonitor_PrimitiveSet::Compiler compiler(cx, existingStub, type); + ICStub* stub = existingStub + ? compiler.updateStub() + : compiler.getStub(compiler.getStubSpace(frame->script())); + if (!stub) { + ReportOutOfMemory(cx); + return false; + } + + JitSpew(JitSpew_BaselineIC, " %s TypeMonitor stub %p for primitive type %d", + existingStub ? "Modified existing" : "Created new", stub, type); + + if (!existingStub) { + MOZ_ASSERT(!hasStub(TypeMonitor_PrimitiveSet)); + addOptimizedMonitorStub(stub); + } + + } else if (val.toObject().isSingleton()) { + RootedObject obj(cx, &val.toObject()); + + // Check for existing TypeMonitor stub. + for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { + if (iter->isTypeMonitor_SingleObject() && + iter->toTypeMonitor_SingleObject()->object() == obj) + { + return true; + } + } + + ICTypeMonitor_SingleObject::Compiler compiler(cx, obj); + ICStub* stub = compiler.getStub(compiler.getStubSpace(frame->script())); + if (!stub) { + ReportOutOfMemory(cx); + return false; + } + + JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for singleton %p", + stub, obj.get()); + + addOptimizedMonitorStub(stub); + + } else { + RootedObjectGroup group(cx, val.toObject().group()); + + // Check for existing TypeMonitor stub. 
+ for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { + if (iter->isTypeMonitor_ObjectGroup() && + iter->toTypeMonitor_ObjectGroup()->group() == group) + { + return true; + } + } + + ICTypeMonitor_ObjectGroup::Compiler compiler(cx, group); + ICStub* stub = compiler.getStub(compiler.getStubSpace(frame->script())); + if (!stub) { + ReportOutOfMemory(cx); + return false; + } + + JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for ObjectGroup %p", + stub, group.get()); + + addOptimizedMonitorStub(stub); + } + + bool firstMonitorStubAdded = wasDetachedMonitorChain && (numOptimizedMonitorStubs_ > 0); + + if (firstMonitorStubAdded) { + // Was an empty monitor chain before, but a new stub was added. This is the + // only time that any main stubs' firstMonitorStub fields need to be updated to + // refer to the newly added monitor stub. + ICStub* firstStub = mainFallbackStub_->icEntry()->firstStub(); + for (ICStubConstIterator iter(firstStub); !iter.atEnd(); iter++) { + // Non-monitored stubs are used if the result has always the same type, + // e.g. a StringLength stub will always return int32. + if (!iter->isMonitored()) + continue; + + // Since we just added the first optimized monitoring stub, any + // existing main stub's |firstMonitorStub| MUST be pointing to the fallback + // monitor stub (i.e. this stub). + MOZ_ASSERT(iter->toMonitoredStub()->firstMonitorStub() == this); + iter->toMonitoredStub()->updateFirstMonitorStub(firstMonitorStub_); + } + } + + return true; +} + +static bool +DoTypeMonitorFallback(JSContext* cx, BaselineFrame* frame, ICTypeMonitor_Fallback* stub, + HandleValue value, MutableHandleValue res) +{ + JSScript* script = frame->script(); + jsbytecode* pc = stub->icEntry()->pc(script); + TypeFallbackICSpew(cx, stub, "TypeMonitor"); + + // Copy input value to res. 
+ res.set(value); + + if (MOZ_UNLIKELY(value.isMagic())) { + // It's possible that we arrived here from bailing out of Ion, and that + // Ion proved that the value is dead and optimized out. In such cases, + // do nothing. However, it's also possible that we have an uninitialized + // this, in which case we should not look for other magic values. + + if (value.whyMagic() == JS_OPTIMIZED_OUT) { + MOZ_ASSERT(!stub->monitorsThis()); + return true; + } + + // In derived class constructors (including nested arrows/eval), the + // |this| argument or GETALIASEDVAR can return the magic TDZ value. + MOZ_ASSERT(value.isMagic(JS_UNINITIALIZED_LEXICAL)); + MOZ_ASSERT(frame->isFunctionFrame() || frame->isEvalFrame()); + MOZ_ASSERT(stub->monitorsThis() || + *GetNextPc(pc) == JSOP_CHECKTHIS || + *GetNextPc(pc) == JSOP_CHECKTHISREINIT || + *GetNextPc(pc) == JSOP_CHECKRETURN); + if (stub->monitorsThis()) + TypeScript::SetThis(cx, script, TypeSet::UnknownType()); + else + TypeScript::Monitor(cx, script, pc, TypeSet::UnknownType()); + return true; + } + + StackTypeSet* types; + uint32_t argument; + if (stub->monitorsArgument(&argument)) { + MOZ_ASSERT(pc == script->code()); + types = TypeScript::ArgTypes(script, argument); + TypeScript::SetArgument(cx, script, argument, value); + } else if (stub->monitorsThis()) { + MOZ_ASSERT(pc == script->code()); + types = TypeScript::ThisTypes(script); + TypeScript::SetThis(cx, script, value); + } else { + types = TypeScript::BytecodeTypes(script, pc); + TypeScript::Monitor(cx, script, pc, types, value); + } + + if (MOZ_UNLIKELY(stub->invalid())) + return true; + + return stub->addMonitorStubForValue(cx, frame, types, value); +} + +typedef bool (*DoTypeMonitorFallbackFn)(JSContext*, BaselineFrame*, ICTypeMonitor_Fallback*, + HandleValue, MutableHandleValue); +static const VMFunction DoTypeMonitorFallbackInfo = + FunctionInfo(DoTypeMonitorFallback, "DoTypeMonitorFallback", + TailCall); + +bool 
+ICTypeMonitor_Fallback::Compiler::generateStubCode(MacroAssembler& masm) +{ + MOZ_ASSERT(R0 == JSReturnOperand); + + // Restore the tail call register. + EmitRestoreTailCallReg(masm); + + masm.pushValue(R0); + masm.push(ICStubReg); + masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); + + return tailCallVM(DoTypeMonitorFallbackInfo, masm); +} + +bool +ICTypeMonitor_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label success; + if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))) + masm.branchTestInt32(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)) + masm.branchTestNumber(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED)) + masm.branchTestUndefined(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN)) + masm.branchTestBoolean(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_STRING)) + masm.branchTestString(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_SYMBOL)) + masm.branchTestSymbol(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)) + masm.branchTestObject(Assembler::Equal, R0, &success); + + if (flags_ & TypeToFlag(JSVAL_TYPE_NULL)) + masm.branchTestNull(Assembler::Equal, R0, &success); + + EmitStubGuardFailure(masm); + + masm.bind(&success); + EmitReturnFromIC(masm); + return true; +} + +static void +MaybeWorkAroundAmdBug(MacroAssembler& masm) +{ + // Attempt to work around an AMD bug (see bug 1034706 and bug 1281759), by + // inserting 32-bytes of NOPs. 
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + if (CPUInfo::NeedAmdBugWorkaround()) { + masm.nop(9); + masm.nop(9); + masm.nop(9); + masm.nop(5); + } +#endif +} + +bool +ICTypeMonitor_SingleObject::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure; + masm.branchTestObject(Assembler::NotEqual, R0, &failure); + MaybeWorkAroundAmdBug(masm); + + // Guard on the object's identity. + Register obj = masm.extractObject(R0, ExtractTemp0); + Address expectedObject(ICStubReg, ICTypeMonitor_SingleObject::offsetOfObject()); + masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure); + MaybeWorkAroundAmdBug(masm); + + EmitReturnFromIC(masm); + MaybeWorkAroundAmdBug(masm); + + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; +} + +bool +ICTypeMonitor_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure; + masm.branchTestObject(Assembler::NotEqual, R0, &failure); + MaybeWorkAroundAmdBug(masm); + + // Guard on the object's ObjectGroup. No Spectre mitigations are needed + // here: we're just recording type information for Ion compilation and + // it's safe to speculatively return. 
+ Register obj = masm.extractObject(R0, ExtractTemp0); + Address expectedGroup(ICStubReg, ICTypeMonitor_ObjectGroup::offsetOfGroup()); + masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, expectedGroup, + R1.scratchReg(), &failure); + MaybeWorkAroundAmdBug(masm); + + EmitReturnFromIC(masm); + MaybeWorkAroundAmdBug(masm); + + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; +} + +bool +ICTypeMonitor_AnyValue::Compiler::generateStubCode(MacroAssembler& masm) +{ + EmitReturnFromIC(masm); + return true; +} + +bool +ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, HandleObject obj, + HandleObjectGroup group, HandleId id, HandleValue val) +{ + EnsureTrackPropertyTypes(cx, obj, id); + + // Make sure that undefined values are explicitly included in the property + // types for an object if generating a stub to write an undefined value. + if (val.isUndefined() && CanHaveEmptyPropertyTypesForOwnProperty(obj)) { + MOZ_ASSERT(obj->group() == group); + AddTypePropertyId(cx, obj, id, val); + } + + bool unknown = false, unknownObject = false; + AutoSweepObjectGroup sweep(group); + if (group->unknownProperties(sweep)) { + unknown = unknownObject = true; + } else { + if (HeapTypeSet* types = group->maybeGetProperty(sweep, id)) { + unknown = types->unknown(); + unknownObject = types->unknownObject(); + } else { + // We don't record null/undefined types for certain TypedObject + // properties. In these cases |types| is allowed to be nullptr + // without implying unknown types. See DoTypeUpdateFallback. + MOZ_ASSERT(obj->is()); + MOZ_ASSERT(val.isNullOrUndefined()); + } + } + MOZ_ASSERT_IF(unknown, unknownObject); + + // Don't attach too many SingleObject/ObjectGroup stubs unless we can + // replace them with a single PrimitiveSet or AnyValue stub. + if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS && + val.isObject() && + !unknownObject) + { + return true; + } + + if (unknown) { + // Attach a stub that always succeeds. 
We should not have a + // TypeUpdate_AnyValue stub yet. + MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_AnyValue)); + + // Discard existing stubs. + resetUpdateStubChain(cx->zone()); + + ICTypeUpdate_AnyValue::Compiler compiler(cx); + ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript)); + if (!stub) + return false; + + JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for any value", stub); + addOptimizedUpdateStub(stub); + + } else if (val.isPrimitive() || unknownObject) { + JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType(); + + // Check for existing TypeUpdate stub. + ICTypeUpdate_PrimitiveSet* existingStub = nullptr; + for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) { + if (iter->isTypeUpdate_PrimitiveSet()) { + existingStub = iter->toTypeUpdate_PrimitiveSet(); + MOZ_ASSERT(!existingStub->containsType(type)); + } + } + + if (val.isObject()) { + // Discard existing ObjectGroup/SingleObject stubs. + resetUpdateStubChain(cx->zone()); + if (existingStub) + addOptimizedUpdateStub(existingStub); + } + + ICTypeUpdate_PrimitiveSet::Compiler compiler(cx, existingStub, type); + ICStub* stub = existingStub ? compiler.updateStub() + : compiler.getStub(compiler.getStubSpace(outerScript)); + if (!stub) + return false; + if (!existingStub) { + MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_PrimitiveSet)); + addOptimizedUpdateStub(stub); + } + + JitSpew(JitSpew_BaselineIC, " %s TypeUpdate stub %p for primitive type %d", + existingStub ? "Modified existing" : "Created new", stub, type); + + } else if (val.toObject().isSingleton()) { + RootedObject obj(cx, &val.toObject()); + +#ifdef DEBUG + // We should not have a stub for this object. 
+ for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) { + MOZ_ASSERT_IF(iter->isTypeUpdate_SingleObject(), + iter->toTypeUpdate_SingleObject()->object() != obj); + } +#endif + + ICTypeUpdate_SingleObject::Compiler compiler(cx, obj); + ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript)); + if (!stub) + return false; + + JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for singleton %p", stub, obj.get()); + + addOptimizedUpdateStub(stub); + + } else { + RootedObjectGroup group(cx, val.toObject().group()); + +#ifdef DEBUG + // We should not have a stub for this group. + for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) { + MOZ_ASSERT_IF(iter->isTypeUpdate_ObjectGroup(), + iter->toTypeUpdate_ObjectGroup()->group() != group); + } +#endif + + ICTypeUpdate_ObjectGroup::Compiler compiler(cx, group); + ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript)); + if (!stub) + return false; + + JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for ObjectGroup %p", + stub, group.get()); + + addOptimizedUpdateStub(stub); + } + + return true; +} + // // TypeUpdate_Fallback // @@ -346,8 +1424,6 @@ const VMFunction DoTypeUpdateFallbackInfo = bool ICTypeUpdate_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - // Just store false into R1.scratchReg() and return. 
masm.move32(Imm32(0), R1.scratchReg()); EmitReturnFromIC(masm); @@ -357,8 +1433,6 @@ ICTypeUpdate_Fallback::Compiler::generateStubCode(MacroAssembler& masm) bool ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label success; if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))) masm.branchTestInt32(Assembler::Equal, R0, &success); @@ -397,8 +1471,6 @@ ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm) bool ICTypeUpdate_SingleObject::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; masm.branchTestObject(Assembler::NotEqual, R0, &failure); @@ -419,8 +1491,6 @@ ICTypeUpdate_SingleObject::Compiler::generateStubCode(MacroAssembler& masm) bool ICTypeUpdate_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; masm.branchTestObject(Assembler::NotEqual, R0, &failure); @@ -469,14 +1539,13 @@ DoToBoolFallback(JSContext* cx, BaselineFrame* frame, ICToBool_Fallback* stub, H RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); - ICStubEngine engine = ICStubEngine::Baseline; ToBoolIRGenerator gen(cx, script, pc, stub->state().mode(), arg); bool attached = false; if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached ToBool CacheIR stub, attached is now %d", attached); } @@ -497,7 +1566,6 @@ static const VMFunction fun = FunctionInfo(DoToBoolFallback, "DoToBoolFallba bool ICToBool_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); // Restore the tail call register. 
@@ -532,7 +1600,6 @@ static const VMFunction DoToNumberFallbackInfo = bool ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); // Restore the tail call register. @@ -548,6 +1615,28 @@ ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler& masm) return tailCallVM(DoToNumberFallbackInfo, masm); } +static void +StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub) +{ + // Before the new script properties analysis has been performed on a type, + // all instances of that type have the maximum number of fixed slots. + // Afterwards, the objects (even the preliminary ones) might be changed + // to reduce the number of fixed slots they have. If we generate stubs for + // both the old and new number of fixed slots, the stub will look + // polymorphic to IonBuilder when it is actually monomorphic. To avoid + // this, strip out any stubs for preliminary objects before attaching a new + // stub which isn't on a preliminary object. 
+ + for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) { + if (iter->isCacheIR_Regular() && iter->toCacheIR_Regular()->hasPreliminaryObject()) + iter.unlink(cx); + else if (iter->isCacheIR_Monitored() && iter->toCacheIR_Monitored()->hasPreliminaryObject()) + iter.unlink(cx); + else if (iter->isCacheIR_Updated() && iter->toCacheIR_Updated()->hasPreliminaryObject()) + iter.unlink(cx); + } +} + // // GetElem_Fallback // @@ -587,7 +1676,6 @@ DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_ stub->discardStubs(cx); if (stub->state().canAttachStub()) { - ICStubEngine engine = ICStubEngine::Baseline; GetPropIRGenerator gen(cx, script, pc, CacheKind::GetElem, stub->state().mode(), &isTemporarilyUnoptimizable, lhs, rhs, lhs, @@ -595,7 +1683,7 @@ DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_ if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Monitored, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached GetElem CacheIR stub"); if (gen.shouldNotePreliminaryObjectStub()) @@ -661,14 +1749,13 @@ DoGetElemSuperFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub->discardStubs(cx); if (stub->state().canAttachStub()) { - ICStubEngine engine = ICStubEngine::Baseline; GetPropIRGenerator gen(cx, script, pc, CacheKind::GetElemSuper, stub->state().mode(), &isTemporarilyUnoptimizable, lhs, rhs, receiver, GetPropertyResultFlags::All); if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Monitored, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached GetElemSuper CacheIR stub"); if (gen.shouldNotePreliminaryObjectStub()) @@ -726,7 +1813,6 @@ static const VMFunction DoGetElemSuperFallbackInfo = bool 
ICGetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); // Restore the tail call register. @@ -765,22 +1851,6 @@ ICGetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm) return tailCallVM(DoGetElemFallbackInfo, masm); } -void -LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result) -{ - switch (layout) { - case Layout_TypedArray: - masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), result); - break; - case Layout_OutlineTypedObject: - case Layout_InlineTypedObject: - masm.loadTypedObjectLength(obj, result); - break; - default: - MOZ_CRASH(); - } -} - static void SetUpdateStubData(ICCacheIR_Updated* stub, const PropertyTypeCheckInfo* info) { @@ -837,8 +1907,7 @@ DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_ if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Updated, - ICStubEngine::Baseline, frame->script(), - stub, &attached); + frame->script(), stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached SetElem CacheIR stub"); @@ -901,8 +1970,7 @@ DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_ if (gen.tryAttachAddSlotStub(oldGroup, oldShape)) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Updated, - ICStubEngine::Baseline, frame->script(), - stub, &attached); + frame->script(), stub, &attached); if (newStub) { if (gen.shouldNotePreliminaryObjectStub()) newStub->toCacheIR_Updated()->notePreliminaryObject(); @@ -932,7 +2000,6 @@ static const VMFunction DoSetElemFallbackInfo = bool ICSetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); EmitRestoreTailCallReg(masm); @@ -1093,13 +2160,12 @@ 
DoInFallback(JSContext* cx, BaselineFrame* frame, ICIn_Fallback* stub_, RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); - ICStubEngine engine = ICStubEngine::Baseline; HasPropIRGenerator gen(cx, script, pc, CacheKind::In, stub->state().mode(), key, objValue); bool attached = false; if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached In CacheIR stub"); } @@ -1124,8 +2190,6 @@ static const VMFunction DoInFallbackInfo = bool ICIn_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); // Sync for the decompiler. @@ -1161,14 +2225,13 @@ DoHasOwnFallback(JSContext* cx, BaselineFrame* frame, ICHasOwn_Fallback* stub_, RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); - ICStubEngine engine = ICStubEngine::Baseline; HasPropIRGenerator gen(cx, script, pc, CacheKind::HasOwn, stub->state().mode(), keyValue, objValue); bool attached = false; if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached HasOwn CacheIR stub"); } @@ -1192,8 +2255,6 @@ static const VMFunction DoHasOwnFallbackInfo = bool ICHasOwn_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); // Sync for the decompiler. 
@@ -1235,12 +2296,11 @@ DoGetNameFallback(JSContext* cx, BaselineFrame* frame, ICGetName_Fallback* stub_ stub->discardStubs(cx); if (stub->state().canAttachStub()) { - ICStubEngine engine = ICStubEngine::Baseline; GetNameIRGenerator gen(cx, script, pc, stub->state().mode(), envChain, name); if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Monitored, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached GetName CacheIR stub"); } @@ -1282,7 +2342,6 @@ static const VMFunction DoGetNameFallbackInfo = bool ICGetName_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); EmitRestoreTailCallReg(masm); @@ -1320,8 +2379,7 @@ DoBindNameFallback(JSContext* cx, BaselineFrame* frame, ICBindName_Fallback* stu if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - ICStubEngine::Baseline, script, stub, - &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached BindName CacheIR stub"); } @@ -1345,7 +2403,6 @@ static const VMFunction DoBindNameFallbackInfo = bool ICBindName_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); EmitRestoreTailCallReg(masm); @@ -1398,8 +2455,7 @@ DoGetIntrinsicFallback(JSContext* cx, BaselineFrame* frame, ICGetIntrinsic_Fallb if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - ICStubEngine::Baseline, script, stub, - &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached GetIntrinsic CacheIR stub"); } @@ -1419,8 +2475,6 @@ static const VMFunction DoGetIntrinsicFallbackInfo = bool 
ICGetIntrinsic_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); masm.push(ICStubReg); @@ -1433,28 +2487,6 @@ ICGetIntrinsic_Fallback::Compiler::generateStubCode(MacroAssembler& masm) // GetProp_Fallback // -void -StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub) -{ - // Before the new script properties analysis has been performed on a type, - // all instances of that type have the maximum number of fixed slots. - // Afterwards, the objects (even the preliminary ones) might be changed - // to reduce the number of fixed slots they have. If we generate stubs for - // both the old and new number of fixed slots, the stub will look - // polymorphic to IonBuilder when it is actually monomorphic. To avoid - // this, strip out any stubs for preliminary objects before attaching a new - // stub which isn't on a preliminary object. - - for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) { - if (iter->isCacheIR_Regular() && iter->toCacheIR_Regular()->hasPreliminaryObject()) - iter.unlink(cx); - else if (iter->isCacheIR_Monitored() && iter->toCacheIR_Monitored()->hasPreliminaryObject()) - iter.unlink(cx); - else if (iter->isCacheIR_Updated() && iter->toCacheIR_Updated()->hasPreliminaryObject()) - iter.unlink(cx); - } -} - static bool ComputeGetPropResult(JSContext* cx, BaselineFrame* frame, JSOp op, HandlePropertyName name, MutableHandleValue val, MutableHandleValue res) @@ -1522,8 +2554,7 @@ DoGetPropFallback(JSContext* cx, BaselineFrame* frame, ICGetProp_Fallback* stub_ if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Monitored, - ICStubEngine::Baseline, script, - stub, &attached); + script, stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub"); if (gen.shouldNotePreliminaryObjectStub()) @@ -1593,8 +2624,7 @@ DoGetPropSuperFallback(JSContext* 
cx, BaselineFrame* frame, ICGetProp_Fallback* if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Monitored, - ICStubEngine::Baseline, script, - stub, &attached); + script, stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub"); if (gen.shouldNotePreliminaryObjectStub()) @@ -1697,12 +2727,10 @@ ICGetProp_Fallback::Compiler::generateStubCode(MacroAssembler& masm) void ICGetProp_Fallback::Compiler::postGenerateStubCode(MacroAssembler& masm, Handle code) { - if (engine_ == Engine::Baseline) { - BailoutReturnStub kind = hasReceiver_ ? BailoutReturnStub::GetPropSuper - : BailoutReturnStub::GetProp; - void* address = code->raw() + bailoutReturnOffset_.offset(); - cx->realm()->jitRealm()->initBailoutReturnAddr(address, getKey(), kind); - } + BailoutReturnStub kind = hasReceiver_ ? BailoutReturnStub::GetPropSuper + : BailoutReturnStub::GetProp; + void* address = code->raw() + bailoutReturnOffset_.offset(); + cx->realm()->jitRealm()->initBailoutReturnAddr(address, getKey(), kind); } // @@ -1772,8 +2800,7 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_ if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Updated, - ICStubEngine::Baseline, frame->script(), - stub, &attached); + frame->script(), stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached SetProp CacheIR stub"); @@ -1844,8 +2871,7 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_ if (gen.tryAttachAddSlotStub(oldGroup, oldShape)) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Updated, - ICStubEngine::Baseline, frame->script(), - stub, &attached); + frame->script(), stub, &attached); if (newStub) { if (gen.shouldNotePreliminaryObjectStub()) 
newStub->toCacheIR_Updated()->notePreliminaryObject(); @@ -1877,7 +2903,6 @@ static const VMFunction DoSetPropFallbackInfo = bool ICSetProp_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); MOZ_ASSERT(R0 == JSReturnOperand); EmitRestoreTailCallReg(masm); @@ -2533,9 +3558,8 @@ DoCallFallback(JSContext* cx, BaselineFrame* frame, ICCall_Fallback* stub_, uint HandleValueArray::fromMarkedLocation(argc, vp+2)); if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), - gen.cacheIRStubKind(), - ICStubEngine::Baseline, - script, stub, &handled); + gen.cacheIRStubKind(), script, + stub, &handled); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached Call CacheIR stub"); @@ -2973,8 +3997,6 @@ static const VMFunction DoSpreadCallFallbackInfo = bool ICCall_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - MOZ_ASSERT(R0 == JSReturnOperand); // Values are on the stack left-to-right. 
Calling convention wants them @@ -3101,8 +4123,6 @@ static const VMFunction CreateThisInfoBaseline = bool ICCallScriptedCompiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; AllocatableGeneralRegisterSet regs(availableGeneralRegs(0)); bool canUseTailCallReg = regs.has(ICTailCallReg); @@ -3369,8 +4389,6 @@ static const VMFunction CopyStringSplitArrayInfo = bool ICCall_ConstStringSplit::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - // Stack Layout: [ ..., CalleeVal, ThisVal, strVal, sepVal, +ICStackValueOffset+ ] static const size_t SEP_DEPTH = 0; static const size_t STR_DEPTH = sizeof(Value); @@ -3469,8 +4487,6 @@ ICCall_ConstStringSplit::Compiler::generateStubCode(MacroAssembler& masm) bool ICCall_IsSuspendedGenerator::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - // The IsSuspendedGenerator intrinsic is only called in self-hosted code, // so it's safe to assume we have a single argument and the callee is our // intrinsic. 
@@ -3514,8 +4530,6 @@ ICCall_IsSuspendedGenerator::Compiler::generateStubCode(MacroAssembler& masm) bool ICCall_Native::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; AllocatableGeneralRegisterSet regs(availableGeneralRegs(0)); @@ -3627,8 +4641,6 @@ ICCall_Native::Compiler::generateStubCode(MacroAssembler& masm) bool ICCall_ClassHook::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; AllocatableGeneralRegisterSet regs(availableGeneralRegs(0)); @@ -3718,8 +4730,6 @@ ICCall_ClassHook::Compiler::generateStubCode(MacroAssembler& masm) bool ICCall_ScriptedApplyArray::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; AllocatableGeneralRegisterSet regs(availableGeneralRegs(0)); @@ -3816,8 +4826,6 @@ ICCall_ScriptedApplyArray::Compiler::generateStubCode(MacroAssembler& masm) bool ICCall_ScriptedApplyArguments::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; AllocatableGeneralRegisterSet regs(availableGeneralRegs(0)); @@ -3908,8 +4916,6 @@ ICCall_ScriptedApplyArguments::Compiler::generateStubCode(MacroAssembler& masm) bool ICCall_ScriptedFunCall::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; AllocatableGeneralRegisterSet regs(availableGeneralRegs(0)); bool canUseTailCallReg = regs.has(ICTailCallReg); @@ -4042,8 +5048,6 @@ DoubleValueToInt32ForSwitch(Value* v) bool ICTableSwitch::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label isInt32, notInt32, outOfRange; Register scratch = R1.scratchReg(); @@ -4160,13 +5164,12 @@ DoGetIteratorFallback(JSContext* cx, BaselineFrame* frame, ICGetIterator_Fallbac RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); - ICStubEngine engine = 
ICStubEngine::Baseline; GetIteratorIRGenerator gen(cx, script, pc, stub->state().mode(), value); bool attached = false; if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached GetIterator CacheIR stub"); } @@ -4191,8 +5194,6 @@ static const VMFunction DoGetIteratorFallbackInfo = bool ICGetIterator_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); // Sync stack for the decompiler. @@ -4250,8 +5251,6 @@ static const VMFunction DoIteratorMoreFallbackInfo = bool ICIteratorMore_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); masm.unboxObject(R0, R0.scratchReg()); @@ -4269,8 +5268,6 @@ ICIteratorMore_Fallback::Compiler::generateStubCode(MacroAssembler& masm) bool ICIteratorMore_Native::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - Label failure; Register obj = masm.extractObject(R0, ExtractTemp0); @@ -4330,8 +5327,6 @@ static const VMFunction DoIteratorCloseFallbackInfo = bool ICIteratorClose_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); masm.pushValue(R0); @@ -4358,7 +5353,6 @@ TryAttachInstanceOfStub(JSContext* cx, BaselineFrame* frame, ICInstanceOf_Fallba RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); - ICStubEngine engine = ICStubEngine::Baseline; InstanceOfIRGenerator gen(cx, script, pc, stub->state().mode(), lhs, rhs); @@ -4366,7 +5360,7 @@ TryAttachInstanceOfStub(JSContext* cx, BaselineFrame* frame, ICInstanceOf_Fallba if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), 
gen.cacheKind(), BaselineCacheIRStubKind::Regular, - engine, script, stub, attached); + script, stub, attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached InstanceOf CacheIR stub, attached is now %d", *attached); } @@ -4382,7 +5376,7 @@ DoInstanceOfFallback(JSContext* cx, BaselineFrame* frame, ICInstanceOf_Fallback* HandleValue lhs, HandleValue rhs, MutableHandleValue res) { // This fallback stub may trigger debug mode toggling. - DebugModeOSRVolatileStub stub(ICStubEngine::Baseline, frame, stub_); + DebugModeOSRVolatileStub stub(frame, stub_); FallbackICSpew(cx, stub, "InstanceOf"); @@ -4428,8 +5422,6 @@ static const VMFunction DoInstanceOfFallbackInfo = bool ICInstanceOf_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); // Sync stack for the decompiler. @@ -4461,13 +5453,12 @@ DoTypeOfFallback(JSContext* cx, BaselineFrame* frame, ICTypeOf_Fallback* stub, H RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); - ICStubEngine engine = ICStubEngine::Baseline; TypeOfIRGenerator gen(cx, script, pc, stub->state().mode(), val); bool attached = false; if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - engine, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached TypeOf CacheIR stub"); } @@ -4489,8 +5480,6 @@ static const VMFunction DoTypeOfFallbackInfo = bool ICTypeOf_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); masm.pushValue(R0); @@ -4542,8 +5531,6 @@ static const VMFunction ThrowInfoBaseline = bool ICRetSub_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - // If R0 is BooleanValue(true), rethrow R1. 
Label rethrow; masm.branchTestBooleanTruthy(true, R0, &rethrow); @@ -4578,8 +5565,6 @@ ICRetSub_Fallback::Compiler::generateStubCode(MacroAssembler& masm) bool ICRetSub_Resume::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - // If R0 is BooleanValue(true), rethrow R1. Label fail, rethrow; masm.branchTestBooleanTruthy(true, R0, &rethrow); @@ -4761,8 +5746,6 @@ static const VMFunction DoRestFallbackInfo = bool ICRest_Fallback::Compiler::generateStubCode(MacroAssembler& masm) { - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitRestoreTailCallReg(masm); masm.push(ICStubReg); @@ -4780,7 +5763,7 @@ DoUnaryArithFallback(JSContext* cx, BaselineFrame* frame, ICUnaryArith_Fallback* HandleValue val, MutableHandleValue res) { // This fallback stub may trigger debug mode toggling. - DebugModeOSRVolatileStub debug_stub(ICStubEngine::Baseline, frame, stub); + DebugModeOSRVolatileStub debug_stub(frame, stub); RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); @@ -4823,7 +5806,7 @@ DoUnaryArithFallback(JSContext* cx, BaselineFrame* frame, ICUnaryArith_Fallback* bool attached = false; ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - ICStubEngine::Baseline, script, stub, &attached); + script, stub, &attached); if (newStub) { JitSpew(JitSpew_BaselineIC, " Attached UnaryArith CacheIR stub for %s", CodeName[op]); } @@ -4867,7 +5850,7 @@ DoBinaryArithFallback(JSContext* cx, BaselineFrame* frame, ICBinaryArith_Fallbac HandleValue lhs, HandleValue rhs, MutableHandleValue ret) { // This fallback stub may trigger debug mode toggling. 
- DebugModeOSRVolatileStub stub(ICStubEngine::Baseline, frame, stub_); + DebugModeOSRVolatileStub stub(frame, stub_); RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); @@ -4976,7 +5959,7 @@ DoBinaryArithFallback(JSContext* cx, BaselineFrame* frame, ICBinaryArith_Fallbac bool attached = false; ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - ICStubEngine::Baseline, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached BinaryArith CacheIR stub for %s", CodeName[op]); @@ -5022,7 +6005,7 @@ DoCompareFallback(JSContext* cx, BaselineFrame* frame, ICCompare_Fallback* stub_ HandleValue rhs, MutableHandleValue ret) { // This fallback stub may trigger debug mode toggling. - DebugModeOSRVolatileStub stub(ICStubEngine::Baseline, frame, stub_); + DebugModeOSRVolatileStub stub(frame, stub_); RootedScript script(cx, frame->script()); jsbytecode* pc = stub->icEntry()->pc(script); @@ -5098,7 +6081,7 @@ DoCompareFallback(JSContext* cx, BaselineFrame* frame, ICCompare_Fallback* stub_ if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - ICStubEngine::Baseline, script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " Attached CacheIR stub"); return true; @@ -5192,10 +6175,8 @@ ICNewArray_Fallback::Compiler::generateStubCode(MacroAssembler& masm) // NewObject_Fallback // static bool -DoNewObject(JSContext* cx, void* payload, ICNewObject_Fallback* stub, MutableHandleValue res) +DoNewObject(JSContext* cx, BaselineFrame* frame, ICNewObject_Fallback* stub, MutableHandleValue res) { - SharedStubInfo info(cx, payload, stub->icEntry()); - FallbackICSpew(cx, stub, "NewObject"); RootedObject obj(cx); @@ -5205,8 +6186,8 @@ DoNewObject(JSContext* cx, void* payload, ICNewObject_Fallback* stub, MutableHan 
MOZ_ASSERT(!templateObject->group()->maybePreliminaryObjectsDontCheckGeneration()); obj = NewObjectOperationWithTemplate(cx, templateObject); } else { - HandleScript script = info.script(); - jsbytecode* pc = info.pc(); + RootedScript script(cx, frame->script()); + jsbytecode* pc = stub->icEntry()->pc(script); obj = NewObjectOperation(cx, script, pc); if (obj && !obj->isSingleton() && @@ -5218,12 +6199,11 @@ DoNewObject(JSContext* cx, void* payload, ICNewObject_Fallback* stub, MutableHan if (!JitOptions.disableCacheIR) { bool attached = false; - RootedScript script(cx, info.outerScript(cx)); NewObjectIRGenerator gen(cx, script, pc, stub->state().mode(), JSOp(*pc), templateObject); if (gen.tryAttachStub()) { ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(), BaselineCacheIRStubKind::Regular, - ICStubEngine::Baseline , script, stub, &attached); + script, stub, &attached); if (newStub) JitSpew(JitSpew_BaselineIC, " NewObject Attached CacheIR stub"); } @@ -5239,7 +6219,7 @@ DoNewObject(JSContext* cx, void* payload, ICNewObject_Fallback* stub, MutableHan return true; } -typedef bool(*DoNewObjectFn)(JSContext*, void*, ICNewObject_Fallback*, MutableHandleValue); +typedef bool(*DoNewObjectFn)(JSContext*, BaselineFrame*, ICNewObject_Fallback*, MutableHandleValue); static const VMFunction DoNewObjectInfo = FunctionInfo(DoNewObject, "DoNewObject", TailCall); diff --git a/js/src/jit/BaselineIC.h b/js/src/jit/BaselineIC.h index 8eac9cf79ff9..53139bc393ca 100644 --- a/js/src/jit/BaselineIC.h +++ b/js/src/jit/BaselineIC.h @@ -11,9 +11,10 @@ #include "builtin/TypedObject.h" #include "gc/Barrier.h" +#include "gc/GC.h" #include "jit/BaselineICList.h" #include "jit/BaselineJIT.h" -#include "jit/SharedIC.h" +#include "jit/ICState.h" #include "jit/SharedICRegisters.h" #include "js/GCVector.h" #include "vm/ArrayObject.h" @@ -25,6 +26,1092 @@ namespace js { namespace jit { +// [SMDOC] JIT Inline Caches (ICs) +// +// Baseline Inline Caches are polymorphic 
caches that aggressively +// share their stub code. +// +// Every polymorphic site contains a linked list of stubs which are +// specific to that site. These stubs are composed of a |StubData| +// structure that stores parametrization information (e.g. +// the shape pointer for a shape-check-and-property-get stub), any +// dynamic information (e.g. warm-up counters), a pointer to the stub code, +// and a pointer to the next stub state in the linked list. +// +// Every BaselineScript keeps an table of |CacheDescriptor| data +// structures, which store the following: +// A pointer to the first StubData in the cache. +// The bytecode PC of the relevant IC. +// The machine-code PC where the call to the stubcode returns. +// +// A diagram: +// +// Control flow Pointers +// =======# ----. .----> +// # | | +// #======> \-----/ +// +// +// .---------------------------------------. +// | .-------------------------. | +// | | .----. | | +// Baseline | | | | | | +// JIT Code 0 ^ 1 ^ 2 ^ | | | +// +--------------+ .-->+-----+ +-----+ +-----+ | | | +// | | #=|==>| |==>| |==>| FB | | | | +// | | # | +-----+ +-----+ +-----+ | | | +// | | # | # # # | | | +// |==============|==# | # # # | | | +// |=== IC =======| | # # # | | | +// .->|==============|<===|======#=========#=========# | | | +// | | | | | | | +// | | | | | | | +// | | | | | | | +// | | | | v | | +// | | | | +---------+ | | +// | | | | | Fallback| | | +// | | | | | Stub | | | +// | | | | | Code | | | +// | | | | +---------+ | | +// | +--------------+ | | | +// | |_______ | +---------+ | | +// | | | | Stub |<---/ | +// | IC | \--. | Code | | +// | Descriptor | | +---------+ | +// | Table v | | +// | +-----------------+ | +---------+ | +// \--| Ins | PC | Stub |----/ | Stub |<-------/ +// +-----------------+ | Code | +// | ... | +---------+ +// +-----------------+ +// Shared +// Stub Code +// +// +// Type ICs +// ======== +// +// Type ICs are otherwise regular ICs that are actually nested within +// other IC chains. 
They serve to optimize locations in the code where the +// baseline compiler would have otherwise had to perform a type Monitor operation +// (e.g. the result of GetProp, GetElem, etc.), or locations where the baseline +// compiler would have had to modify a heap typeset using the type of an input +// value (e.g. SetProp, SetElem, etc.) +// +// There are two kinds of Type ICs: Monitor and Update. +// +// Note that type stub bodies are no-ops. The stubs only exist for their +// guards, and their existence simply signifies that the typeset (implicit) +// that is being checked already contains that type. +// +// TypeMonitor ICs +// --------------- +// Monitor ICs are shared between stubs in the general IC, and monitor the resulting +// types of getter operations (call returns, getprop outputs, etc.) +// +// +-----------+ +-----------+ +-----------+ +-----------+ +// ---->| Stub 1 |---->| Stub 2 |---->| Stub 3 |---->| FB Stub | +// +-----------+ +-----------+ +-----------+ +-----------+ +// | | | | +// |------------------/-----------------/ | +// v | +// +-----------+ +-----------+ +-----------+ | +// | Type 1 |---->| Type 2 |---->| Type FB | | +// +-----------+ +-----------+ +-----------+ | +// | | | | +// <----------/-----------------/------------------/------------------/ +// r e t u r n p a t h +// +// After an optimized IC stub successfully executes, it passes control to the type stub +// chain to check the resulting type. If no type stub succeeds, and the monitor fallback +// stub is reached, the monitor fallback stub performs a manual monitor, and also adds the +// appropriate type stub to the chain. +// +// The IC's main fallback, in addition to generating new mainline stubs, also generates +// type stubs as reflected by its returned value. +// +// NOTE: The type IC chain returns directly to the mainline code, not back to the +// stub it was entered from. Thus, entering a type IC is a matter of a |jump|, not +// a |call|. 
This allows us to safely call a VM Monitor function from within the monitor IC's +// fallback chain, since the return address (needed for stack inspection) is preserved. +// +// +// TypeUpdate ICs +// -------------- +// Update ICs update heap typesets and monitor the input types of setter operations +// (setelem, setprop inputs, etc.). Unlike monitor ICs, they are not shared +// between stubs on an IC, but instead are kept track of on a per-stub basis. +// +// This is because the main stubs for the operation will each identify a potentially +// different ObjectGroup to update. New input types must be tracked on a group-to- +// group basis. +// +// Type-update ICs cannot be called in tail position (they must return to the +// the stub that called them so that the stub may continue to perform its original +// purpose). This means that any VMCall to perform a manual type update from C++ must be +// done from within the main IC stub. This necessitates that the stub enter a +// "BaselineStub" frame before making the call. +// +// If the type-update IC chain could itself make the VMCall, then the BaselineStub frame +// must be entered before calling the type-update chain, and exited afterward. This +// is very expensive for a common case where we expect the type-update fallback to not +// be called. To avoid the cost of entering and exiting a BaselineStub frame when +// using the type-update IC chain, we design the chain to not perform any VM-calls +// in its fallback. +// +// Instead, the type-update IC chain is responsible for returning 1 or 0, depending +// on if a type is represented in the chain or not. The fallback stub simply returns +// 0, and all other optimized stubs return 1. +// If the chain returns 1, then the IC stub goes ahead and performs its operation. +// If the chain returns 0, then the IC stub performs a call to the fallback function +// inline (doing the requisite BaselineStub frame enter/exit). 
+// This allows us to avoid the expensive subfram enter/exit in the common case. +// +// r e t u r n p a t h +// <--------------.-----------------.-----------------.-----------------. +// | | | | +// +-----------+ +-----------+ +-----------+ +-----------+ +// ---->| Stub 1 |---->| Stub 2 |---->| Stub 3 |---->| FB Stub | +// +-----------+ +-----------+ +-----------+ +-----------+ +// | ^ | ^ | ^ +// | | | | | | +// | | | | | |----------------. +// | | | | v |1 |0 +// | | | | +-----------+ +-----------+ +// | | | | | Type 3.1 |--->| FB 3 | +// | | | | +-----------+ +-----------+ +// | | | | +// | | | \-------------.-----------------. +// | | | | | | +// | | v |1 |1 |0 +// | | +-----------+ +-----------+ +-----------+ +// | | | Type 2.1 |---->| Type 2.2 |---->| FB 2 | +// | | +-----------+ +-----------+ +-----------+ +// | | +// | \-------------.-----------------. +// | | | | +// v |1 |1 |0 +// +-----------+ +-----------+ +-----------+ +// | Type 1.1 |---->| Type 1.2 |---->| FB 1 | +// +-----------+ +-----------+ +-----------+ +// + +class ICStub; +class ICFallbackStub; + + +#define FORWARD_DECLARE_STUBS(kindName) class IC##kindName; + IC_BASELINE_STUB_KIND_LIST(FORWARD_DECLARE_STUBS) +#undef FORWARD_DECLARE_STUBS + +#ifdef JS_JITSPEW +void FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...) + MOZ_FORMAT_PRINTF(3, 4); +void TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...) + MOZ_FORMAT_PRINTF(3, 4); +#else +#define FallbackICSpew(...) +#define TypeFallbackICSpew(...) +#endif + +// +// An entry in the JIT IC descriptor table. +// +class ICEntry +{ + private: + // A pointer to the shared IC stub for this instruction. + ICStub* firstStub_; + + // Offset from the start of the JIT code where the IC + // load and call instructions are. + uint32_t returnOffset_; + + // The PC of this IC's bytecode op within the JSScript. + uint32_t pcOffset_ : 28; + + public: + enum Kind { + // A for-op IC entry. 
+ Kind_Op = 0, + + // A non-op IC entry. + Kind_NonOp, + + // A fake IC entry for returning from a callVM for an op. + Kind_CallVM, + + // A fake IC entry for returning from a callVM not for an op (e.g., in + // the prologue). + Kind_NonOpCallVM, + + // A fake IC entry for returning from a callVM to after the + // warmup counter. + Kind_WarmupCounter, + + // A fake IC entry for returning from a callVM to the interrupt + // handler via the over-recursion check on function entry. + Kind_StackCheck, + + // As above, but for the early check. See emitStackCheck. + Kind_EarlyStackCheck, + + // A fake IC entry for returning from DebugTrapHandler. + Kind_DebugTrap, + + // A fake IC entry for returning from a callVM to + // Debug{Prologue,AfterYield,Epilogue}. + Kind_DebugPrologue, + Kind_DebugAfterYield, + Kind_DebugEpilogue, + + Kind_Invalid + }; + + private: + // What this IC is for. + Kind kind_ : 4; + + // Set the kind and asserts that it's sane. + void setKind(Kind kind) { + MOZ_ASSERT(kind < Kind_Invalid); + kind_ = kind; + MOZ_ASSERT(this->kind() == kind); + } + + public: + ICEntry(uint32_t pcOffset, Kind kind) + : firstStub_(nullptr), returnOffset_(), pcOffset_(pcOffset) + { + // The offset must fit in at least 28 bits, since we shave off 4 for + // the Kind enum. + MOZ_ASSERT(pcOffset_ == pcOffset); + JS_STATIC_ASSERT(BaselineScript::MAX_JSSCRIPT_LENGTH <= (1u << 28) - 1); + MOZ_ASSERT(pcOffset <= BaselineScript::MAX_JSSCRIPT_LENGTH); + setKind(kind); + } + + CodeOffset returnOffset() const { + return CodeOffset(returnOffset_); + } + + void setReturnOffset(CodeOffset offset) { + MOZ_ASSERT(offset.offset() <= (size_t) UINT32_MAX); + returnOffset_ = (uint32_t) offset.offset(); + } + + uint32_t pcOffset() const { + return pcOffset_; + } + + jsbytecode* pc(JSScript* script) const { + return script->offsetToPC(pcOffset_); + } + + Kind kind() const { + // MSVC compiles enums as signed. 
+ return Kind(kind_ & 0xf); + } + bool isForOp() const { + return kind() == Kind_Op; + } + + void setFakeKind(Kind kind) { + MOZ_ASSERT(kind != Kind_Op && kind != Kind_NonOp); + setKind(kind); + } + + bool hasStub() const { + return firstStub_ != nullptr; + } + ICStub* firstStub() const { + MOZ_ASSERT(hasStub()); + return firstStub_; + } + + ICFallbackStub* fallbackStub() const; + + void setFirstStub(ICStub* stub) { + firstStub_ = stub; + } + + static inline size_t offsetOfFirstStub() { + return offsetof(ICEntry, firstStub_); + } + + inline ICStub** addressOfFirstStub() { + return &firstStub_; + } + + void trace(JSTracer* trc); +}; + +class ICMonitoredStub; +class ICMonitoredFallbackStub; +class ICUpdatedStub; + +// Constant iterator that traverses arbitrary chains of ICStubs. +// No requirements are made of the ICStub used to construct this +// iterator, aside from that the stub be part of a nullptr-terminated +// chain. +// The iterator is considered to be at its end once it has been +// incremented _past_ the last stub. Thus, if 'atEnd()' returns +// true, the '*' and '->' operations are not valid. 
+class ICStubConstIterator +{ + friend class ICStub; + friend class ICFallbackStub; + + private: + ICStub* currentStub_; + + public: + explicit ICStubConstIterator(ICStub* currentStub) : currentStub_(currentStub) {} + + static ICStubConstIterator StartingAt(ICStub* stub) { + return ICStubConstIterator(stub); + } + static ICStubConstIterator End(ICStub* stub) { + return ICStubConstIterator(nullptr); + } + + bool operator ==(const ICStubConstIterator& other) const { + return currentStub_ == other.currentStub_; + } + bool operator !=(const ICStubConstIterator& other) const { + return !(*this == other); + } + + ICStubConstIterator& operator++(); + + ICStubConstIterator operator++(int) { + ICStubConstIterator oldThis(*this); + ++(*this); + return oldThis; + } + + ICStub* operator*() const { + MOZ_ASSERT(currentStub_); + return currentStub_; + } + + ICStub* operator ->() const { + MOZ_ASSERT(currentStub_); + return currentStub_; + } + + bool atEnd() const { + return currentStub_ == nullptr; + } +}; + +// Iterator that traverses "regular" IC chains that start at an ICEntry +// and are terminated with an ICFallbackStub. +// +// The iterator is considered to be at its end once it is _at_ the +// fallback stub. Thus, unlike the ICStubConstIterator, operators +// '*' and '->' are valid even if 'atEnd()' returns true - they +// will act on the fallback stub. +// +// This iterator also allows unlinking of stubs being traversed. +// Note that 'unlink' does not implicitly advance the iterator - +// it must be advanced explicitly using '++'. +class ICStubIterator +{ + friend class ICFallbackStub; + + private: + ICEntry* icEntry_; + ICFallbackStub* fallbackStub_; + ICStub* previousStub_; + ICStub* currentStub_; + bool unlinked_; + + explicit ICStubIterator(ICFallbackStub* fallbackStub, bool end=false); + public: + + bool operator ==(const ICStubIterator& other) const { + // == should only ever be called on stubs from the same chain. 
+ MOZ_ASSERT(icEntry_ == other.icEntry_); + MOZ_ASSERT(fallbackStub_ == other.fallbackStub_); + return currentStub_ == other.currentStub_; + } + bool operator !=(const ICStubIterator& other) const { + return !(*this == other); + } + + ICStubIterator& operator++(); + + ICStubIterator operator++(int) { + ICStubIterator oldThis(*this); + ++(*this); + return oldThis; + } + + ICStub* operator*() const { + return currentStub_; + } + + ICStub* operator ->() const { + return currentStub_; + } + + bool atEnd() const { + return currentStub_ == (ICStub*) fallbackStub_; + } + + void unlink(JSContext* cx); +}; + +// +// Base class for all IC stubs. +// +class ICStub +{ + friend class ICFallbackStub; + + public: + enum Kind { + INVALID = 0, +#define DEF_ENUM_KIND(kindName) kindName, + IC_BASELINE_STUB_KIND_LIST(DEF_ENUM_KIND) +#undef DEF_ENUM_KIND + LIMIT + }; + + static bool IsValidKind(Kind k) { + return (k > INVALID) && (k < LIMIT); + } + static bool IsCacheIRKind(Kind k) { + return k == CacheIR_Regular || k == CacheIR_Monitored || k == CacheIR_Updated; + } + + static const char* KindString(Kind k) { + switch(k) { +#define DEF_KIND_STR(kindName) case kindName: return #kindName; + IC_BASELINE_STUB_KIND_LIST(DEF_KIND_STR) +#undef DEF_KIND_STR + default: + MOZ_CRASH("Invalid kind."); + } + } + + enum Trait { + Regular = 0x0, + Fallback = 0x1, + Monitored = 0x2, + MonitoredFallback = 0x3, + Updated = 0x4 + }; + + void traceCode(JSTracer* trc, const char* name); + void updateCode(JitCode* stubCode); + void trace(JSTracer* trc); + + template + static T* New(JSContext* cx, ICStubSpace* space, JitCode* code, Args&&... args) { + if (!code) + return nullptr; + T* result = space->allocate(code, std::forward(args)...); + if (!result) + ReportOutOfMemory(cx); + return result; + } + + protected: + // The raw jitcode to call for this stub. + uint8_t* stubCode_; + + // Pointer to next IC stub. This is null for the last IC stub, which should + // either be a fallback or inert IC stub. 
+ ICStub* next_; + + // A 16-bit field usable by subtypes of ICStub for subtype-specific small-info + uint16_t extra_; + + // The kind of the stub. + // High bit is 'isFallback' flag. + // Second high bit is 'isMonitored' flag. + Trait trait_ : 3; + Kind kind_ : 13; + + inline ICStub(Kind kind, JitCode* stubCode) + : stubCode_(stubCode->raw()), + next_(nullptr), + extra_(0), + trait_(Regular), + kind_(kind) + { + MOZ_ASSERT(stubCode != nullptr); + } + + inline ICStub(Kind kind, Trait trait, JitCode* stubCode) + : stubCode_(stubCode->raw()), + next_(nullptr), + extra_(0), + trait_(trait), + kind_(kind) + { + MOZ_ASSERT(stubCode != nullptr); + } + + inline Trait trait() const { + // Workaround for MSVC reading trait_ as signed value. + return (Trait)(trait_ & 0x7); + } + + public: + + inline Kind kind() const { + return static_cast(kind_); + } + + inline bool isFallback() const { + return trait() == Fallback || trait() == MonitoredFallback; + } + + inline bool isMonitored() const { + return trait() == Monitored; + } + + inline bool isUpdated() const { + return trait() == Updated; + } + + inline bool isMonitoredFallback() const { + return trait() == MonitoredFallback; + } + + inline const ICFallbackStub* toFallbackStub() const { + MOZ_ASSERT(isFallback()); + return reinterpret_cast(this); + } + + inline ICFallbackStub* toFallbackStub() { + MOZ_ASSERT(isFallback()); + return reinterpret_cast(this); + } + + inline const ICMonitoredStub* toMonitoredStub() const { + MOZ_ASSERT(isMonitored()); + return reinterpret_cast(this); + } + + inline ICMonitoredStub* toMonitoredStub() { + MOZ_ASSERT(isMonitored()); + return reinterpret_cast(this); + } + + inline const ICMonitoredFallbackStub* toMonitoredFallbackStub() const { + MOZ_ASSERT(isMonitoredFallback()); + return reinterpret_cast(this); + } + + inline ICMonitoredFallbackStub* toMonitoredFallbackStub() { + MOZ_ASSERT(isMonitoredFallback()); + return reinterpret_cast(this); + } + + inline const ICUpdatedStub* toUpdatedStub() 
const { + MOZ_ASSERT(isUpdated()); + return reinterpret_cast(this); + } + + inline ICUpdatedStub* toUpdatedStub() { + MOZ_ASSERT(isUpdated()); + return reinterpret_cast(this); + } + +#define KIND_METHODS(kindName) \ + inline bool is##kindName() const { return kind() == kindName; } \ + inline const IC##kindName* to##kindName() const { \ + MOZ_ASSERT(is##kindName()); \ + return reinterpret_cast(this); \ + } \ + inline IC##kindName* to##kindName() { \ + MOZ_ASSERT(is##kindName()); \ + return reinterpret_cast(this); \ + } + IC_BASELINE_STUB_KIND_LIST(KIND_METHODS) +#undef KIND_METHODS + + inline ICStub* next() const { + return next_; + } + + inline bool hasNext() const { + return next_ != nullptr; + } + + inline void setNext(ICStub* stub) { + // Note: next_ only needs to be changed under the compilation lock for + // non-type-monitor/update ICs. + next_ = stub; + } + + inline ICStub** addressOfNext() { + return &next_; + } + + inline JitCode* jitCode() { + return JitCode::FromExecutable(stubCode_); + } + + inline uint8_t* rawStubCode() const { + return stubCode_; + } + + // This method is not valid on TypeUpdate stub chains! + inline ICFallbackStub* getChainFallback() { + ICStub* lastStub = this; + while (lastStub->next_) + lastStub = lastStub->next_; + MOZ_ASSERT(lastStub->isFallback()); + return lastStub->toFallbackStub(); + } + + inline ICStubConstIterator beginHere() { + return ICStubConstIterator::StartingAt(this); + } + + static inline size_t offsetOfNext() { + return offsetof(ICStub, next_); + } + + static inline size_t offsetOfStubCode() { + return offsetof(ICStub, stubCode_); + } + + static inline size_t offsetOfExtra() { + return offsetof(ICStub, extra_); + } + + static bool NonCacheIRStubMakesGCCalls(Kind kind); + bool makesGCCalls() const; + + // Optimized stubs get purged on GC. But some stubs can be active on the + // stack during GC - specifically the ones that can make calls. 
To ensure + // that these do not get purged, all stubs that can make calls are allocated + // in the fallback stub space. + bool allocatedInFallbackSpace() const { + MOZ_ASSERT(next()); + return makesGCCalls(); + } +}; + +class ICFallbackStub : public ICStub +{ + friend class ICStubConstIterator; + protected: + // Fallback stubs need these fields to easily add new stubs to + // the linked list of stubs for an IC. + + // The IC entry for this linked list of stubs. + ICEntry* icEntry_; + + // The number of stubs kept in the IC entry. + ICState state_; + + // A pointer to the location stub pointer that needs to be + // changed to add a new "last" stub immediately before the fallback + // stub. This'll start out pointing to the icEntry's "firstStub_" + // field, and as new stubs are added, it'll point to the current + // last stub's "next_" field. + ICStub** lastStubPtrAddr_; + + ICFallbackStub(Kind kind, JitCode* stubCode) + : ICStub(kind, ICStub::Fallback, stubCode), + icEntry_(nullptr), + state_(), + lastStubPtrAddr_(nullptr) {} + + ICFallbackStub(Kind kind, Trait trait, JitCode* stubCode) + : ICStub(kind, trait, stubCode), + icEntry_(nullptr), + state_(), + lastStubPtrAddr_(nullptr) + { + MOZ_ASSERT(trait == ICStub::Fallback || + trait == ICStub::MonitoredFallback); + } + + public: + inline ICEntry* icEntry() const { + return icEntry_; + } + + inline size_t numOptimizedStubs() const { + return state_.numOptimizedStubs(); + } + + void setInvalid() { + state_.setInvalid(); + } + + bool invalid() const { + return state_.invalid(); + } + + ICState& state() { + return state_; + } + + // The icEntry and lastStubPtrAddr_ fields can't be initialized when the stub is + // created since the stub is created at compile time, and we won't know the IC entry + // address until after compile when the JitScript is created. This method + // allows these fields to be fixed up at that point. 
+ void fixupICEntry(ICEntry* icEntry) { + MOZ_ASSERT(icEntry_ == nullptr); + MOZ_ASSERT(lastStubPtrAddr_ == nullptr); + icEntry_ = icEntry; + lastStubPtrAddr_ = icEntry_->addressOfFirstStub(); + } + + // Add a new stub to the IC chain terminated by this fallback stub. + void addNewStub(ICStub* stub) { + MOZ_ASSERT(!invalid()); + MOZ_ASSERT(*lastStubPtrAddr_ == this); + MOZ_ASSERT(stub->next() == nullptr); + stub->setNext(this); + *lastStubPtrAddr_ = stub; + lastStubPtrAddr_ = stub->addressOfNext(); + state_.trackAttached(); + } + + ICStubConstIterator beginChainConst() const { + return ICStubConstIterator(icEntry_->firstStub()); + } + + ICStubIterator beginChain() { + return ICStubIterator(this); + } + + bool hasStub(ICStub::Kind kind) const { + for (ICStubConstIterator iter = beginChainConst(); !iter.atEnd(); iter++) { + if (iter->kind() == kind) + return true; + } + return false; + } + + unsigned numStubsWithKind(ICStub::Kind kind) const { + unsigned count = 0; + for (ICStubConstIterator iter = beginChainConst(); !iter.atEnd(); iter++) { + if (iter->kind() == kind) + count++; + } + return count; + } + + void discardStubs(JSContext* cx); + + void unlinkStub(Zone* zone, ICStub* prev, ICStub* stub); + void unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind); +}; + +// Base class for Trait::Regular CacheIR stubs +class ICCacheIR_Regular : public ICStub +{ + const CacheIRStubInfo* stubInfo_; + + public: + ICCacheIR_Regular(JitCode* stubCode, const CacheIRStubInfo* stubInfo) + : ICStub(ICStub::CacheIR_Regular, stubCode), + stubInfo_(stubInfo) + {} + + static ICCacheIR_Regular* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub, + ICCacheIR_Regular& other); + + void notePreliminaryObject() { + extra_ = 1; + } + bool hasPreliminaryObject() const { + return extra_; + } + + const CacheIRStubInfo* stubInfo() const { + return stubInfo_; + } + + uint8_t* stubDataStart(); +}; + +// Monitored stubs are IC stubs that feed a single resulting value out to a +// 
type monitor operation. +class ICMonitoredStub : public ICStub +{ + protected: + // Pointer to the start of the type monitoring stub chain. + ICStub* firstMonitorStub_; + + ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub); + + public: + inline void updateFirstMonitorStub(ICStub* monitorStub) { + // This should only be called once: when the first optimized monitor stub + // is added to the type monitor IC chain. + MOZ_ASSERT(firstMonitorStub_ && firstMonitorStub_->isTypeMonitor_Fallback()); + firstMonitorStub_ = monitorStub; + } + inline void resetFirstMonitorStub(ICStub* monitorFallback) { + MOZ_ASSERT(monitorFallback->isTypeMonitor_Fallback()); + firstMonitorStub_ = monitorFallback; + } + inline ICStub* firstMonitorStub() const { + return firstMonitorStub_; + } + + static inline size_t offsetOfFirstMonitorStub() { + return offsetof(ICMonitoredStub, firstMonitorStub_); + } +}; + +class ICCacheIR_Monitored : public ICMonitoredStub +{ + const CacheIRStubInfo* stubInfo_; + + public: + ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub, + const CacheIRStubInfo* stubInfo) + : ICMonitoredStub(ICStub::CacheIR_Monitored, stubCode, firstMonitorStub), + stubInfo_(stubInfo) + {} + + static ICCacheIR_Monitored* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub, + ICCacheIR_Monitored& other); + + void notePreliminaryObject() { + extra_ = 1; + } + bool hasPreliminaryObject() const { + return extra_; + } + + const CacheIRStubInfo* stubInfo() const { + return stubInfo_; + } + + uint8_t* stubDataStart(); +}; + +// Updated stubs are IC stubs that use a TypeUpdate IC to track +// the status of heap typesets that need to be updated. +class ICUpdatedStub : public ICStub +{ + protected: + // Pointer to the start of the type updating stub chain. 
+ ICStub* firstUpdateStub_; + + static const uint32_t MAX_OPTIMIZED_STUBS = 8; + uint32_t numOptimizedStubs_; + + ICUpdatedStub(Kind kind, JitCode* stubCode) + : ICStub(kind, ICStub::Updated, stubCode), + firstUpdateStub_(nullptr), + numOptimizedStubs_(0) + {} + + public: + MOZ_MUST_USE bool initUpdatingChain(JSContext* cx, ICStubSpace* space); + + MOZ_MUST_USE bool addUpdateStubForValue(JSContext* cx, HandleScript script, HandleObject obj, + HandleObjectGroup group, HandleId id, HandleValue val); + + void addOptimizedUpdateStub(ICStub* stub) { + if (firstUpdateStub_->isTypeUpdate_Fallback()) { + stub->setNext(firstUpdateStub_); + firstUpdateStub_ = stub; + } else { + ICStub* iter = firstUpdateStub_; + MOZ_ASSERT(iter->next() != nullptr); + while (!iter->next()->isTypeUpdate_Fallback()) + iter = iter->next(); + MOZ_ASSERT(iter->next()->next() == nullptr); + stub->setNext(iter->next()); + iter->setNext(stub); + } + + numOptimizedStubs_++; + } + + inline ICStub* firstUpdateStub() const { + return firstUpdateStub_; + } + + void resetUpdateStubChain(Zone* zone); + + bool hasTypeUpdateStub(ICStub::Kind kind) { + ICStub* stub = firstUpdateStub_; + do { + if (stub->kind() == kind) + return true; + + stub = stub->next(); + } while (stub); + + return false; + } + + inline uint32_t numOptimizedStubs() const { + return numOptimizedStubs_; + } + + static inline size_t offsetOfFirstUpdateStub() { + return offsetof(ICUpdatedStub, firstUpdateStub_); + } +}; + +class ICCacheIR_Updated : public ICUpdatedStub +{ + const CacheIRStubInfo* stubInfo_; + GCPtrObjectGroup updateStubGroup_; + GCPtrId updateStubId_; + + public: + ICCacheIR_Updated(JitCode* stubCode, const CacheIRStubInfo* stubInfo) + : ICUpdatedStub(ICStub::CacheIR_Updated, stubCode), + stubInfo_(stubInfo), + updateStubGroup_(nullptr), + updateStubId_(JSID_EMPTY) + {} + + static ICCacheIR_Updated* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub, + ICCacheIR_Updated& other); + + GCPtrObjectGroup& 
updateStubGroup() { + return updateStubGroup_; + } + GCPtrId& updateStubId() { + return updateStubId_; + } + + void notePreliminaryObject() { + extra_ = 1; + } + bool hasPreliminaryObject() const { + return extra_; + } + + const CacheIRStubInfo* stubInfo() const { + return stubInfo_; + } + + uint8_t* stubDataStart(); +}; + +// Base class for stubcode compilers. +class ICStubCompiler +{ + // Prevent GC in the middle of stub compilation. + js::gc::AutoSuppressGC suppressGC; + + protected: + JSContext* cx; + ICStub::Kind kind; + bool inStubFrame_; + +#ifdef DEBUG + bool entersStubFrame_; + uint32_t framePushedAtEnterStubFrame_; +#endif + + // By default the stubcode key is just the kind. + virtual int32_t getKey() const { + return static_cast(kind); + } + + virtual MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) = 0; + virtual void postGenerateStubCode(MacroAssembler& masm, Handle genCode) {} + + JitCode* getStubCode(); + + ICStubCompiler(JSContext* cx, ICStub::Kind kind) + : suppressGC(cx), cx(cx), kind(kind), inStubFrame_(false) +#ifdef DEBUG + , entersStubFrame_(false), framePushedAtEnterStubFrame_(0) +#endif + {} + + // Push a payload specialized per compiler needed to execute stubs. + void PushStubPayload(MacroAssembler& masm, Register scratch); + void pushStubPayload(MacroAssembler& masm, Register scratch); + + // Emits a tail call to a VMFunction wrapper. + MOZ_MUST_USE bool tailCallVM(const VMFunction& fun, MacroAssembler& masm); + + // Emits a normal (non-tail) call to a VMFunction wrapper. + MOZ_MUST_USE bool callVM(const VMFunction& fun, MacroAssembler& masm); + + // A stub frame is used when a stub wants to call into the VM without + // performing a tail call. This is required for the return address + // to pc mapping to work. 
+ void enterStubFrame(MacroAssembler& masm, Register scratch); + void assumeStubFrame(); + void leaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false); + + public: + static inline AllocatableGeneralRegisterSet availableGeneralRegs(size_t numInputs) { + AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); +#if defined(JS_CODEGEN_ARM) + MOZ_ASSERT(!regs.has(BaselineStackReg)); + MOZ_ASSERT(!regs.has(ICTailCallReg)); + regs.take(BaselineSecondScratchReg); +#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + MOZ_ASSERT(!regs.has(BaselineStackReg)); + MOZ_ASSERT(!regs.has(ICTailCallReg)); + MOZ_ASSERT(!regs.has(BaselineSecondScratchReg)); +#elif defined(JS_CODEGEN_ARM64) + MOZ_ASSERT(!regs.has(PseudoStackPointer)); + MOZ_ASSERT(!regs.has(RealStackPointer)); + MOZ_ASSERT(!regs.has(ICTailCallReg)); +#else + MOZ_ASSERT(!regs.has(BaselineStackReg)); +#endif + regs.take(BaselineFrameReg); + regs.take(ICStubReg); +#ifdef JS_CODEGEN_X64 + regs.take(ExtractTemp0); + regs.take(ExtractTemp1); +#endif + + switch (numInputs) { + case 0: + break; + case 1: + regs.take(R0); + break; + case 2: + regs.take(R0); + regs.take(R1); + break; + default: + MOZ_CRASH("Invalid numInputs"); + } + + return regs; + } + + protected: + template + T* newStub(Args&&... args) { + return ICStub::New(cx, std::forward(args)...); + } + + public: + virtual ICStub* getStub(ICStubSpace* space) = 0; + + static ICStubSpace* StubSpaceForStub(bool makesGCCalls, JSScript* outerScript) { + if (makesGCCalls) { + return outerScript->baselineScript()->fallbackStubSpace(); + } + return outerScript->zone()->jitZone()->optimizedStubSpace(); + } + ICStubSpace* getStubSpace(JSScript* outerScript) { + return StubSpaceForStub(ICStub::NonCacheIRStubMakesGCCalls(kind), outerScript); + } +}; + // WarmUpCounter_Fallback // A WarmUpCounter IC chain has only the fallback stub. 
@@ -44,7 +1131,7 @@ class ICWarmUpCounter_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::WarmUpCounter_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::WarmUpCounter_Fallback) { } ICWarmUpCounter_Fallback* getStub(ICStubSpace* space) override { @@ -54,6 +1141,426 @@ class ICWarmUpCounter_Fallback : public ICFallbackStub }; +// Monitored fallback stubs - as the name implies. +class ICMonitoredFallbackStub : public ICFallbackStub +{ + protected: + // Pointer to the fallback monitor stub. Created lazily by + // getFallbackMonitorStub if needed. + ICTypeMonitor_Fallback* fallbackMonitorStub_; + + ICMonitoredFallbackStub(Kind kind, JitCode* stubCode) + : ICFallbackStub(kind, ICStub::MonitoredFallback, stubCode), + fallbackMonitorStub_(nullptr) {} + + public: + MOZ_MUST_USE bool initMonitoringChain(JSContext* cx, JSScript* script); + MOZ_MUST_USE bool addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, + StackTypeSet* types, HandleValue val); + + ICTypeMonitor_Fallback* maybeFallbackMonitorStub() const { + return fallbackMonitorStub_; + } + ICTypeMonitor_Fallback* getFallbackMonitorStub(JSContext* cx, JSScript* script) { + if (!fallbackMonitorStub_ && !initMonitoringChain(cx, script)) + return nullptr; + MOZ_ASSERT(fallbackMonitorStub_); + return fallbackMonitorStub_; + } + + static inline size_t offsetOfFallbackMonitorStub() { + return offsetof(ICMonitoredFallbackStub, fallbackMonitorStub_); + } +}; + +// TypeCheckPrimitiveSetStub +// Base class for IC stubs (TypeUpdate or TypeMonitor) that check that a given +// value's type falls within a set of primitive types. 
+ +class TypeCheckPrimitiveSetStub : public ICStub +{ + friend class ICStubSpace; + protected: + inline static uint16_t TypeToFlag(JSValueType type) { + return 1u << static_cast(type); + } + + inline static uint16_t ValidFlags() { + return ((TypeToFlag(JSVAL_TYPE_OBJECT) << 1) - 1) & ~TypeToFlag(JSVAL_TYPE_MAGIC); + } + + TypeCheckPrimitiveSetStub(Kind kind, JitCode* stubCode, uint16_t flags) + : ICStub(kind, stubCode) + { + MOZ_ASSERT(kind == TypeMonitor_PrimitiveSet || kind == TypeUpdate_PrimitiveSet); + MOZ_ASSERT(flags && !(flags & ~ValidFlags())); + extra_ = flags; + } + + TypeCheckPrimitiveSetStub* updateTypesAndCode(uint16_t flags, JitCode* code) { + MOZ_ASSERT(flags && !(flags & ~ValidFlags())); + if (!code) + return nullptr; + extra_ = flags; + updateCode(code); + return this; + } + + public: + uint16_t typeFlags() const { + return extra_; + } + + bool containsType(JSValueType type) const { + MOZ_ASSERT(type <= JSVAL_TYPE_OBJECT); + MOZ_ASSERT(type != JSVAL_TYPE_MAGIC); + return extra_ & TypeToFlag(type); + } + + ICTypeMonitor_PrimitiveSet* toMonitorStub() { + return toTypeMonitor_PrimitiveSet(); + } + + ICTypeUpdate_PrimitiveSet* toUpdateStub() { + return toTypeUpdate_PrimitiveSet(); + } + + class Compiler : public ICStubCompiler { + protected: + TypeCheckPrimitiveSetStub* existingStub_; + uint16_t flags_; + + virtual int32_t getKey() const override { + return static_cast(kind) | + (static_cast(flags_) << 16); + } + + public: + Compiler(JSContext* cx, Kind kind, TypeCheckPrimitiveSetStub* existingStub, + JSValueType type) + : ICStubCompiler(cx, kind), + existingStub_(existingStub), + flags_((existingStub ? 
existingStub->typeFlags() : 0) | TypeToFlag(type)) + { + MOZ_ASSERT_IF(existingStub_, flags_ != existingStub_->typeFlags()); + } + + TypeCheckPrimitiveSetStub* updateStub() { + MOZ_ASSERT(existingStub_); + return existingStub_->updateTypesAndCode(flags_, getStubCode()); + } + }; +}; + +// TypeMonitor + +// The TypeMonitor fallback stub is not always a regular fallback stub. When +// used for monitoring the values pushed by a bytecode it doesn't hold a +// pointer to the IC entry, but rather back to the main fallback stub for the +// IC (from which a pointer to the IC entry can be retrieved). When monitoring +// the types of 'this', arguments or other values with no associated IC, there +// is no main fallback stub, and the IC entry is referenced directly. +class ICTypeMonitor_Fallback : public ICStub +{ + friend class ICStubSpace; + + static const uint32_t MAX_OPTIMIZED_STUBS = 8; + + // Pointer to the main fallback stub for the IC or to the main IC entry, + // depending on hasFallbackStub. + union { + ICMonitoredFallbackStub* mainFallbackStub_; + ICEntry* icEntry_; + }; + + // Pointer to the first monitor stub. + ICStub* firstMonitorStub_; + + // Address of the last monitor stub's field pointing to this + // fallback monitor stub. This will get updated when new + // monitor stubs are created and added. + ICStub** lastMonitorStubPtrAddr_; + + // Count of optimized type monitor stubs in this chain. + uint32_t numOptimizedMonitorStubs_ : 7; + + uint32_t invalid_ : 1; + + // Whether this has a fallback stub referring to the IC entry. + bool hasFallbackStub_ : 1; + + // Index of 'this' or argument which is being monitored, or BYTECODE_INDEX + // if this is monitoring the types of values pushed at some bytecode. 
+ uint32_t argumentIndex_ : 23; + + static const uint32_t BYTECODE_INDEX = (1 << 23) - 1; + + ICTypeMonitor_Fallback(JitCode* stubCode, ICMonitoredFallbackStub* mainFallbackStub, + uint32_t argumentIndex) + : ICStub(ICStub::TypeMonitor_Fallback, stubCode), + mainFallbackStub_(mainFallbackStub), + firstMonitorStub_(thisFromCtor()), + lastMonitorStubPtrAddr_(nullptr), + numOptimizedMonitorStubs_(0), + invalid_(false), + hasFallbackStub_(mainFallbackStub != nullptr), + argumentIndex_(argumentIndex) + { } + + ICTypeMonitor_Fallback* thisFromCtor() { + return this; + } + + void addOptimizedMonitorStub(ICStub* stub) { + MOZ_ASSERT(!invalid()); + stub->setNext(this); + + MOZ_ASSERT((lastMonitorStubPtrAddr_ != nullptr) == + (numOptimizedMonitorStubs_ || !hasFallbackStub_)); + + if (lastMonitorStubPtrAddr_) + *lastMonitorStubPtrAddr_ = stub; + + if (numOptimizedMonitorStubs_ == 0) { + MOZ_ASSERT(firstMonitorStub_ == this); + firstMonitorStub_ = stub; + } else { + MOZ_ASSERT(firstMonitorStub_ != nullptr); + } + + lastMonitorStubPtrAddr_ = stub->addressOfNext(); + numOptimizedMonitorStubs_++; + } + + public: + bool hasStub(ICStub::Kind kind) { + ICStub* stub = firstMonitorStub_; + do { + if (stub->kind() == kind) + return true; + + stub = stub->next(); + } while (stub); + + return false; + } + + inline ICFallbackStub* mainFallbackStub() const { + MOZ_ASSERT(hasFallbackStub_); + return mainFallbackStub_; + } + + inline ICEntry* icEntry() const { + return hasFallbackStub_ ? 
mainFallbackStub()->icEntry() : icEntry_; + } + + inline ICStub* firstMonitorStub() const { + return firstMonitorStub_; + } + + static inline size_t offsetOfFirstMonitorStub() { + return offsetof(ICTypeMonitor_Fallback, firstMonitorStub_); + } + + inline uint32_t numOptimizedMonitorStubs() const { + return numOptimizedMonitorStubs_; + } + + void setInvalid() { + invalid_ = 1; + } + + bool invalid() const { + return invalid_; + } + + inline bool monitorsThis() const { + return argumentIndex_ == 0; + } + + inline bool monitorsArgument(uint32_t* pargument) const { + if (argumentIndex_ > 0 && argumentIndex_ < BYTECODE_INDEX) { + *pargument = argumentIndex_ - 1; + return true; + } + return false; + } + + inline bool monitorsBytecode() const { + return argumentIndex_ == BYTECODE_INDEX; + } + + // Fixup the IC entry as for a normal fallback stub, for this/arguments. + void fixupICEntry(ICEntry* icEntry) { + MOZ_ASSERT(!hasFallbackStub_); + MOZ_ASSERT(icEntry_ == nullptr); + MOZ_ASSERT(lastMonitorStubPtrAddr_ == nullptr); + icEntry_ = icEntry; + lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub(); + } + + // Create a new monitor stub for the type of the given value, and + // add it to this chain. + MOZ_MUST_USE bool addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, + StackTypeSet* types, HandleValue val); + + void resetMonitorStubChain(Zone* zone); + + // Compiler for this stub kind. 
+ class Compiler : public ICStubCompiler { + ICMonitoredFallbackStub* mainFallbackStub_; + uint32_t argumentIndex_; + + protected: + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + + public: + Compiler(JSContext* cx, ICMonitoredFallbackStub* mainFallbackStub) + : ICStubCompiler(cx, ICStub::TypeMonitor_Fallback), + mainFallbackStub_(mainFallbackStub), + argumentIndex_(BYTECODE_INDEX) + { } + + Compiler(JSContext* cx, uint32_t argumentIndex) + : ICStubCompiler(cx, ICStub::TypeMonitor_Fallback), + mainFallbackStub_(nullptr), + argumentIndex_(argumentIndex) + { } + + ICTypeMonitor_Fallback* getStub(ICStubSpace* space) override { + return newStub(space, getStubCode(), mainFallbackStub_, + argumentIndex_); + } + }; +}; + +class ICTypeMonitor_PrimitiveSet : public TypeCheckPrimitiveSetStub +{ + friend class ICStubSpace; + + ICTypeMonitor_PrimitiveSet(JitCode* stubCode, uint16_t flags) + : TypeCheckPrimitiveSetStub(TypeMonitor_PrimitiveSet, stubCode, flags) + {} + + public: + class Compiler : public TypeCheckPrimitiveSetStub::Compiler { + protected: + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + + public: + Compiler(JSContext* cx, ICTypeMonitor_PrimitiveSet* existingStub, + JSValueType type) + : TypeCheckPrimitiveSetStub::Compiler(cx, TypeMonitor_PrimitiveSet, existingStub, + type) + {} + + ICTypeMonitor_PrimitiveSet* updateStub() { + TypeCheckPrimitiveSetStub* stub = + this->TypeCheckPrimitiveSetStub::Compiler::updateStub(); + if (!stub) + return nullptr; + return stub->toMonitorStub(); + } + + ICTypeMonitor_PrimitiveSet* getStub(ICStubSpace* space) override { + MOZ_ASSERT(!existingStub_); + return newStub(space, getStubCode(), flags_); + } + }; +}; + +class ICTypeMonitor_SingleObject : public ICStub +{ + friend class ICStubSpace; + + GCPtrObject obj_; + + ICTypeMonitor_SingleObject(JitCode* stubCode, JSObject* obj); + + public: + GCPtrObject& object() { + return obj_; + } + + static size_t offsetOfObject() { + return 
offsetof(ICTypeMonitor_SingleObject, obj_); + } + + class Compiler : public ICStubCompiler { + protected: + HandleObject obj_; + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + + public: + Compiler(JSContext* cx, HandleObject obj) + : ICStubCompiler(cx, TypeMonitor_SingleObject), + obj_(obj) + { } + + ICTypeMonitor_SingleObject* getStub(ICStubSpace* space) override { + return newStub(space, getStubCode(), obj_); + } + }; +}; + +class ICTypeMonitor_ObjectGroup : public ICStub +{ + friend class ICStubSpace; + + GCPtrObjectGroup group_; + + ICTypeMonitor_ObjectGroup(JitCode* stubCode, ObjectGroup* group); + + public: + GCPtrObjectGroup& group() { + return group_; + } + + static size_t offsetOfGroup() { + return offsetof(ICTypeMonitor_ObjectGroup, group_); + } + + class Compiler : public ICStubCompiler { + protected: + HandleObjectGroup group_; + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + + public: + Compiler(JSContext* cx, HandleObjectGroup group) + : ICStubCompiler(cx, TypeMonitor_ObjectGroup), + group_(group) + { } + + ICTypeMonitor_ObjectGroup* getStub(ICStubSpace* space) override { + return newStub(space, getStubCode(), group_); + } + }; +}; + +class ICTypeMonitor_AnyValue : public ICStub +{ + friend class ICStubSpace; + + explicit ICTypeMonitor_AnyValue(JitCode* stubCode) + : ICStub(TypeMonitor_AnyValue, stubCode) + {} + + public: + class Compiler : public ICStubCompiler { + protected: + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + + public: + explicit Compiler(JSContext* cx) + : ICStubCompiler(cx, TypeMonitor_AnyValue) + { } + + ICTypeMonitor_AnyValue* getStub(ICStubSpace* space) override { + return newStub(space, getStubCode()); + } + }; +}; + // TypeUpdate extern const VMFunction DoTypeUpdateFallbackInfo; @@ -76,7 +1583,7 @@ class ICTypeUpdate_Fallback : public ICStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::TypeUpdate_Fallback, Engine::Baseline) + : 
ICStubCompiler(cx, ICStub::TypeUpdate_Fallback) { } ICTypeUpdate_Fallback* getStub(ICStubSpace* space) override { @@ -144,7 +1651,7 @@ class ICTypeUpdate_SingleObject : public ICStub public: Compiler(JSContext* cx, HandleObject obj) - : ICStubCompiler(cx, TypeUpdate_SingleObject, Engine::Baseline), + : ICStubCompiler(cx, TypeUpdate_SingleObject), obj_(obj) { } @@ -179,7 +1686,7 @@ class ICTypeUpdate_ObjectGroup : public ICStub public: Compiler(JSContext* cx, HandleObjectGroup group) - : ICStubCompiler(cx, TypeUpdate_ObjectGroup, Engine::Baseline), + : ICStubCompiler(cx, TypeUpdate_ObjectGroup), group_(group) { } @@ -204,7 +1711,7 @@ class ICTypeUpdate_AnyValue : public ICStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, TypeUpdate_AnyValue, Engine::Baseline) + : ICStubCompiler(cx, TypeUpdate_AnyValue) {} ICTypeUpdate_AnyValue* getStub(ICStubSpace* space) override { @@ -233,7 +1740,7 @@ class ICToBool_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::ToBool_Fallback, Engine::Baseline) {} + : ICStubCompiler(cx, ICStub::ToBool_Fallback) {} ICStub* getStub(ICStubSpace* space) override { return newStub(space, getStubCode()); @@ -259,7 +1766,7 @@ class ICToNumber_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::ToNumber_Fallback, Engine::Baseline) {} + : ICStubCompiler(cx, ICStub::ToNumber_Fallback) {} ICStub* getStub(ICStubSpace* space) override { return newStub(space, getStubCode()); @@ -303,14 +1810,13 @@ class ICGetElem_Fallback : public ICMonitoredFallbackStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(hasReceiver_) << 17); + return static_cast(kind) | + (static_cast(hasReceiver_) << 16); } public: explicit Compiler(JSContext* cx, bool hasReceiver = false) - : ICStubCompiler(cx, 
ICStub::GetElem_Fallback, Engine::Baseline), + : ICStubCompiler(cx, ICStub::GetElem_Fallback), hasReceiver_(hasReceiver) { } @@ -349,7 +1855,7 @@ class ICSetElem_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::SetElem_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::SetElem_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -375,7 +1881,7 @@ class ICIn_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::In_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::In_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -401,7 +1907,7 @@ class ICHasOwn_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::HasOwn_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::HasOwn_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -437,7 +1943,7 @@ class ICGetName_Fallback : public ICMonitoredFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::GetName_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::GetName_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -463,7 +1969,7 @@ class ICBindName_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::BindName_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::BindName_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -489,7 +1995,7 @@ class ICGetIntrinsic_Fallback : public ICMonitoredFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::GetIntrinsic_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::GetIntrinsic_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -498,6 +2004,61 @@ class ICGetIntrinsic_Fallback : public ICMonitoredFallbackStub }; }; +// GetProp +// JSOP_GETPROP +// JSOP_GETPROP_SUPER + +class ICGetProp_Fallback : public 
ICMonitoredFallbackStub +{ + friend class ICStubSpace; + + explicit ICGetProp_Fallback(JitCode* stubCode) + : ICMonitoredFallbackStub(ICStub::GetProp_Fallback, stubCode) + { } + + public: + static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0; + static const size_t ACCESSED_GETTER_BIT = 1; + + void noteUnoptimizableAccess() { + extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT); + } + bool hadUnoptimizableAccess() const { + return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT); + } + + void noteAccessedGetter() { + extra_ |= (1u << ACCESSED_GETTER_BIT); + } + bool hasAccessedGetter() const { + return extra_ & (1u << ACCESSED_GETTER_BIT); + } + + class Compiler : public ICStubCompiler { + protected: + CodeOffset bailoutReturnOffset_; + bool hasReceiver_; + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + void postGenerateStubCode(MacroAssembler& masm, Handle code) override; + + virtual int32_t getKey() const override { + return static_cast(kind) | + (static_cast(hasReceiver_) << 16); + } + + public: + explicit Compiler(JSContext* cx, bool hasReceiver = false) + : ICStubCompiler(cx, ICStub::GetProp_Fallback), + hasReceiver_(hasReceiver) + { } + + ICStub* getStub(ICStubSpace* space) override { + return newStub(space, getStubCode()); + } + }; +}; + + // SetProp // JSOP_SETPROP // JSOP_SETNAME @@ -529,7 +2090,7 @@ class ICSetProp_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::SetProp_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::SetProp_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -552,7 +2113,7 @@ class ICCallStubCompiler : public ICStubCompiler { protected: ICCallStubCompiler(JSContext* cx, ICStub::Kind kind) - : ICStubCompiler(cx, kind, Engine::Baseline) + : ICStubCompiler(cx, kind) { } enum FunApplyThing { @@ -613,10 +2174,9 @@ class ICCall_Fallback : public ICMonitoredFallbackStub void postGenerateStubCode(MacroAssembler& masm, Handle code) override; virtual int32_t 
getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(isSpread_) << 17) | - (static_cast(isConstructing_) << 18); + return static_cast(kind) | + (static_cast(isSpread_) << 16) | + (static_cast(isConstructing_) << 17); } public: @@ -703,11 +2263,10 @@ class ICCallScriptedCompiler : public ICCallStubCompiler { MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(isConstructing_) << 17) | - (static_cast(isSpread_) << 18) | - (static_cast(maybeCrossRealm_) << 19); + return static_cast(kind) | + (static_cast(isConstructing_) << 16) | + (static_cast(isSpread_) << 17) | + (static_cast(maybeCrossRealm_) << 18); } public: @@ -801,12 +2360,11 @@ class ICCall_Native : public ICMonitoredStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(isSpread_) << 17) | - (static_cast(isConstructing_) << 18) | - (static_cast(ignoresReturnValue_) << 19) | - (static_cast(isCrossRealm_) << 20); + return static_cast(kind) | + (static_cast(isSpread_) << 16) | + (static_cast(isConstructing_) << 17) | + (static_cast(ignoresReturnValue_) << 18) | + (static_cast(isCrossRealm_) << 19); } public: @@ -882,9 +2440,8 @@ class ICCall_ClassHook : public ICMonitoredStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(isConstructing_) << 17); + return static_cast(kind) | + (static_cast(isConstructing_) << 16); } public: @@ -942,11 +2499,6 @@ class ICCall_ScriptedApplyArray : public ICMonitoredStub uint32_t pcOffset_; MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - virtual int32_t getKey() const override { - return static_cast(engine_) | - 
(static_cast(kind) << 1); - } - public: Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset) : ICCallStubCompiler(cx, ICStub::Call_ScriptedApplyArray), @@ -990,11 +2542,6 @@ class ICCall_ScriptedApplyArguments : public ICMonitoredStub uint32_t pcOffset_; MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1); - } - public: Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset) : ICCallStubCompiler(cx, ICStub::Call_ScriptedApplyArguments), @@ -1037,11 +2584,6 @@ class ICCall_ScriptedFunCall : public ICMonitoredStub uint32_t pcOffset_; MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1); - } - public: Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset) : ICCallStubCompiler(cx, ICStub::Call_ScriptedFunCall), @@ -1108,11 +2650,6 @@ class ICCall_ConstStringSplit : public ICMonitoredStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1); - } - public: Compiler(JSContext* cx, ICStub* firstMonitorStub, uint32_t pcOffset, HandleString str, HandleString sep, HandleArrayObject templateObject) @@ -1148,7 +2685,7 @@ class ICCall_IsSuspendedGenerator : public ICStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::Call_IsSuspendedGenerator, Engine::Baseline) + : ICStubCompiler(cx, ICStub::Call_IsSuspendedGenerator) {} ICStub* getStub(ICStubSpace* space) override { return newStub(space, getStubCode()); @@ -1184,7 +2721,7 @@ class ICTableSwitch : public ICStub public: Compiler(JSContext* cx, jsbytecode* pc) - : ICStubCompiler(cx, ICStub::TableSwitch, Engine::Baseline), pc_(pc) + : ICStubCompiler(cx, ICStub::TableSwitch), pc_(pc) {} ICStub* getStub(ICStubSpace* 
space) override; @@ -1207,7 +2744,7 @@ class ICGetIterator_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::GetIterator_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::GetIterator_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -1240,7 +2777,7 @@ class ICIteratorMore_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::IteratorMore_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::IteratorMore_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -1265,7 +2802,7 @@ class ICIteratorMore_Native : public ICStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::IteratorMore_Native, Engine::Baseline) + : ICStubCompiler(cx, ICStub::IteratorMore_Native) { } ICStub* getStub(ICStubSpace* space) override { @@ -1290,7 +2827,7 @@ class ICIteratorClose_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::IteratorClose_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::IteratorClose_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -1326,7 +2863,7 @@ class ICInstanceOf_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::InstanceOf_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::InstanceOf_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -1355,7 +2892,7 @@ class ICTypeOf_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::TypeOf_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::TypeOf_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -1388,7 +2925,7 @@ class ICRest_Fallback : public ICFallbackStub public: Compiler(JSContext* cx, ArrayObject* templateObject) - : ICStubCompiler(cx, ICStub::Rest_Fallback, Engine::Baseline), + : ICStubCompiler(cx, ICStub::Rest_Fallback), 
templateObject(cx, templateObject) { } @@ -1416,7 +2953,7 @@ class ICRetSub_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::RetSub_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::RetSub_Fallback) { } ICStub* getStub(ICStubSpace* space) override { @@ -1457,7 +2994,7 @@ class ICRetSub_Resume : public ICStub public: Compiler(JSContext* cx, uint32_t pcOffset, uint8_t* addr) - : ICStubCompiler(cx, ICStub::RetSub_Resume, Engine::Baseline), + : ICStubCompiler(cx, ICStub::RetSub_Resume), pcOffset_(pcOffset), addr_(addr) { } @@ -1497,7 +3034,7 @@ class ICUnaryArith_Fallback : public ICFallbackStub public: explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, ICStub::UnaryArith_Fallback, Engine::Baseline) + : ICStubCompiler(cx, ICStub::UnaryArith_Fallback) {} ICStub* getStub(ICStubSpace* space) override { @@ -1506,6 +3043,50 @@ class ICUnaryArith_Fallback : public ICFallbackStub }; }; +// Compare +// JSOP_LT +// JSOP_LE +// JSOP_GT +// JSOP_GE +// JSOP_EQ +// JSOP_NE +// JSOP_STRICTEQ +// JSOP_STRICTNE + +class ICCompare_Fallback : public ICFallbackStub +{ + friend class ICStubSpace; + + explicit ICCompare_Fallback(JitCode* stubCode) + : ICFallbackStub(ICStub::Compare_Fallback, stubCode) {} + + public: + static const uint32_t MAX_OPTIMIZED_STUBS = 8; + + static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0; + void noteUnoptimizableAccess() { + extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT); + } + bool hadUnoptimizableAccess() const { + return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT); + } + + // Compiler for this stub kind. 
+ class Compiler : public ICStubCompiler { + protected: + MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; + + public: + explicit Compiler(JSContext* cx) + : ICStubCompiler(cx, ICStub::Compare_Fallback) {} + + ICStub* getStub(ICStubSpace* space) override { + return newStub(space, getStubCode()); + } + }; +}; + + // BinaryArith // JSOP_ADD, JSOP_SUB, JSOP_MUL, JOP_DIV, JSOP_MOD // JSOP_BITAND, JSOP_BITXOR, JSOP_BITOR @@ -1546,8 +3127,8 @@ class ICBinaryArith_Fallback : public ICFallbackStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; public: - explicit Compiler(JSContext* cx, Engine engine) - : ICStubCompiler(cx, ICStub::BinaryArith_Fallback, engine) {} + explicit Compiler(JSContext* cx) + : ICStubCompiler(cx, ICStub::BinaryArith_Fallback) {} ICStub* getStub(ICStubSpace* space) override { return newStub(space, getStubCode()); @@ -1578,8 +3159,8 @@ class ICNewArray_Fallback : public ICFallbackStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; public: - Compiler(JSContext* cx, ObjectGroup* templateGroup, Engine engine) - : ICStubCompiler(cx, ICStub::NewArray_Fallback, engine), + Compiler(JSContext* cx, ObjectGroup* templateGroup) + : ICStubCompiler(cx, ICStub::NewArray_Fallback), templateGroup(cx, templateGroup) {} @@ -1625,8 +3206,8 @@ class ICNewObject_Fallback : public ICFallbackStub MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; public: - explicit Compiler(JSContext* cx, Engine engine) - : ICStubCompiler(cx, ICStub::NewObject_Fallback, engine) + explicit Compiler(JSContext* cx) + : ICStubCompiler(cx, ICStub::NewObject_Fallback) {} ICStub* getStub(ICStubSpace* space) override { diff --git a/js/src/jit/BaselineICList.h b/js/src/jit/BaselineICList.h index e33f09d39db8..0c5c4b52ae0b 100644 --- a/js/src/jit/BaselineICList.h +++ b/js/src/jit/BaselineICList.h @@ -75,8 +75,18 @@ namespace jit { \ _(Rest_Fallback) \ \ + _(BinaryArith_Fallback) \ + \ + _(Compare_Fallback) \ + \ + 
_(GetProp_Fallback) \ + \ _(RetSub_Fallback) \ - _(RetSub_Resume) + _(RetSub_Resume) \ + \ + _(CacheIR_Regular) \ + _(CacheIR_Monitored) \ + _(CacheIR_Updated) \ } // namespace jit } // namespace js diff --git a/js/src/jit/BaselineInspector.h b/js/src/jit/BaselineInspector.h index 4c3b4cc2930f..4e854c2d4819 100644 --- a/js/src/jit/BaselineInspector.h +++ b/js/src/jit/BaselineInspector.h @@ -43,7 +43,7 @@ class BaselineInspector { private: JSScript* script; - BaselineICEntry* prevLookedUpEntry; + ICEntry* prevLookedUpEntry; public: explicit BaselineInspector(JSScript* script) @@ -67,20 +67,20 @@ class BaselineInspector } #endif - BaselineICEntry& icEntryFromPC(jsbytecode* pc) { + ICEntry& icEntryFromPC(jsbytecode* pc) { MOZ_ASSERT(hasBaselineScript()); MOZ_ASSERT(isValidPC(pc)); - BaselineICEntry& ent = + ICEntry& ent = baselineScript()->icEntryFromPCOffset(script->pcToOffset(pc), prevLookedUpEntry); MOZ_ASSERT(ent.isForOp()); prevLookedUpEntry = &ent; return ent; } - BaselineICEntry* maybeICEntryFromPC(jsbytecode* pc) { + ICEntry* maybeICEntryFromPC(jsbytecode* pc) { MOZ_ASSERT(hasBaselineScript()); MOZ_ASSERT(isValidPC(pc)); - BaselineICEntry* ent = + ICEntry* ent = baselineScript()->maybeICEntryFromPCOffset(script->pcToOffset(pc), prevLookedUpEntry); if (!ent) return nullptr; @@ -91,7 +91,7 @@ class BaselineInspector template ICInspectorType makeICInspector(jsbytecode* pc, ICStub::Kind expectedFallbackKind) { - BaselineICEntry* ent = nullptr; + ICEntry* ent = nullptr; if (hasBaselineScript()) { ent = &icEntryFromPC(pc); MOZ_ASSERT(ent->fallbackStub()->kind() == expectedFallbackKind); diff --git a/js/src/jit/BaselineJIT.cpp b/js/src/jit/BaselineJIT.cpp index e1db930a43e6..ccd8d74cdb9f 100644 --- a/js/src/jit/BaselineJIT.cpp +++ b/js/src/jit/BaselineJIT.cpp @@ -244,7 +244,7 @@ jit::EnterBaselineAtBranch(JSContext* cx, InterpreterFrame* fp, jsbytecode* pc) MethodStatus jit::BaselineCompile(JSContext* cx, JSScript* script, bool forceDebugInstrumentation) { - 
assertSameCompartment(cx, script); + cx->check(script); MOZ_ASSERT(!script->hasBaselineScript()); MOZ_ASSERT(script->canBaselineCompile()); MOZ_ASSERT(IsBaselineEnabled(cx)); @@ -377,7 +377,7 @@ BaselineScript::New(JSScript* jsscript, { static const unsigned DataAlignment = sizeof(uintptr_t); - size_t icEntriesSize = icEntries * sizeof(BaselineICEntry); + size_t icEntriesSize = icEntries * sizeof(ICEntry); size_t pcMappingIndexEntriesSize = pcMappingIndexEntries * sizeof(PCMappingIndexEntry); size_t bytecodeTypeMapSize = bytecodeTypeMapEntries * sizeof(uint32_t); size_t yieldEntriesSize = yieldEntries * sizeof(uintptr_t); @@ -441,7 +441,7 @@ BaselineScript::trace(JSTracer* trc) // Mark all IC stub codes hanging off the IC stub entries. for (size_t i = 0; i < numICEntries(); i++) { - BaselineICEntry& ent = icEntry(i); + ICEntry& ent = icEntry(i); ent.trace(trc); } } @@ -536,7 +536,7 @@ BaselineScript::removeDependentWasmImport(wasm::Instance& instance, uint32_t idx } } -BaselineICEntry& +ICEntry& BaselineScript::icEntry(size_t index) { MOZ_ASSERT(index < numICEntries()); @@ -569,12 +569,12 @@ struct ICEntries explicit ICEntries(BaselineScript* baseline) : baseline_(baseline) {} - BaselineICEntry& operator[](size_t index) const { + ICEntry& operator[](size_t index) const { return baseline_->icEntry(index); } }; -BaselineICEntry& +ICEntry& BaselineScript::icEntryFromReturnOffset(CodeOffset returnOffset) { size_t loc; @@ -582,7 +582,7 @@ BaselineScript::icEntryFromReturnOffset(CodeOffset returnOffset) bool found = #endif BinarySearchIf(ICEntries(this), 0, numICEntries(), - [&returnOffset](BaselineICEntry& entry) { + [&returnOffset](ICEntry& entry) { size_t roffset = returnOffset.offset(); size_t entryRoffset = entry.returnOffset().offset(); if (roffset < entryRoffset) @@ -603,7 +603,7 @@ static inline bool ComputeBinarySearchMid(BaselineScript* baseline, uint32_t pcOffset, size_t* loc) { return BinarySearchIf(ICEntries(baseline), 0, baseline->numICEntries(), - 
[pcOffset](BaselineICEntry& entry) { + [pcOffset](ICEntry& entry) { uint32_t entryOffset = entry.pcOffset(); if (pcOffset < entryOffset) return -1; @@ -615,12 +615,12 @@ ComputeBinarySearchMid(BaselineScript* baseline, uint32_t pcOffset, size_t* loc) } uint8_t* -BaselineScript::returnAddressForIC(const BaselineICEntry& ent) +BaselineScript::returnAddressForIC(const ICEntry& ent) { return method()->raw() + ent.returnOffset().offset(); } -BaselineICEntry* +ICEntry* BaselineScript::maybeICEntryFromPCOffset(uint32_t pcOffset) { // Multiple IC entries can have the same PC offset, but this method only looks for @@ -647,25 +647,25 @@ BaselineScript::maybeICEntryFromPCOffset(uint32_t pcOffset) return nullptr; } -BaselineICEntry& +ICEntry& BaselineScript::icEntryFromPCOffset(uint32_t pcOffset) { - BaselineICEntry* entry = maybeICEntryFromPCOffset(pcOffset); + ICEntry* entry = maybeICEntryFromPCOffset(pcOffset); MOZ_RELEASE_ASSERT(entry); return *entry; } -BaselineICEntry* -BaselineScript::maybeICEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* prevLookedUpEntry) +ICEntry* +BaselineScript::maybeICEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry) { // Do a linear forward search from the last queried PC offset, or fallback to a // binary search if the last offset is too far away. 
if (prevLookedUpEntry && pcOffset >= prevLookedUpEntry->pcOffset() && (pcOffset - prevLookedUpEntry->pcOffset()) <= 10) { - BaselineICEntry* firstEntry = &icEntry(0); - BaselineICEntry* lastEntry = &icEntry(numICEntries() - 1); - BaselineICEntry* curEntry = prevLookedUpEntry; + ICEntry* firstEntry = &icEntry(0); + ICEntry* lastEntry = &icEntry(numICEntries() - 1); + ICEntry* curEntry = prevLookedUpEntry; while (curEntry >= firstEntry && curEntry <= lastEntry) { if (curEntry->pcOffset() == pcOffset && curEntry->isForOp()) return curEntry; @@ -677,15 +677,15 @@ BaselineScript::maybeICEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* pre return maybeICEntryFromPCOffset(pcOffset); } -BaselineICEntry& -BaselineScript::icEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* prevLookedUpEntry) +ICEntry& +BaselineScript::icEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry) { - BaselineICEntry* entry = maybeICEntryFromPCOffset(pcOffset, prevLookedUpEntry); + ICEntry* entry = maybeICEntryFromPCOffset(pcOffset, prevLookedUpEntry); MOZ_RELEASE_ASSERT(entry); return *entry; } -BaselineICEntry& +ICEntry& BaselineScript::callVMEntryFromPCOffset(uint32_t pcOffset) { // Like icEntryFromPCOffset, but only looks for the fake ICEntries @@ -707,7 +707,7 @@ BaselineScript::callVMEntryFromPCOffset(uint32_t pcOffset) MOZ_CRASH("Invalid PC offset for callVM entry."); } -BaselineICEntry& +ICEntry& BaselineScript::stackCheckICEntry(bool earlyCheck) { // The stack check will always be at offset 0, so just do a linear search @@ -722,7 +722,7 @@ BaselineScript::stackCheckICEntry(bool earlyCheck) MOZ_CRASH("No stack check ICEntry found."); } -BaselineICEntry& +ICEntry& BaselineScript::warmupCountICEntry() { // The stack check will be at a very low offset, so just do a linear search @@ -734,7 +734,7 @@ BaselineScript::warmupCountICEntry() MOZ_CRASH("No warmup count ICEntry found."); } -BaselineICEntry& +ICEntry& BaselineScript::icEntryFromReturnAddress(uint8_t* returnAddr) { 
MOZ_ASSERT(returnAddr > method_->raw()); @@ -755,12 +755,12 @@ BaselineScript::copyYieldAndAwaitEntries(JSScript* script, Vector& yie } void -BaselineScript::copyICEntries(JSScript* script, const BaselineICEntry* entries) +BaselineScript::copyICEntries(JSScript* script, const ICEntry* entries) { // Fix up the return offset in the IC entries and copy them in. // Also write out the IC entry ptrs in any fallback stubs that were added. for (uint32_t i = 0; i < numICEntries(); i++) { - BaselineICEntry& realEntry = icEntry(i); + ICEntry& realEntry = icEntry(i); realEntry = entries[i]; if (!realEntry.hasStub()) { @@ -1056,7 +1056,7 @@ BaselineScript::purgeOptimizedStubs(Zone* zone) JitSpew(JitSpew_BaselineIC, "Purging optimized stubs"); for (size_t i = 0; i < numICEntries(); i++) { - BaselineICEntry& entry = icEntry(i); + ICEntry& entry = icEntry(i); if (!entry.hasStub()) continue; @@ -1098,7 +1098,7 @@ BaselineScript::purgeOptimizedStubs(Zone* zone) #ifdef DEBUG // All remaining stubs must be allocated in the fallback space. 
for (size_t i = 0; i < numICEntries(); i++) { - BaselineICEntry& entry = icEntry(i); + ICEntry& entry = icEntry(i); if (!entry.hasStub()) continue; diff --git a/js/src/jit/BaselineJIT.h b/js/src/jit/BaselineJIT.h index 359ff558dbf6..fea59361ef8e 100644 --- a/js/src/jit/BaselineJIT.h +++ b/js/src/jit/BaselineJIT.h @@ -22,7 +22,7 @@ namespace js { namespace jit { class StackValue; -class BaselineICEntry; +class ICEntry; class ICStub; class ControlFlowGraph; @@ -355,8 +355,8 @@ struct BaselineScript return method_->raw() + postDebugPrologueOffset_; } - BaselineICEntry* icEntryList() { - return (BaselineICEntry*)(reinterpret_cast(this) + icEntriesOffset_); + ICEntry* icEntryList() { + return (ICEntry*)(reinterpret_cast(this) + icEntriesOffset_); } uint8_t** yieldEntryList() { return (uint8_t**)(reinterpret_cast(this) + yieldEntriesOffset_); @@ -391,25 +391,25 @@ struct BaselineScript return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); } - BaselineICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset); - BaselineICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset, - BaselineICEntry* prevLookedUpEntry); + ICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset); + ICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset, + ICEntry* prevLookedUpEntry); - BaselineICEntry& icEntry(size_t index); - BaselineICEntry& icEntryFromReturnOffset(CodeOffset returnOffset); - BaselineICEntry& icEntryFromPCOffset(uint32_t pcOffset); - BaselineICEntry& icEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* prevLookedUpEntry); - BaselineICEntry& callVMEntryFromPCOffset(uint32_t pcOffset); - BaselineICEntry& stackCheckICEntry(bool earlyCheck); - BaselineICEntry& warmupCountICEntry(); - BaselineICEntry& icEntryFromReturnAddress(uint8_t* returnAddr); - uint8_t* returnAddressForIC(const BaselineICEntry& ent); + ICEntry& icEntry(size_t index); + ICEntry& icEntryFromReturnOffset(CodeOffset returnOffset); + ICEntry& icEntryFromPCOffset(uint32_t pcOffset); + ICEntry& 
icEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry); + ICEntry& callVMEntryFromPCOffset(uint32_t pcOffset); + ICEntry& stackCheckICEntry(bool earlyCheck); + ICEntry& warmupCountICEntry(); + ICEntry& icEntryFromReturnAddress(uint8_t* returnAddr); + uint8_t* returnAddressForIC(const ICEntry& ent); size_t numICEntries() const { return icEntries_; } - void copyICEntries(JSScript* script, const BaselineICEntry* entries); + void copyICEntries(JSScript* script, const ICEntry* entries); void adoptFallbackStubs(FallbackICStubSpace* stubSpace); void copyYieldAndAwaitEntries(JSScript* script, Vector& yieldAndAwaitOffsets); diff --git a/js/src/jit/CacheIR.cpp b/js/src/jit/CacheIR.cpp index 4a15b13509bc..9868403f0193 100644 --- a/js/src/jit/CacheIR.cpp +++ b/js/src/jit/CacheIR.cpp @@ -33,8 +33,9 @@ const char* const js::jit::CacheKindNames[] = { }; void -CacheIRWriter::assertSameCompartment(JSObject* obj) { - assertSameCompartmentDebugOnly(cx_, obj); +CacheIRWriter::assertSameCompartment(JSObject* obj) +{ + cx_->debugOnlyCheck(obj); } StubField @@ -5721,4 +5722,4 @@ NewObjectIRGenerator::tryAttachStub() trackAttached("NewObjectWithTemplate"); return true; -} \ No newline at end of file +} diff --git a/js/src/jit/CacheIR.h b/js/src/jit/CacheIR.h index ff5c390cdcfc..5aa506cd8e07 100644 --- a/js/src/jit/CacheIR.h +++ b/js/src/jit/CacheIR.h @@ -14,7 +14,9 @@ #include "gc/Rooting.h" #include "jit/CompactBuffer.h" #include "jit/ICState.h" -#include "jit/SharedIC.h" +#include "jit/MacroAssembler.h" +#include "vm/Iteration.h" +#include "vm/Shape.h" namespace js { namespace jit { @@ -422,6 +424,13 @@ enum class GuardClassKind : uint8_t // zone, which refer to the actual shape via a reserved slot. JSObject* NewWrapperWithObjectShape(JSContext* cx, HandleNativeObject obj); +// Enum for stubs handling a combination of typed arrays and typed objects. 
+enum TypedThingLayout { + Layout_TypedArray, + Layout_OutlineTypedObject, + Layout_InlineTypedObject +}; + void LoadShapeWrapperContents(MacroAssembler& masm, Register obj, Register dst, Label* failure); // Class to record CacheIR + some additional metadata for code generation. @@ -1976,6 +1985,35 @@ class MOZ_RAII NewObjectIRGenerator : public IRGenerator bool tryAttachStub(); }; +static inline uint32_t +SimpleTypeDescrKey(SimpleTypeDescr* descr) +{ + if (descr->is()) + return uint32_t(descr->as().type()) << 1; + return (uint32_t(descr->as().type()) << 1) | 1; +} + +inline bool +SimpleTypeDescrKeyIsScalar(uint32_t key) +{ + return !(key & 1); +} + +inline ScalarTypeDescr::Type +ScalarTypeFromSimpleTypeDescrKey(uint32_t key) +{ + MOZ_ASSERT(SimpleTypeDescrKeyIsScalar(key)); + return ScalarTypeDescr::Type(key >> 1); +} + +inline ReferenceType +ReferenceTypeFromSimpleTypeDescrKey(uint32_t key) +{ + MOZ_ASSERT(!SimpleTypeDescrKeyIsScalar(key)); + return ReferenceType(key >> 1); +} + + } // namespace jit } // namespace js diff --git a/js/src/jit/CacheIRCompiler.cpp b/js/src/jit/CacheIRCompiler.cpp index 16475c0476a8..24055535969a 100644 --- a/js/src/jit/CacheIRCompiler.cpp +++ b/js/src/jit/CacheIRCompiler.cpp @@ -3798,3 +3798,37 @@ CacheIRCompiler::emitLoadObject() emitLoadStubField(obj, reg); return true; } + +void +js::jit::LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result) +{ + switch (layout) { + case Layout_TypedArray: + masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), result); + break; + case Layout_OutlineTypedObject: + masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), result); + break; + case Layout_InlineTypedObject: + masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), result); + break; + default: + MOZ_CRASH(); + } +} + +void +js::jit::LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result) +{ + switch (layout) { + 
case Layout_TypedArray: + masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), result); + break; + case Layout_OutlineTypedObject: + case Layout_InlineTypedObject: + masm.loadTypedObjectLength(obj, result); + break; + default: + MOZ_CRASH(); + } +} \ No newline at end of file diff --git a/js/src/jit/CacheIRCompiler.h b/js/src/jit/CacheIRCompiler.h index 5d7a0c72ec1c..1b39b9e676fe 100644 --- a/js/src/jit/CacheIRCompiler.h +++ b/js/src/jit/CacheIRCompiler.h @@ -902,6 +902,12 @@ class CacheIRStubInfo template void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo); +void +LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result); + +void +LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result); + } // namespace jit } // namespace js diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp index 4b6740009721..0d3c3a9a7783 100644 --- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -2864,43 +2864,6 @@ CodeGenerator::visitStringReplace(LStringReplace* lir) callVM(StringReplaceInfo, lir); } -void -CodeGenerator::emitSharedStub(ICStub::Kind kind, LInstruction* lir) -{ - JSScript* script = lir->mirRaw()->block()->info().script(); - jsbytecode* pc = lir->mirRaw()->toInstruction()->resumePoint()->pc(); - -#ifdef JS_USE_LINK_REGISTER - // Some architectures don't push the return address on the stack but - // use the link register. In that case the stack isn't aligned. Push - // to make sure we are aligned. - masm.Push(Imm32(0)); -#endif - - // Create descriptor signifying end of Ion frame. - uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS, - JitStubFrameLayout::Size()); - masm.Push(Imm32(descriptor)); - - // Call into the stubcode. 
- CodeOffset patchOffset; - IonICEntry entry(script->pcToOffset(pc), ICEntry::Kind_Op, script); - EmitCallIC(&patchOffset, masm); - entry.setReturnOffset(CodeOffset(masm.currentOffset())); - - SharedStub sharedStub(kind, entry, patchOffset); - masm.propagateOOM(sharedStubs_.append(sharedStub)); - - // Fix up upon return. - uint32_t callOffset = masm.currentOffset(); -#ifdef JS_USE_LINK_REGISTER - masm.freeStack(sizeof(intptr_t) * 2); -#else - masm.freeStack(sizeof(intptr_t)); -#endif - markSafepointAt(callOffset, lir); -} - void CodeGenerator::visitBinaryCache(LBinaryCache* lir) { @@ -2939,34 +2902,6 @@ CodeGenerator::visitBinaryCache(LBinaryCache* lir) } } -void -CodeGenerator::visitBinarySharedStub(LBinarySharedStub* lir) -{ - JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc()); - switch (jsop) { - case JSOP_ADD: - case JSOP_SUB: - case JSOP_MUL: - case JSOP_DIV: - case JSOP_MOD: - case JSOP_POW: - emitSharedStub(ICStub::Kind::BinaryArith_Fallback, lir); - break; - case JSOP_LT: - case JSOP_LE: - case JSOP_GT: - case JSOP_GE: - case JSOP_EQ: - case JSOP_NE: - case JSOP_STRICTEQ: - case JSOP_STRICTNE: - emitSharedStub(ICStub::Kind::Compare_Fallback, lir); - break; - default: - MOZ_CRASH("Unsupported jsop in shared stubs."); - } -} - void CodeGenerator::visitUnaryCache(LUnaryCache* lir) { @@ -2978,32 +2913,6 @@ CodeGenerator::visitUnaryCache(LUnaryCache* lir) addIC(lir, allocateIC(ic)); } -void -CodeGenerator::visitNullarySharedStub(LNullarySharedStub* lir) -{ - jsbytecode* pc = lir->mir()->resumePoint()->pc(); - JSOp jsop = JSOp(*pc); - switch (jsop) { - case JSOP_NEWARRAY: { - uint32_t length = GET_UINT32(pc); - MOZ_ASSERT(length <= INT32_MAX, - "the bytecode emitter must fail to compile code that would " - "produce JSOP_NEWARRAY with a length exceeding int32_t range"); - - // Pass length in R0. 
- masm.move32(Imm32(AssertedCast(length)), R0.scratchReg()); - emitSharedStub(ICStub::Kind::NewArray_Fallback, lir); - break; - } - case JSOP_NEWINIT: - case JSOP_NEWOBJECT: - emitSharedStub(ICStub::Kind::NewObject_Fallback, lir); - break; - default: - MOZ_CRASH("Unsupported jsop in shared stubs."); - } -} - typedef JSFunction* (*MakeDefaultConstructorFn)(JSContext*, HandleScript, jsbytecode*, HandleObject); static const VMFunction MakeDefaultConstructorInfo = @@ -10386,51 +10295,6 @@ CodeGenerator::generate() return !masm.oom(); } -bool -CodeGenerator::linkSharedStubs(JSContext* cx) -{ - for (uint32_t i = 0; i < sharedStubs_.length(); i++) { - ICStub *stub = nullptr; - - switch (sharedStubs_[i].kind) { - case ICStub::Kind::Compare_Fallback: { - ICCompare_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC); - stub = stubCompiler.getStub(&stubSpace_); - break; - } - case ICStub::Kind::GetProp_Fallback: { - ICGetProp_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC); - stub = stubCompiler.getStub(&stubSpace_); - break; - } - case ICStub::Kind::NewArray_Fallback: { - JSScript* script = sharedStubs_[i].entry.script(); - jsbytecode* pc = sharedStubs_[i].entry.pc(script); - ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array); - if (!group) - return false; - - ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::IonSharedIC); - stub = stubCompiler.getStub(&stubSpace_); - break; - } - case ICStub::Kind::NewObject_Fallback: { - ICNewObject_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC); - stub = stubCompiler.getStub(&stubSpace_); - break; - } - default: - MOZ_CRASH("Unsupported shared stub."); - } - - if (!stub) - return false; - - sharedStubs_[i].entry.setFirstStub(stub); - } - return true; -} - bool CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints) { @@ -10459,9 +10323,6 @@ CodeGenerator::link(JSContext* cx, 
CompilerConstraintList* constraints) if (scriptCounts_ && !script->hasScriptCounts() && !script->initScriptCounts(cx)) return false; - if (!linkSharedStubs(cx)) - return false; - // Check to make sure we didn't have a mid-build invalidation. If so, we // will trickle to jit::Compile() and return Method_Skipped. uint32_t warmUpCount = script->getWarmUpCount(); @@ -10504,8 +10365,7 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints) recovers_.size(), bailouts_.length(), graph.numConstants(), safepointIndices_.length(), osiIndices_.length(), icList_.length(), runtimeData_.length(), - safepoints_.size(), sharedStubs_.length(), - optimizationLevel); + safepoints_.size(), optimizationLevel); if (!ionScript) return false; auto guardIonScript = mozilla::MakeScopeExit([&ionScript] { @@ -10603,9 +10463,6 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints) script->setIonScript(cx->runtime(), ionScript); - // Adopt fallback shared stubs from the compiler into the ion script. - ionScript->adoptFallbackStubs(&stubSpace_); - Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript), ImmPtr((void*)-1)); @@ -10646,22 +10503,6 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints) } #endif - // Patch shared stub IC loads using IC entries - for (size_t i = 0; i < sharedStubs_.length(); i++) { - CodeOffset label = sharedStubs_[i].label; - - IonICEntry& entry = ionScript->sharedStubList()[i]; - entry = sharedStubs_[i].entry; - Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label), - ImmPtr(&entry), - ImmPtr((void*)-1)); - - MOZ_ASSERT(entry.hasStub()); - MOZ_ASSERT(entry.firstStub()->isFallback()); - - entry.firstStub()->toFallbackStub()->fixupICEntry(&entry); - } - // for generating inline caches during the execution. 
if (runtimeData_.length()) ionScript->copyRuntimeData(&runtimeData_[0]); diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h index 0d4f4d4506a7..561a1b93baf7 100644 --- a/js/src/jit/CodeGenerator.h +++ b/js/src/jit/CodeGenerator.h @@ -76,7 +76,6 @@ class CodeGenerator final : public CodeGeneratorSpecific wasm::FuncOffsets* offsets); MOZ_MUST_USE bool link(JSContext* cx, CompilerConstraintList* constraints); - MOZ_MUST_USE bool linkSharedStubs(JSContext* cx); void emitOOLTestObject(Register objreg, Label* ifTruthy, Label* ifFalsy, Register scratch); void emitIntToString(Register input, Register output, Label* ool); @@ -113,7 +112,6 @@ class CodeGenerator final : public CodeGeneratorSpecific void visitOutOfLineNewObject(OutOfLineNewObject* ool); private: - void emitSharedStub(ICStub::Kind kind, LInstruction* lir); void emitPostWriteBarrier(const LAllocation* obj); void emitPostWriteBarrier(Register objreg); @@ -290,18 +288,6 @@ class CodeGenerator final : public CodeGeneratorSpecific Vector ionScriptLabels_; - struct SharedStub { - ICStub::Kind kind; - IonICEntry entry; - CodeOffset label; - - SharedStub(ICStub::Kind kind, IonICEntry entry, CodeOffset label) - : kind(kind), entry(entry), label(label) - {} - }; - - Vector sharedStubs_; - void branchIfInvalidated(Register temp, Label* invalidated); #ifdef DEBUG diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp index 0562887a2ab6..e66512fef9e9 100644 --- a/js/src/jit/Ion.cpp +++ b/js/src/jit/Ion.cpp @@ -830,13 +830,10 @@ IonScript::IonScript(IonCompilationId compilationId) recoversSize_(0), constantTable_(0), constantEntries_(0), - sharedStubList_(0), - sharedStubEntries_(0), invalidationCount_(0), compilationId_(compilationId), optimizationLevel_(OptimizationLevel::Normal), - osrPcMismatchCounter_(0), - fallbackStubSpace_() + osrPcMismatchCounter_(0) { } @@ -848,7 +845,7 @@ IonScript::New(JSContext* cx, IonCompilationId compilationId, size_t constants, size_t safepointIndices, size_t osiIndices, 
size_t icEntries, size_t runtimeSize, size_t safepointsSize, - size_t sharedStubEntries, OptimizationLevel optimizationLevel) + OptimizationLevel optimizationLevel) { constexpr size_t DataAlignment = sizeof(void*); @@ -871,7 +868,6 @@ IonScript::New(JSContext* cx, IonCompilationId compilationId, size_t paddedICEntriesSize = AlignBytes(icEntries * sizeof(uint32_t), DataAlignment); size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment); size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment); - size_t paddedSharedStubSize = AlignBytes(sharedStubEntries * sizeof(IonICEntry), DataAlignment); size_t bytes = paddedSnapshotsSize + paddedRecoversSize + @@ -881,8 +877,7 @@ IonScript::New(JSContext* cx, IonCompilationId compilationId, paddedOsiIndicesSize + paddedICEntriesSize + paddedRuntimeSize + - paddedSafepointSize + - paddedSharedStubSize; + paddedSafepointSize; IonScript* script = cx->pod_malloc_with_extra(bytes); if (!script) return nullptr; @@ -927,10 +922,6 @@ IonScript::New(JSContext* cx, IonCompilationId compilationId, script->constantEntries_ = constants; offsetCursor += paddedConstantsSize; - script->sharedStubList_ = offsetCursor; - script->sharedStubEntries_ = sharedStubEntries; - offsetCursor += paddedSharedStubSize; - script->frameSlots_ = frameSlots; script->argumentSlots_ = argumentSlots; @@ -941,13 +932,6 @@ IonScript::New(JSContext* cx, IonCompilationId compilationId, return script; } -void -IonScript::adoptFallbackStubs(FallbackICStubSpace* stubSpace) - -{ - fallbackStubSpace()->adoptFrom(stubSpace); -} - void IonScript::trace(JSTracer* trc) { @@ -957,12 +941,6 @@ IonScript::trace(JSTracer* trc) for (size_t i = 0; i < numConstants(); i++) TraceEdge(trc, &getConstant(i), "constant"); - // Mark all IC stub codes hanging off the IC stub entries. - for (size_t i = 0; i < numSharedStubs(); i++) { - IonICEntry& ent = sharedStubList()[i]; - ent.trace(trc); - } - // Trace caches so that the JSScript pointer can be updated if moved. 
for (size_t i = 0; i < numICs(); i++) getICFromIndex(i).trace(trc); @@ -1131,16 +1109,6 @@ IonScript::Trace(JSTracer* trc, IonScript* script) void IonScript::Destroy(FreeOp* fop, IonScript* script) { - /* - * When the script contains pointers to nursery things, the store buffer can - * contain entries that point into the fallback stub space. Since we can - * destroy scripts outside the context of a GC, this situation could result - * in us trying to mark invalid store buffer entries. - * - * Defer freeing any allocated blocks until after the next minor GC. - */ - script->fallbackStubSpace_.freeAllAfterMinorGC(script->method()->zone()); - fop->delete_(script); } @@ -1150,62 +1118,6 @@ JS::DeletePolicy::operator()(const js::jit::IonScript* scrip IonScript::Destroy(rt_->defaultFreeOp(), const_cast(script)); } -void -IonScript::purgeOptimizedStubs(Zone* zone) -{ - for (size_t i = 0; i < numSharedStubs(); i++) { - IonICEntry& entry = sharedStubList()[i]; - if (!entry.hasStub()) - continue; - - ICStub* lastStub = entry.firstStub(); - while (lastStub->next()) - lastStub = lastStub->next(); - - if (lastStub->isFallback()) { - // Unlink all stubs allocated in the optimized space. - ICStub* stub = entry.firstStub(); - ICStub* prev = nullptr; - - while (stub->next()) { - if (!stub->allocatedInFallbackSpace()) { - lastStub->toFallbackStub()->unlinkStub(zone, prev, stub); - stub = stub->next(); - continue; - } - - prev = stub; - stub = stub->next(); - } - - lastStub->toFallbackStub()->setInvalid(); - - MOZ_ASSERT(!lastStub->isMonitoredFallback(), - "None of the shared stubs used in Ion are monitored"); - } else if (lastStub->isTypeMonitor_Fallback()) { - lastStub->toTypeMonitor_Fallback()->resetMonitorStubChain(zone); - lastStub->toTypeMonitor_Fallback()->setInvalid(); - } else { - MOZ_ASSERT(lastStub->isTableSwitch()); - } - } - -#ifdef DEBUG - // All remaining stubs must be allocated in the fallback space. 
- for (size_t i = 0; i < numSharedStubs(); i++) { - IonICEntry& entry = sharedStubList()[i]; - if (!entry.hasStub()) - continue; - - ICStub* stub = entry.firstStub(); - while (stub->next()) { - MOZ_ASSERT(stub->allocatedInFallbackSpace()); - stub = stub->next(); - } - } -#endif -} - void IonScript::purgeICs(Zone* zone) { @@ -2733,7 +2645,6 @@ InvalidateActivation(FreeOp* fop, const JitActivationIterator& activations, bool // prevent lastJump_ from appearing to be a bogus pointer, just // in case anyone tries to read it. ionScript->purgeICs(script->zone()); - ionScript->purgeOptimizedStubs(script->zone()); // This frame needs to be invalidated. We do the following: // diff --git a/js/src/jit/IonBuilder.cpp b/js/src/jit/IonBuilder.cpp index ebe374a5ddde..e6b14f61af33 100644 --- a/js/src/jit/IonBuilder.cpp +++ b/js/src/jit/IonBuilder.cpp @@ -6114,9 +6114,8 @@ IonBuilder::compareTryBinaryStub(bool* emitted, MDefinition* left, MDefinition* { MOZ_ASSERT(*emitted == false); - // Try to emit a shared stub cache. - - if (JitOptions.disableSharedStubs) + // Try to emit a CacheIR Stub. 
+ if (JitOptions.disableCacheIR) return Ok(); if (JSOp(*pc) == JSOP_CASE || IsCallPC(pc)) @@ -6490,10 +6489,7 @@ IonBuilder::initializeArrayElement(MDefinition* obj, size_t index, MDefinition* if (needsPostBarrier(value)) current->add(MPostWriteBarrier::New(alloc(), obj, value)); - if ((obj->isNewArray() && obj->toNewArray()->convertDoubleElements()) || - (obj->isNullarySharedStub() && - obj->resultTypeSet()->convertDoubleElements(constraints()) == TemporaryTypeSet::AlwaysConvertToDoubles)) - { + if (obj->isNewArray() && obj->toNewArray()->convertDoubleElements()) { MInstruction* valueDouble = MToDouble::New(alloc(), value); current->add(valueDouble); value = valueDouble; diff --git a/js/src/jit/IonCode.h b/js/src/jit/IonCode.h index 9b1d528cae48..79e22aa60692 100644 --- a/js/src/jit/IonCode.h +++ b/js/src/jit/IonCode.h @@ -26,7 +26,6 @@ namespace jit { class MacroAssembler; class IonBuilder; -class IonICEntry; class JitCode; typedef Vector ObjectVector; @@ -258,10 +257,6 @@ struct IonScript uint32_t constantTable_; uint32_t constantEntries_; - // List of entries to the shared stub. - uint32_t sharedStubList_; - uint32_t sharedStubEntries_; - // Number of references from invalidation records. uint32_t invalidationCount_; @@ -275,9 +270,6 @@ struct IonScript // a LOOPENTRY pc other than osrPc_. uint32_t osrPcMismatchCounter_; - // Allocated space for fallback stubs. - FallbackICStubSpace fallbackStubSpace_; - // TraceLogger events that are baked into the IonScript. TraceLoggerEventVector traceLoggerEvents_; @@ -323,12 +315,6 @@ struct IonScript // Do not call directly, use IonScript::New. This is public for cx->new_. explicit IonScript(IonCompilationId compilationId); - ~IonScript() { - // The contents of the fallback stub space are removed and freed - // separately after the next minor GC. See IonScript::Destroy. 
- MOZ_ASSERT(fallbackStubSpace_.isEmpty()); - } - static IonScript* New(JSContext* cx, IonCompilationId compilationId, uint32_t frameSlots, uint32_t argumentSlots, uint32_t frameSize, size_t snapshotsListSize, size_t snapshotsRVATableSize, @@ -336,7 +322,7 @@ struct IonScript size_t constants, size_t safepointIndexEntries, size_t osiIndexEntries, size_t icEntries, size_t runtimeSize, size_t safepointsSize, - size_t sharedStubEntries, OptimizationLevel optimizationLevel); + OptimizationLevel optimizationLevel); static void Trace(JSTracer* trc, IonScript* script); static void Destroy(FreeOp* fop, IonScript* script); @@ -491,12 +477,6 @@ struct IonScript size_t numICs() const { return icEntries_; } - IonICEntry* sharedStubList() { - return (IonICEntry*) &bottomBuffer()[sharedStubList_]; - } - size_t numSharedStubs() const { - return sharedStubEntries_; - } size_t runtimeSize() const { return runtimeSize_; } @@ -555,12 +535,6 @@ struct IonScript recompiling_ = false; } - FallbackICStubSpace* fallbackStubSpace() { - return &fallbackStubSpace_; - } - void adoptFallbackStubs(FallbackICStubSpace* stubSpace); - void purgeOptimizedStubs(Zone* zone); - enum ShouldIncreaseAge { IncreaseAge = true, KeepAge = false diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp index 0447934cd400..4e932c40bd70 100644 --- a/js/src/jit/JitOptions.cpp +++ b/js/src/jit/JitOptions.cpp @@ -125,9 +125,6 @@ DefaultJitOptions::DefaultJitOptions() // Toggles whether CacheIR stubs for binary arith operations are used SET_DEFAULT(disableCacheIRBinaryArith, false); - // Toggles whether shared stubs are used in Ionmonkey. - SET_DEFAULT(disableSharedStubs, false); - // Toggles whether sincos optimization is globally disabled. // See bug984018: The MacOS is the only one that has the sincos fast. 
#if defined(XP_MACOSX) diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h index 70ee54800bab..3f6a331feb44 100644 --- a/js/src/jit/JitOptions.h +++ b/js/src/jit/JitOptions.h @@ -63,7 +63,6 @@ struct DefaultJitOptions bool disableScalarReplacement; bool disableCacheIR; bool disableCacheIRBinaryArith; - bool disableSharedStubs; bool disableSincos; bool disableSink; bool eagerCompilation; diff --git a/js/src/jit/JitRealm.h b/js/src/jit/JitRealm.h index 6fd6a38a9db1..b5a8db28c366 100644 --- a/js/src/jit/JitRealm.h +++ b/js/src/jit/JitRealm.h @@ -342,12 +342,9 @@ enum class CacheKind : uint8_t; class CacheIRStubInfo; enum class ICStubEngine : uint8_t { - // Baseline IC, see SharedIC.h and BaselineIC.h. + // Baseline IC, see BaselineIC.h. Baseline = 0, - // Ion IC that reuses Baseline IC code, see SharedIC.h. - IonSharedIC, - // Ion IC, see IonIC.h. IonIC }; diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp index 04c7e0387fba..ff35b60b0d05 100644 --- a/js/src/jit/Lowering.cpp +++ b/js/src/jit/Lowering.cpp @@ -2529,22 +2529,6 @@ LIRGenerator::visitBinaryCache(MBinaryCache* ins) assignSafepoint(lir, ins); } - -void -LIRGenerator::visitBinarySharedStub(MBinarySharedStub* ins) -{ - MDefinition* lhs = ins->getOperand(0); - MDefinition* rhs = ins->getOperand(1); - - MOZ_ASSERT(ins->type() == MIRType::Value); - MOZ_ASSERT(ins->type() == MIRType::Value); - - LBinarySharedStub* lir = new(alloc()) LBinarySharedStub(useBoxFixedAtStart(lhs, R0), - useBoxFixedAtStart(rhs, R1)); - defineSharedStubReturn(lir, ins); - assignSafepoint(lir, ins); -} - void LIRGenerator::visitUnaryCache(MUnaryCache* ins) { @@ -2556,17 +2540,6 @@ LIRGenerator::visitUnaryCache(MUnaryCache* ins) assignSafepoint(lir, ins); } -void -LIRGenerator::visitNullarySharedStub(MNullarySharedStub* ins) -{ - MOZ_ASSERT(ins->type() == MIRType::Value); - - LNullarySharedStub* lir = new(alloc()) LNullarySharedStub(); - - defineSharedStubReturn(lir, ins); - assignSafepoint(lir, ins); -} - void 
LIRGenerator::visitClassConstructor(MClassConstructor* ins) { diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h index 33a033bbdc2a..e424a98fa234 100644 --- a/js/src/jit/MIR.h +++ b/js/src/jit/MIR.h @@ -7112,22 +7112,6 @@ class MOsrReturnValue } }; -class MBinarySharedStub - : public MBinaryInstruction, - public MixPolicy, BoxPolicy<1> >::Data -{ - protected: - explicit MBinarySharedStub(MDefinition* left, MDefinition* right) - : MBinaryInstruction(classOpcode, left, right) - { - setResultType(MIRType::Value); - } - - public: - INSTRUCTION_HEADER(BinarySharedStub) - TRIVIAL_NEW_WRAPPERS -}; - class MBinaryCache : public MBinaryInstruction, public MixPolicy, BoxPolicy<1> >::Data @@ -7159,20 +7143,6 @@ class MUnaryCache TRIVIAL_NEW_WRAPPERS }; -class MNullarySharedStub - : public MNullaryInstruction -{ - explicit MNullarySharedStub() - : MNullaryInstruction(classOpcode) - { - setResultType(MIRType::Value); - } - - public: - INSTRUCTION_HEADER(NullarySharedStub) - TRIVIAL_NEW_WRAPPERS -}; - // Check the current frame for over-recursion past the global stack limit. class MCheckOverRecursed : public MNullaryInstruction diff --git a/js/src/jit/SharedIC.cpp b/js/src/jit/SharedIC.cpp deleted file mode 100644 index bca193b52399..000000000000 --- a/js/src/jit/SharedIC.cpp +++ /dev/null @@ -1,1189 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sts=4 et sw=4 tw=99: - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - -#include "jit/SharedIC.h" - -#include "mozilla/Casting.h" -#include "mozilla/IntegerPrintfMacros.h" -#include "mozilla/Sprintf.h" - -#include "jslibmath.h" -#include "jstypes.h" - -#include "gc/Policy.h" -#include "jit/BaselineCacheIRCompiler.h" -#include "jit/BaselineDebugModeOSR.h" -#include "jit/BaselineIC.h" -#include "jit/JitSpewer.h" -#include "jit/Linker.h" -#include "jit/SharedICHelpers.h" -#ifdef JS_ION_PERF -# include "jit/PerfSpewer.h" -#endif -#include "jit/VMFunctions.h" -#include "vm/Interpreter.h" -#include "vm/StringType.h" - -#include "jit/MacroAssembler-inl.h" -#include "jit/SharedICHelpers-inl.h" -#include "vm/Interpreter-inl.h" - -using mozilla::BitwiseCast; - -namespace js { -namespace jit { - -#ifdef JS_JITSPEW -void -FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...) -{ - if (JitSpewEnabled(JitSpew_BaselineICFallback)) { - RootedScript script(cx, GetTopJitJSScript(cx)); - jsbytecode* pc = stub->icEntry()->pc(script); - - char fmtbuf[100]; - va_list args; - va_start(args, fmt); - (void) VsprintfLiteral(fmtbuf, fmt, args); - va_end(args); - - JitSpew(JitSpew_BaselineICFallback, - "Fallback hit for (%s:%u) (pc=%zu,line=%d,uses=%d,stubs=%zu): %s", - script->filename(), - script->lineno(), - script->pcToOffset(pc), - PCToLineNumber(script, pc), - script->getWarmUpCount(), - stub->numOptimizedStubs(), - fmtbuf); - } -} - -void -TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...) 
-{ - if (JitSpewEnabled(JitSpew_BaselineICFallback)) { - RootedScript script(cx, GetTopJitJSScript(cx)); - jsbytecode* pc = stub->icEntry()->pc(script); - - char fmtbuf[100]; - va_list args; - va_start(args, fmt); - (void) VsprintfLiteral(fmtbuf, fmt, args); - va_end(args); - - JitSpew(JitSpew_BaselineICFallback, - "Type monitor fallback hit for (%s:%u) (pc=%zu,line=%d,uses=%d,stubs=%d): %s", - script->filename(), - script->lineno(), - script->pcToOffset(pc), - PCToLineNumber(script, pc), - script->getWarmUpCount(), - (int) stub->numOptimizedMonitorStubs(), - fmtbuf); - } -} -#endif // JS_JITSPEW - -ICFallbackStub* -ICEntry::fallbackStub() const -{ - return firstStub()->getChainFallback(); -} - -void -IonICEntry::trace(JSTracer* trc) -{ - TraceManuallyBarrieredEdge(trc, &script_, "IonICEntry::script_"); - traceEntry(trc); -} - -void -BaselineICEntry::trace(JSTracer* trc) -{ - traceEntry(trc); -} - -void -ICEntry::traceEntry(JSTracer* trc) -{ - if (!hasStub()) - return; - for (ICStub* stub = firstStub(); stub; stub = stub->next()) - stub->trace(trc); -} - -ICStubConstIterator& -ICStubConstIterator::operator++() -{ - MOZ_ASSERT(currentStub_ != nullptr); - currentStub_ = currentStub_->next(); - return *this; -} - - -ICStubIterator::ICStubIterator(ICFallbackStub* fallbackStub, bool end) - : icEntry_(fallbackStub->icEntry()), - fallbackStub_(fallbackStub), - previousStub_(nullptr), - currentStub_(end ? 
fallbackStub : icEntry_->firstStub()), - unlinked_(false) -{ } - -ICStubIterator& -ICStubIterator::operator++() -{ - MOZ_ASSERT(currentStub_->next() != nullptr); - if (!unlinked_) - previousStub_ = currentStub_; - currentStub_ = currentStub_->next(); - unlinked_ = false; - return *this; -} - -void -ICStubIterator::unlink(JSContext* cx) -{ - MOZ_ASSERT(currentStub_->next() != nullptr); - MOZ_ASSERT(currentStub_ != fallbackStub_); - MOZ_ASSERT(!unlinked_); - - fallbackStub_->unlinkStub(cx->zone(), previousStub_, currentStub_); - - // Mark the current iterator position as unlinked, so operator++ works properly. - unlinked_ = true; -} - -/* static */ bool -ICStub::NonCacheIRStubMakesGCCalls(Kind kind) -{ - MOZ_ASSERT(IsValidKind(kind)); - MOZ_ASSERT(!IsCacheIRKind(kind)); - - switch (kind) { - case Call_Fallback: - case Call_Scripted: - case Call_AnyScripted: - case Call_Native: - case Call_ClassHook: - case Call_ScriptedApplyArray: - case Call_ScriptedApplyArguments: - case Call_ScriptedFunCall: - case Call_ConstStringSplit: - case WarmUpCounter_Fallback: - case RetSub_Fallback: - // These two fallback stubs don't actually make non-tail calls, - // but the fallback code for the bailout path needs to pop the stub frame - // pushed during the bailout. - case GetProp_Fallback: - case SetProp_Fallback: - return true; - default: - return false; - } -} - -bool -ICStub::makesGCCalls() const -{ - switch (kind()) { - case CacheIR_Regular: - return toCacheIR_Regular()->stubInfo()->makesGCCalls(); - case CacheIR_Monitored: - return toCacheIR_Monitored()->stubInfo()->makesGCCalls(); - case CacheIR_Updated: - return toCacheIR_Updated()->stubInfo()->makesGCCalls(); - default: - return NonCacheIRStubMakesGCCalls(kind()); - } -} - -void -ICStub::traceCode(JSTracer* trc, const char* name) -{ - JitCode* stubJitCode = jitCode(); - TraceManuallyBarrieredEdge(trc, &stubJitCode, name); -} - -void -ICStub::updateCode(JitCode* code) -{ - // Write barrier on the old code. 
- JitCode::writeBarrierPre(jitCode()); - stubCode_ = code->raw(); -} - -/* static */ void -ICStub::trace(JSTracer* trc) -{ - traceCode(trc, "shared-stub-jitcode"); - - // If the stub is a monitored fallback stub, then trace the monitor ICs hanging - // off of that stub. We don't need to worry about the regular monitored stubs, - // because the regular monitored stubs will always have a monitored fallback stub - // that references the same stub chain. - if (isMonitoredFallback()) { - ICTypeMonitor_Fallback* lastMonStub = - toMonitoredFallbackStub()->maybeFallbackMonitorStub(); - if (lastMonStub) { - for (ICStubConstIterator iter(lastMonStub->firstMonitorStub()); - !iter.atEnd(); - iter++) - { - MOZ_ASSERT_IF(iter->next() == nullptr, *iter == lastMonStub); - iter->trace(trc); - } - } - } - - if (isUpdated()) { - for (ICStubConstIterator iter(toUpdatedStub()->firstUpdateStub()); !iter.atEnd(); iter++) { - MOZ_ASSERT_IF(iter->next() == nullptr, iter->isTypeUpdate_Fallback()); - iter->trace(trc); - } - } - - switch (kind()) { - case ICStub::Call_Scripted: { - ICCall_Scripted* callStub = toCall_Scripted(); - TraceEdge(trc, &callStub->callee(), "baseline-callscripted-callee"); - TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callscripted-template"); - break; - } - case ICStub::Call_Native: { - ICCall_Native* callStub = toCall_Native(); - TraceEdge(trc, &callStub->callee(), "baseline-callnative-callee"); - TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callnative-template"); - break; - } - case ICStub::Call_ClassHook: { - ICCall_ClassHook* callStub = toCall_ClassHook(); - TraceNullableEdge(trc, &callStub->templateObject(), "baseline-callclasshook-template"); - break; - } - case ICStub::Call_ConstStringSplit: { - ICCall_ConstStringSplit* callStub = toCall_ConstStringSplit(); - TraceEdge(trc, &callStub->templateObject(), "baseline-callstringsplit-template"); - TraceEdge(trc, &callStub->expectedSep(), "baseline-callstringsplit-sep"); - 
TraceEdge(trc, &callStub->expectedStr(), "baseline-callstringsplit-str"); - break; - } - case ICStub::TypeMonitor_SingleObject: { - ICTypeMonitor_SingleObject* monitorStub = toTypeMonitor_SingleObject(); - TraceEdge(trc, &monitorStub->object(), "baseline-monitor-singleton"); - break; - } - case ICStub::TypeMonitor_ObjectGroup: { - ICTypeMonitor_ObjectGroup* monitorStub = toTypeMonitor_ObjectGroup(); - TraceEdge(trc, &monitorStub->group(), "baseline-monitor-group"); - break; - } - case ICStub::TypeUpdate_SingleObject: { - ICTypeUpdate_SingleObject* updateStub = toTypeUpdate_SingleObject(); - TraceEdge(trc, &updateStub->object(), "baseline-update-singleton"); - break; - } - case ICStub::TypeUpdate_ObjectGroup: { - ICTypeUpdate_ObjectGroup* updateStub = toTypeUpdate_ObjectGroup(); - TraceEdge(trc, &updateStub->group(), "baseline-update-group"); - break; - } - case ICStub::NewArray_Fallback: { - ICNewArray_Fallback* stub = toNewArray_Fallback(); - TraceNullableEdge(trc, &stub->templateObject(), "baseline-newarray-template"); - TraceEdge(trc, &stub->templateGroup(), "baseline-newarray-template-group"); - break; - } - case ICStub::NewObject_Fallback: { - ICNewObject_Fallback* stub = toNewObject_Fallback(); - TraceNullableEdge(trc, &stub->templateObject(), "baseline-newobject-template"); - break; - } - case ICStub::Rest_Fallback: { - ICRest_Fallback* stub = toRest_Fallback(); - TraceEdge(trc, &stub->templateObject(), "baseline-rest-template"); - break; - } - case ICStub::CacheIR_Regular: - TraceCacheIRStub(trc, this, toCacheIR_Regular()->stubInfo()); - break; - case ICStub::CacheIR_Monitored: - TraceCacheIRStub(trc, this, toCacheIR_Monitored()->stubInfo()); - break; - case ICStub::CacheIR_Updated: { - ICCacheIR_Updated* stub = toCacheIR_Updated(); - TraceNullableEdge(trc, &stub->updateStubGroup(), "baseline-update-stub-group"); - TraceEdge(trc, &stub->updateStubId(), "baseline-update-stub-id"); - TraceCacheIRStub(trc, this, stub->stubInfo()); - break; - } - default: - 
break; - } -} - -void -ICFallbackStub::unlinkStub(Zone* zone, ICStub* prev, ICStub* stub) -{ - MOZ_ASSERT(stub->next()); - - // If stub is the last optimized stub, update lastStubPtrAddr. - if (stub->next() == this) { - MOZ_ASSERT(lastStubPtrAddr_ == stub->addressOfNext()); - if (prev) - lastStubPtrAddr_ = prev->addressOfNext(); - else - lastStubPtrAddr_ = icEntry()->addressOfFirstStub(); - *lastStubPtrAddr_ = this; - } else { - if (prev) { - MOZ_ASSERT(prev->next() == stub); - prev->setNext(stub->next()); - } else { - MOZ_ASSERT(icEntry()->firstStub() == stub); - icEntry()->setFirstStub(stub->next()); - } - } - - state_.trackUnlinkedStub(); - - if (zone->needsIncrementalBarrier()) { - // We are removing edges from ICStub to gcthings. Perform one final trace - // of the stub for incremental GC, as it must know about those edges. - stub->trace(zone->barrierTracer()); - } - - if (stub->makesGCCalls() && stub->isMonitored()) { - // This stub can make calls so we can return to it if it's on the stack. - // We just have to reset its firstMonitorStub_ field to avoid a stale - // pointer when purgeOptimizedStubs destroys all optimized monitor - // stubs (unlinked stubs won't be updated). - ICTypeMonitor_Fallback* monitorFallback = - toMonitoredFallbackStub()->maybeFallbackMonitorStub(); - MOZ_ASSERT(monitorFallback); - stub->toMonitoredStub()->resetFirstMonitorStub(monitorFallback); - } - -#ifdef DEBUG - // Poison stub code to ensure we don't call this stub again. However, if - // this stub can make calls, a pointer to it may be stored in a stub frame - // on the stack, so we can't touch the stubCode_ or GC will crash when - // tracing this pointer. 
- if (!stub->makesGCCalls()) - stub->stubCode_ = (uint8_t*)0xbad; -#endif -} - -void -ICFallbackStub::unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind) -{ - for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) { - if (iter->kind() == kind) - iter.unlink(cx); - } -} - -void -ICFallbackStub::discardStubs(JSContext* cx) -{ - for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) - iter.unlink(cx); -} - -void -ICTypeMonitor_Fallback::resetMonitorStubChain(Zone* zone) -{ - if (zone->needsIncrementalBarrier()) { - // We are removing edges from monitored stubs to gcthings (JitCode). - // Perform one final trace of all monitor stubs for incremental GC, - // as it must know about those edges. - for (ICStub* s = firstMonitorStub_; !s->isTypeMonitor_Fallback(); s = s->next()) - s->trace(zone->barrierTracer()); - } - - firstMonitorStub_ = this; - numOptimizedMonitorStubs_ = 0; - - if (hasFallbackStub_) { - lastMonitorStubPtrAddr_ = nullptr; - - // Reset firstMonitorStub_ field of all monitored stubs. - for (ICStubConstIterator iter = mainFallbackStub_->beginChainConst(); - !iter.atEnd(); iter++) - { - if (!iter->isMonitored()) - continue; - iter->toMonitoredStub()->resetFirstMonitorStub(this); - } - } else { - icEntry_->setFirstStub(this); - lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub(); - } -} - -void -ICUpdatedStub::resetUpdateStubChain(Zone* zone) -{ - while (!firstUpdateStub_->isTypeUpdate_Fallback()) { - if (zone->needsIncrementalBarrier()) { - // We are removing edges from update stubs to gcthings (JitCode). - // Perform one final trace of all update stubs for incremental GC, - // as it must know about those edges. 
- firstUpdateStub_->trace(zone->barrierTracer()); - } - firstUpdateStub_ = firstUpdateStub_->next(); - } - - numOptimizedStubs_ = 0; -} - -ICMonitoredStub::ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub) - : ICStub(kind, ICStub::Monitored, stubCode), - firstMonitorStub_(firstMonitorStub) -{ - // In order to silence Coverity - null pointer dereference checker - MOZ_ASSERT(firstMonitorStub_); - // If the first monitored stub is a ICTypeMonitor_Fallback stub, then - // double check that _its_ firstMonitorStub is the same as this one. - MOZ_ASSERT_IF(firstMonitorStub_->isTypeMonitor_Fallback(), - firstMonitorStub_->toTypeMonitor_Fallback()->firstMonitorStub() == - firstMonitorStub_); -} - -bool -ICMonitoredFallbackStub::initMonitoringChain(JSContext* cx, JSScript* script) -{ - MOZ_ASSERT(fallbackMonitorStub_ == nullptr); - - ICTypeMonitor_Fallback::Compiler compiler(cx, this); - ICStubSpace* space = script->baselineScript()->fallbackStubSpace(); - ICTypeMonitor_Fallback* stub = compiler.getStub(space); - if (!stub) - return false; - fallbackMonitorStub_ = stub; - return true; -} - -bool -ICMonitoredFallbackStub::addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, - StackTypeSet* types, HandleValue val) -{ - ICTypeMonitor_Fallback* typeMonitorFallback = getFallbackMonitorStub(cx, frame->script()); - if (!typeMonitorFallback) - return false; - return typeMonitorFallback->addMonitorStubForValue(cx, frame, types, val); -} - -bool -ICUpdatedStub::initUpdatingChain(JSContext* cx, ICStubSpace* space) -{ - MOZ_ASSERT(firstUpdateStub_ == nullptr); - - ICTypeUpdate_Fallback::Compiler compiler(cx); - ICTypeUpdate_Fallback* stub = compiler.getStub(space); - if (!stub) - return false; - - firstUpdateStub_ = stub; - return true; -} - -JitCode* -ICStubCompiler::getStubCode() -{ - JitRealm* realm = cx->realm()->jitRealm(); - - // Check for existing cached stubcode. 
- uint32_t stubKey = getKey(); - JitCode* stubCode = realm->getStubCode(stubKey); - if (stubCode) - return stubCode; - - // Compile new stubcode. - JitContext jctx(cx, nullptr); - StackMacroAssembler masm; -#ifndef JS_USE_LINK_REGISTER - // The first value contains the return addres, - // which we pull into ICTailCallReg for tail calls. - masm.adjustFrame(sizeof(intptr_t)); -#endif -#ifdef JS_CODEGEN_ARM - masm.setSecondScratchReg(BaselineSecondScratchReg); -#endif - - if (!generateStubCode(masm)) - return nullptr; - Linker linker(masm); - AutoFlushICache afc("getStubCode"); - Rooted newStubCode(cx, linker.newCode(cx, CodeKind::Baseline)); - if (!newStubCode) - return nullptr; - - // Cache newly compiled stubcode. - if (!realm->putStubCode(cx, stubKey, newStubCode)) - return nullptr; - - // After generating code, run postGenerateStubCode(). We must not fail - // after this point. - postGenerateStubCode(masm, newStubCode); - - MOZ_ASSERT(entersStubFrame_ == ICStub::NonCacheIRStubMakesGCCalls(kind)); - MOZ_ASSERT(!inStubFrame_); - -#ifdef JS_ION_PERF - writePerfSpewerJitCodeProfile(newStubCode, "BaselineIC"); -#endif - - return newStubCode; -} - -bool -ICStubCompiler::tailCallVM(const VMFunction& fun, MacroAssembler& masm) -{ - TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(fun); - MOZ_ASSERT(fun.expectTailCall == TailCall); - uint32_t argSize = fun.explicitStackSlots() * sizeof(void*); - if (engine_ == Engine::Baseline) { - EmitBaselineTailCallVM(code, masm, argSize); - } else { - uint32_t stackSize = argSize + fun.extraValuesToPop * sizeof(Value); - EmitIonTailCallVM(code, masm, stackSize); - } - return true; -} - -bool -ICStubCompiler::callVM(const VMFunction& fun, MacroAssembler& masm) -{ - MOZ_ASSERT(inStubFrame_); - - TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(fun); - MOZ_ASSERT(fun.expectTailCall == NonTailCall); - MOZ_ASSERT(engine_ == Engine::Baseline); - - EmitBaselineCallVM(code, masm); - return true; -} - -void 
-ICStubCompiler::enterStubFrame(MacroAssembler& masm, Register scratch) -{ - MOZ_ASSERT(engine_ == Engine::Baseline); - EmitBaselineEnterStubFrame(masm, scratch); -#ifdef DEBUG - framePushedAtEnterStubFrame_ = masm.framePushed(); -#endif - - MOZ_ASSERT(!inStubFrame_); - inStubFrame_ = true; - -#ifdef DEBUG - entersStubFrame_ = true; -#endif -} - -void -ICStubCompiler::assumeStubFrame() -{ - MOZ_ASSERT(!inStubFrame_); - inStubFrame_ = true; - -#ifdef DEBUG - entersStubFrame_ = true; - - // |framePushed| isn't tracked precisely in ICStubs, so simply assume it to - // be STUB_FRAME_SIZE so that assertions don't fail in leaveStubFrame. - framePushedAtEnterStubFrame_ = STUB_FRAME_SIZE; -#endif -} - -void -ICStubCompiler::leaveStubFrame(MacroAssembler& masm, bool calledIntoIon) -{ - MOZ_ASSERT(entersStubFrame_ && inStubFrame_); - inStubFrame_ = false; - - MOZ_ASSERT(engine_ == Engine::Baseline); -#ifdef DEBUG - masm.setFramePushed(framePushedAtEnterStubFrame_); - if (calledIntoIon) - masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra. 
-#endif - EmitBaselineLeaveStubFrame(masm, calledIntoIon); -} - -void -ICStubCompiler::pushStubPayload(MacroAssembler& masm, Register scratch) -{ - if (engine_ == Engine::IonSharedIC) { - masm.push(Imm32(0)); - return; - } - - if (inStubFrame_) { - masm.loadPtr(Address(BaselineFrameReg, 0), scratch); - masm.pushBaselineFramePtr(scratch, scratch); - } else { - masm.pushBaselineFramePtr(BaselineFrameReg, scratch); - } -} - -void -ICStubCompiler::PushStubPayload(MacroAssembler& masm, Register scratch) -{ - pushStubPayload(masm, scratch); - masm.adjustFrame(sizeof(intptr_t)); -} - -SharedStubInfo::SharedStubInfo(JSContext* cx, void* payload, ICEntry* icEntry) - : maybeFrame_(nullptr), - outerScript_(cx), - innerScript_(cx), - icEntry_(icEntry) -{ - if (payload) { - maybeFrame_ = (BaselineFrame*) payload; - outerScript_ = maybeFrame_->script(); - innerScript_ = maybeFrame_->script(); - } else { - IonICEntry* entry = (IonICEntry*) icEntry; - innerScript_ = entry->script(); - // outerScript_ is initialized lazily. 
- } -} - -HandleScript -SharedStubInfo::outerScript(JSContext* cx) -{ - if (!outerScript_) { - js::jit::JitActivationIterator actIter(cx); - JSJitFrameIter it(actIter->asJit()); - MOZ_ASSERT(it.isExitFrame()); - ++it; - MOZ_ASSERT(it.isIonJS()); - outerScript_ = it.script(); - MOZ_ASSERT(!it.ionScript()->invalidated()); - } - return outerScript_; -} - -// -void -LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result) -{ - switch (layout) { - case Layout_TypedArray: - masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), result); - break; - case Layout_OutlineTypedObject: - masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), result); - break; - case Layout_InlineTypedObject: - masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), result); - break; - default: - MOZ_CRASH(); - } -} - -void -BaselineScript::noteAccessedGetter(uint32_t pcOffset) -{ - ICEntry& entry = icEntryFromPCOffset(pcOffset); - ICFallbackStub* stub = entry.fallbackStub(); - - if (stub->isGetProp_Fallback()) - stub->toGetProp_Fallback()->noteAccessedGetter(); -} - -// TypeMonitor_Fallback -// - -bool -ICTypeMonitor_Fallback::addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, - StackTypeSet* types, HandleValue val) -{ - MOZ_ASSERT(types); - - // Don't attach too many SingleObject/ObjectGroup stubs. If the value is a - // primitive or if we will attach an any-object stub, we can handle this - // with a single PrimitiveSet or AnyValue stub so we always optimize. - if (numOptimizedMonitorStubs_ >= MAX_OPTIMIZED_STUBS && - val.isObject() && - !types->unknownObject()) - { - return true; - } - - bool wasDetachedMonitorChain = lastMonitorStubPtrAddr_ == nullptr; - MOZ_ASSERT_IF(wasDetachedMonitorChain, numOptimizedMonitorStubs_ == 0); - - if (types->unknown()) { - // The TypeSet got marked as unknown so attach a stub that always - // succeeds. - - // Check for existing TypeMonitor_AnyValue stubs. 
- for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { - if (iter->isTypeMonitor_AnyValue()) - return true; - } - - // Discard existing stubs. - resetMonitorStubChain(cx->zone()); - wasDetachedMonitorChain = (lastMonitorStubPtrAddr_ == nullptr); - - ICTypeMonitor_AnyValue::Compiler compiler(cx); - ICStub* stub = compiler.getStub(compiler.getStubSpace(frame->script())); - if (!stub) { - ReportOutOfMemory(cx); - return false; - } - - JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for any value", stub); - addOptimizedMonitorStub(stub); - - } else if (val.isPrimitive() || types->unknownObject()) { - if (val.isMagic(JS_UNINITIALIZED_LEXICAL)) - return true; - MOZ_ASSERT(!val.isMagic()); - JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType(); - - // Check for existing TypeMonitor stub. - ICTypeMonitor_PrimitiveSet* existingStub = nullptr; - for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { - if (iter->isTypeMonitor_PrimitiveSet()) { - existingStub = iter->toTypeMonitor_PrimitiveSet(); - if (existingStub->containsType(type)) - return true; - } - } - - if (val.isObject()) { - // Check for existing SingleObject/ObjectGroup stubs and discard - // stubs if we find one. Ideally we would discard just these stubs, - // but unlinking individual type monitor stubs is somewhat - // complicated. - MOZ_ASSERT(types->unknownObject()); - bool hasObjectStubs = false; - for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { - if (iter->isTypeMonitor_SingleObject() || iter->isTypeMonitor_ObjectGroup()) { - hasObjectStubs = true; - break; - } - } - if (hasObjectStubs) { - resetMonitorStubChain(cx->zone()); - wasDetachedMonitorChain = (lastMonitorStubPtrAddr_ == nullptr); - existingStub = nullptr; - } - } - - ICTypeMonitor_PrimitiveSet::Compiler compiler(cx, existingStub, type); - ICStub* stub = existingStub - ? 
compiler.updateStub() - : compiler.getStub(compiler.getStubSpace(frame->script())); - if (!stub) { - ReportOutOfMemory(cx); - return false; - } - - JitSpew(JitSpew_BaselineIC, " %s TypeMonitor stub %p for primitive type %d", - existingStub ? "Modified existing" : "Created new", stub, type); - - if (!existingStub) { - MOZ_ASSERT(!hasStub(TypeMonitor_PrimitiveSet)); - addOptimizedMonitorStub(stub); - } - - } else if (val.toObject().isSingleton()) { - RootedObject obj(cx, &val.toObject()); - - // Check for existing TypeMonitor stub. - for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { - if (iter->isTypeMonitor_SingleObject() && - iter->toTypeMonitor_SingleObject()->object() == obj) - { - return true; - } - } - - ICTypeMonitor_SingleObject::Compiler compiler(cx, obj); - ICStub* stub = compiler.getStub(compiler.getStubSpace(frame->script())); - if (!stub) { - ReportOutOfMemory(cx); - return false; - } - - JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for singleton %p", - stub, obj.get()); - - addOptimizedMonitorStub(stub); - - } else { - RootedObjectGroup group(cx, val.toObject().group()); - - // Check for existing TypeMonitor stub. - for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) { - if (iter->isTypeMonitor_ObjectGroup() && - iter->toTypeMonitor_ObjectGroup()->group() == group) - { - return true; - } - } - - ICTypeMonitor_ObjectGroup::Compiler compiler(cx, group); - ICStub* stub = compiler.getStub(compiler.getStubSpace(frame->script())); - if (!stub) { - ReportOutOfMemory(cx); - return false; - } - - JitSpew(JitSpew_BaselineIC, " Added TypeMonitor stub %p for ObjectGroup %p", - stub, group.get()); - - addOptimizedMonitorStub(stub); - } - - bool firstMonitorStubAdded = wasDetachedMonitorChain && (numOptimizedMonitorStubs_ > 0); - - if (firstMonitorStubAdded) { - // Was an empty monitor chain before, but a new stub was added. 
This is the - // only time that any main stubs' firstMonitorStub fields need to be updated to - // refer to the newly added monitor stub. - ICStub* firstStub = mainFallbackStub_->icEntry()->firstStub(); - for (ICStubConstIterator iter(firstStub); !iter.atEnd(); iter++) { - // Non-monitored stubs are used if the result has always the same type, - // e.g. a StringLength stub will always return int32. - if (!iter->isMonitored()) - continue; - - // Since we just added the first optimized monitoring stub, any - // existing main stub's |firstMonitorStub| MUST be pointing to the fallback - // monitor stub (i.e. this stub). - MOZ_ASSERT(iter->toMonitoredStub()->firstMonitorStub() == this); - iter->toMonitoredStub()->updateFirstMonitorStub(firstMonitorStub_); - } - } - - return true; -} - -static bool -DoTypeMonitorFallback(JSContext* cx, BaselineFrame* frame, ICTypeMonitor_Fallback* stub, - HandleValue value, MutableHandleValue res) -{ - JSScript* script = frame->script(); - jsbytecode* pc = stub->icEntry()->pc(script); - TypeFallbackICSpew(cx, stub, "TypeMonitor"); - - // Copy input value to res. - res.set(value); - - if (MOZ_UNLIKELY(value.isMagic())) { - // It's possible that we arrived here from bailing out of Ion, and that - // Ion proved that the value is dead and optimized out. In such cases, - // do nothing. However, it's also possible that we have an uninitialized - // this, in which case we should not look for other magic values. - - if (value.whyMagic() == JS_OPTIMIZED_OUT) { - MOZ_ASSERT(!stub->monitorsThis()); - return true; - } - - // In derived class constructors (including nested arrows/eval), the - // |this| argument or GETALIASEDVAR can return the magic TDZ value. 
- MOZ_ASSERT(value.isMagic(JS_UNINITIALIZED_LEXICAL)); - MOZ_ASSERT(frame->isFunctionFrame() || frame->isEvalFrame()); - MOZ_ASSERT(stub->monitorsThis() || - *GetNextPc(pc) == JSOP_CHECKTHIS || - *GetNextPc(pc) == JSOP_CHECKTHISREINIT || - *GetNextPc(pc) == JSOP_CHECKRETURN); - if (stub->monitorsThis()) - TypeScript::SetThis(cx, script, TypeSet::UnknownType()); - else - TypeScript::Monitor(cx, script, pc, TypeSet::UnknownType()); - return true; - } - - StackTypeSet* types; - uint32_t argument; - if (stub->monitorsArgument(&argument)) { - MOZ_ASSERT(pc == script->code()); - types = TypeScript::ArgTypes(script, argument); - TypeScript::SetArgument(cx, script, argument, value); - } else if (stub->monitorsThis()) { - MOZ_ASSERT(pc == script->code()); - types = TypeScript::ThisTypes(script); - TypeScript::SetThis(cx, script, value); - } else { - types = TypeScript::BytecodeTypes(script, pc); - TypeScript::Monitor(cx, script, pc, types, value); - } - - if (MOZ_UNLIKELY(stub->invalid())) - return true; - - return stub->addMonitorStubForValue(cx, frame, types, value); -} - -typedef bool (*DoTypeMonitorFallbackFn)(JSContext*, BaselineFrame*, ICTypeMonitor_Fallback*, - HandleValue, MutableHandleValue); -static const VMFunction DoTypeMonitorFallbackInfo = - FunctionInfo(DoTypeMonitorFallback, "DoTypeMonitorFallback", - TailCall); - -bool -ICTypeMonitor_Fallback::Compiler::generateStubCode(MacroAssembler& masm) -{ - MOZ_ASSERT(R0 == JSReturnOperand); - - // Restore the tail call register. 
- EmitRestoreTailCallReg(masm); - - masm.pushValue(R0); - masm.push(ICStubReg); - masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); - - return tailCallVM(DoTypeMonitorFallbackInfo, masm); -} - -bool -ICTypeMonitor_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm) -{ - Label success; - if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))) - masm.branchTestInt32(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)) - masm.branchTestNumber(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED)) - masm.branchTestUndefined(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN)) - masm.branchTestBoolean(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_STRING)) - masm.branchTestString(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_SYMBOL)) - masm.branchTestSymbol(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)) - masm.branchTestObject(Assembler::Equal, R0, &success); - - if (flags_ & TypeToFlag(JSVAL_TYPE_NULL)) - masm.branchTestNull(Assembler::Equal, R0, &success); - - EmitStubGuardFailure(masm); - - masm.bind(&success); - EmitReturnFromIC(masm); - return true; -} - -static void -MaybeWorkAroundAmdBug(MacroAssembler& masm) -{ - // Attempt to work around an AMD bug (see bug 1034706 and bug 1281759), by - // inserting 32-bytes of NOPs. -#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) - if (CPUInfo::NeedAmdBugWorkaround()) { - masm.nop(9); - masm.nop(9); - masm.nop(9); - masm.nop(5); - } -#endif -} - -bool -ICTypeMonitor_SingleObject::Compiler::generateStubCode(MacroAssembler& masm) -{ - Label failure; - masm.branchTestObject(Assembler::NotEqual, R0, &failure); - MaybeWorkAroundAmdBug(masm); - - // Guard on the object's identity. 
- Register obj = masm.extractObject(R0, ExtractTemp0); - Address expectedObject(ICStubReg, ICTypeMonitor_SingleObject::offsetOfObject()); - masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure); - MaybeWorkAroundAmdBug(masm); - - EmitReturnFromIC(masm); - MaybeWorkAroundAmdBug(masm); - - masm.bind(&failure); - EmitStubGuardFailure(masm); - return true; -} - -bool -ICTypeMonitor_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm) -{ - Label failure; - masm.branchTestObject(Assembler::NotEqual, R0, &failure); - MaybeWorkAroundAmdBug(masm); - - // Guard on the object's ObjectGroup. No Spectre mitigations are needed - // here: we're just recording type information for Ion compilation and - // it's safe to speculatively return. - Register obj = masm.extractObject(R0, ExtractTemp0); - Address expectedGroup(ICStubReg, ICTypeMonitor_ObjectGroup::offsetOfGroup()); - masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, expectedGroup, - R1.scratchReg(), &failure); - MaybeWorkAroundAmdBug(masm); - - EmitReturnFromIC(masm); - MaybeWorkAroundAmdBug(masm); - - masm.bind(&failure); - EmitStubGuardFailure(masm); - return true; -} - -bool -ICTypeMonitor_AnyValue::Compiler::generateStubCode(MacroAssembler& masm) -{ - EmitReturnFromIC(masm); - return true; -} - -bool -ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, HandleObject obj, - HandleObjectGroup group, HandleId id, HandleValue val) -{ - EnsureTrackPropertyTypes(cx, obj, id); - - // Make sure that undefined values are explicitly included in the property - // types for an object if generating a stub to write an undefined value. 
- if (val.isUndefined() && CanHaveEmptyPropertyTypesForOwnProperty(obj)) { - MOZ_ASSERT(obj->group() == group); - AddTypePropertyId(cx, obj, id, val); - } - - bool unknown = false, unknownObject = false; - AutoSweepObjectGroup sweep(group); - if (group->unknownProperties(sweep)) { - unknown = unknownObject = true; - } else { - if (HeapTypeSet* types = group->maybeGetProperty(sweep, id)) { - unknown = types->unknown(); - unknownObject = types->unknownObject(); - } else { - // We don't record null/undefined types for certain TypedObject - // properties. In these cases |types| is allowed to be nullptr - // without implying unknown types. See DoTypeUpdateFallback. - MOZ_ASSERT(obj->is()); - MOZ_ASSERT(val.isNullOrUndefined()); - } - } - MOZ_ASSERT_IF(unknown, unknownObject); - - // Don't attach too many SingleObject/ObjectGroup stubs unless we can - // replace them with a single PrimitiveSet or AnyValue stub. - if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS && - val.isObject() && - !unknownObject) - { - return true; - } - - if (unknown) { - // Attach a stub that always succeeds. We should not have a - // TypeUpdate_AnyValue stub yet. - MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_AnyValue)); - - // Discard existing stubs. - resetUpdateStubChain(cx->zone()); - - ICTypeUpdate_AnyValue::Compiler compiler(cx); - ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript)); - if (!stub) - return false; - - JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for any value", stub); - addOptimizedUpdateStub(stub); - - } else if (val.isPrimitive() || unknownObject) { - JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType(); - - // Check for existing TypeUpdate stub. 
- ICTypeUpdate_PrimitiveSet* existingStub = nullptr; - for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) { - if (iter->isTypeUpdate_PrimitiveSet()) { - existingStub = iter->toTypeUpdate_PrimitiveSet(); - MOZ_ASSERT(!existingStub->containsType(type)); - } - } - - if (val.isObject()) { - // Discard existing ObjectGroup/SingleObject stubs. - resetUpdateStubChain(cx->zone()); - if (existingStub) - addOptimizedUpdateStub(existingStub); - } - - ICTypeUpdate_PrimitiveSet::Compiler compiler(cx, existingStub, type); - ICStub* stub = existingStub ? compiler.updateStub() - : compiler.getStub(compiler.getStubSpace(outerScript)); - if (!stub) - return false; - if (!existingStub) { - MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_PrimitiveSet)); - addOptimizedUpdateStub(stub); - } - - JitSpew(JitSpew_BaselineIC, " %s TypeUpdate stub %p for primitive type %d", - existingStub ? "Modified existing" : "Created new", stub, type); - - } else if (val.toObject().isSingleton()) { - RootedObject obj(cx, &val.toObject()); - -#ifdef DEBUG - // We should not have a stub for this object. - for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) { - MOZ_ASSERT_IF(iter->isTypeUpdate_SingleObject(), - iter->toTypeUpdate_SingleObject()->object() != obj); - } -#endif - - ICTypeUpdate_SingleObject::Compiler compiler(cx, obj); - ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript)); - if (!stub) - return false; - - JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for singleton %p", stub, obj.get()); - - addOptimizedUpdateStub(stub); - - } else { - RootedObjectGroup group(cx, val.toObject().group()); - -#ifdef DEBUG - // We should not have a stub for this group. 
- for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) { - MOZ_ASSERT_IF(iter->isTypeUpdate_ObjectGroup(), - iter->toTypeUpdate_ObjectGroup()->group() != group); - } -#endif - - ICTypeUpdate_ObjectGroup::Compiler compiler(cx, group); - ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript)); - if (!stub) - return false; - - JitSpew(JitSpew_BaselineIC, " Added TypeUpdate stub %p for ObjectGroup %p", - stub, group.get()); - - addOptimizedUpdateStub(stub); - } - - return true; -} - -} // namespace jit -} // namespace js diff --git a/js/src/jit/SharedIC.h b/js/src/jit/SharedIC.h deleted file mode 100644 index 17e06668691b..000000000000 --- a/js/src/jit/SharedIC.h +++ /dev/null @@ -1,1786 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sts=4 et sw=4 tw=99: - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#ifndef jit_SharedIC_h -#define jit_SharedIC_h - -#include "gc/GC.h" -#include "jit/BaselineICList.h" -#include "jit/BaselineJIT.h" -#include "jit/ICState.h" -#include "jit/MacroAssembler.h" -#include "jit/SharedICList.h" -#include "jit/SharedICRegisters.h" -#include "vm/JSContext.h" -#include "vm/Realm.h" -#include "vm/ReceiverGuard.h" -#include "vm/TypedArrayObject.h" - -namespace js { -namespace jit { - -// [SMDOC] JIT Inline Caches (ICs) -// -// Baseline Inline Caches are polymorphic caches that aggressively -// share their stub code. -// -// Every polymorphic site contains a linked list of stubs which are -// specific to that site. These stubs are composed of a |StubData| -// structure that stores parametrization information (e.g. -// the shape pointer for a shape-check-and-property-get stub), any -// dynamic information (e.g. 
warm-up counters), a pointer to the stub code, -// and a pointer to the next stub state in the linked list. -// -// Every BaselineScript keeps an table of |CacheDescriptor| data -// structures, which store the following: -// A pointer to the first StubData in the cache. -// The bytecode PC of the relevant IC. -// The machine-code PC where the call to the stubcode returns. -// -// A diagram: -// -// Control flow Pointers -// =======# ----. .----> -// # | | -// #======> \-----/ -// -// -// .---------------------------------------. -// | .-------------------------. | -// | | .----. | | -// Baseline | | | | | | -// JIT Code 0 ^ 1 ^ 2 ^ | | | -// +--------------+ .-->+-----+ +-----+ +-----+ | | | -// | | #=|==>| |==>| |==>| FB | | | | -// | | # | +-----+ +-----+ +-----+ | | | -// | | # | # # # | | | -// |==============|==# | # # # | | | -// |=== IC =======| | # # # | | | -// .->|==============|<===|======#=========#=========# | | | -// | | | | | | | -// | | | | | | | -// | | | | | | | -// | | | | v | | -// | | | | +---------+ | | -// | | | | | Fallback| | | -// | | | | | Stub | | | -// | | | | | Code | | | -// | | | | +---------+ | | -// | +--------------+ | | | -// | |_______ | +---------+ | | -// | | | | Stub |<---/ | -// | IC | \--. | Code | | -// | Descriptor | | +---------+ | -// | Table v | | -// | +-----------------+ | +---------+ | -// \--| Ins | PC | Stub |----/ | Stub |<-------/ -// +-----------------+ | Code | -// | ... | +---------+ -// +-----------------+ -// Shared -// Stub Code -// -// -// Type ICs -// ======== -// -// Type ICs are otherwise regular ICs that are actually nested within -// other IC chains. They serve to optimize locations in the code where the -// baseline compiler would have otherwise had to perform a type Monitor operation -// (e.g. the result of GetProp, GetElem, etc.), or locations where the baseline -// compiler would have had to modify a heap typeset using the type of an input -// value (e.g. SetProp, SetElem, etc.) 
-// -// There are two kinds of Type ICs: Monitor and Update. -// -// Note that type stub bodies are no-ops. The stubs only exist for their -// guards, and their existence simply signifies that the typeset (implicit) -// that is being checked already contains that type. -// -// TypeMonitor ICs -// --------------- -// Monitor ICs are shared between stubs in the general IC, and monitor the resulting -// types of getter operations (call returns, getprop outputs, etc.) -// -// +-----------+ +-----------+ +-----------+ +-----------+ -// ---->| Stub 1 |---->| Stub 2 |---->| Stub 3 |---->| FB Stub | -// +-----------+ +-----------+ +-----------+ +-----------+ -// | | | | -// |------------------/-----------------/ | -// v | -// +-----------+ +-----------+ +-----------+ | -// | Type 1 |---->| Type 2 |---->| Type FB | | -// +-----------+ +-----------+ +-----------+ | -// | | | | -// <----------/-----------------/------------------/------------------/ -// r e t u r n p a t h -// -// After an optimized IC stub successfully executes, it passes control to the type stub -// chain to check the resulting type. If no type stub succeeds, and the monitor fallback -// stub is reached, the monitor fallback stub performs a manual monitor, and also adds the -// appropriate type stub to the chain. -// -// The IC's main fallback, in addition to generating new mainline stubs, also generates -// type stubs as reflected by its returned value. -// -// NOTE: The type IC chain returns directly to the mainline code, not back to the -// stub it was entered from. Thus, entering a type IC is a matter of a |jump|, not -// a |call|. This allows us to safely call a VM Monitor function from within the monitor IC's -// fallback chain, since the return address (needed for stack inspection) is preserved. -// -// -// TypeUpdate ICs -// -------------- -// Update ICs update heap typesets and monitor the input types of setter operations -// (setelem, setprop inputs, etc.). 
Unlike monitor ICs, they are not shared -// between stubs on an IC, but instead are kept track of on a per-stub basis. -// -// This is because the main stubs for the operation will each identify a potentially -// different ObjectGroup to update. New input types must be tracked on a group-to- -// group basis. -// -// Type-update ICs cannot be called in tail position (they must return to the -// the stub that called them so that the stub may continue to perform its original -// purpose). This means that any VMCall to perform a manual type update from C++ must be -// done from within the main IC stub. This necessitates that the stub enter a -// "BaselineStub" frame before making the call. -// -// If the type-update IC chain could itself make the VMCall, then the BaselineStub frame -// must be entered before calling the type-update chain, and exited afterward. This -// is very expensive for a common case where we expect the type-update fallback to not -// be called. To avoid the cost of entering and exiting a BaselineStub frame when -// using the type-update IC chain, we design the chain to not perform any VM-calls -// in its fallback. -// -// Instead, the type-update IC chain is responsible for returning 1 or 0, depending -// on if a type is represented in the chain or not. The fallback stub simply returns -// 0, and all other optimized stubs return 1. -// If the chain returns 1, then the IC stub goes ahead and performs its operation. -// If the chain returns 0, then the IC stub performs a call to the fallback function -// inline (doing the requisite BaselineStub frame enter/exit). -// This allows us to avoid the expensive subfram enter/exit in the common case. -// -// r e t u r n p a t h -// <--------------.-----------------.-----------------.-----------------. 
-// | | | | -// +-----------+ +-----------+ +-----------+ +-----------+ -// ---->| Stub 1 |---->| Stub 2 |---->| Stub 3 |---->| FB Stub | -// +-----------+ +-----------+ +-----------+ +-----------+ -// | ^ | ^ | ^ -// | | | | | | -// | | | | | |----------------. -// | | | | v |1 |0 -// | | | | +-----------+ +-----------+ -// | | | | | Type 3.1 |--->| FB 3 | -// | | | | +-----------+ +-----------+ -// | | | | -// | | | \-------------.-----------------. -// | | | | | | -// | | v |1 |1 |0 -// | | +-----------+ +-----------+ +-----------+ -// | | | Type 2.1 |---->| Type 2.2 |---->| FB 2 | -// | | +-----------+ +-----------+ +-----------+ -// | | -// | \-------------.-----------------. -// | | | | -// v |1 |1 |0 -// +-----------+ +-----------+ +-----------+ -// | Type 1.1 |---->| Type 1.2 |---->| FB 1 | -// +-----------+ +-----------+ +-----------+ -// - -class ICStub; -class ICFallbackStub; - -#define FORWARD_DECLARE_STUBS(kindName) class IC##kindName; - IC_BASELINE_STUB_KIND_LIST(FORWARD_DECLARE_STUBS) - IC_SHARED_STUB_KIND_LIST(FORWARD_DECLARE_STUBS) -#undef FORWARD_DECLARE_STUBS - -#ifdef JS_JITSPEW -void FallbackICSpew(JSContext* cx, ICFallbackStub* stub, const char* fmt, ...) - MOZ_FORMAT_PRINTF(3, 4); -void TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...) - MOZ_FORMAT_PRINTF(3, 4); -#else -#define FallbackICSpew(...) -#define TypeFallbackICSpew(...) -#endif - -// -// An entry in the JIT IC descriptor table. -// -class ICEntry -{ - private: - // A pointer to the shared IC stub for this instruction. - ICStub* firstStub_; - - // Offset from the start of the JIT code where the IC - // load and call instructions are. - uint32_t returnOffset_; - - // The PC of this IC's bytecode op within the JSScript. - uint32_t pcOffset_ : 28; - - public: - enum Kind { - // A for-op IC entry. - Kind_Op = 0, - - // A non-op IC entry. - Kind_NonOp, - - // A fake IC entry for returning from a callVM for an op. 
- Kind_CallVM, - - // A fake IC entry for returning from a callVM not for an op (e.g., in - // the prologue). - Kind_NonOpCallVM, - - // A fake IC entry for returning from a callVM to after the - // warmup counter. - Kind_WarmupCounter, - - // A fake IC entry for returning from a callVM to the interrupt - // handler via the over-recursion check on function entry. - Kind_StackCheck, - - // As above, but for the early check. See emitStackCheck. - Kind_EarlyStackCheck, - - // A fake IC entry for returning from DebugTrapHandler. - Kind_DebugTrap, - - // A fake IC entry for returning from a callVM to - // Debug{Prologue,AfterYield,Epilogue}. - Kind_DebugPrologue, - Kind_DebugAfterYield, - Kind_DebugEpilogue, - - Kind_Invalid - }; - - private: - // What this IC is for. - Kind kind_ : 4; - - // Set the kind and asserts that it's sane. - void setKind(Kind kind) { - MOZ_ASSERT(kind < Kind_Invalid); - kind_ = kind; - MOZ_ASSERT(this->kind() == kind); - } - - public: - ICEntry(uint32_t pcOffset, Kind kind) - : firstStub_(nullptr), returnOffset_(), pcOffset_(pcOffset) - { - // The offset must fit in at least 28 bits, since we shave off 4 for - // the Kind enum. - MOZ_ASSERT(pcOffset_ == pcOffset); - JS_STATIC_ASSERT(BaselineScript::MAX_JSSCRIPT_LENGTH <= (1u << 28) - 1); - MOZ_ASSERT(pcOffset <= BaselineScript::MAX_JSSCRIPT_LENGTH); - setKind(kind); - } - - CodeOffset returnOffset() const { - return CodeOffset(returnOffset_); - } - - void setReturnOffset(CodeOffset offset) { - MOZ_ASSERT(offset.offset() <= (size_t) UINT32_MAX); - returnOffset_ = (uint32_t) offset.offset(); - } - - uint32_t pcOffset() const { - return pcOffset_; - } - - jsbytecode* pc(JSScript* script) const { - return script->offsetToPC(pcOffset_); - } - - Kind kind() const { - // MSVC compiles enums as signed. 
- return Kind(kind_ & 0xf); - } - bool isForOp() const { - return kind() == Kind_Op; - } - - void setFakeKind(Kind kind) { - MOZ_ASSERT(kind != Kind_Op && kind != Kind_NonOp); - setKind(kind); - } - - bool hasStub() const { - return firstStub_ != nullptr; - } - ICStub* firstStub() const { - MOZ_ASSERT(hasStub()); - return firstStub_; - } - - ICFallbackStub* fallbackStub() const; - - void setFirstStub(ICStub* stub) { - firstStub_ = stub; - } - - static inline size_t offsetOfFirstStub() { - return offsetof(ICEntry, firstStub_); - } - - inline ICStub** addressOfFirstStub() { - return &firstStub_; - } - - protected: - void traceEntry(JSTracer* trc); -}; - -class BaselineICEntry : public ICEntry -{ - public: - BaselineICEntry(uint32_t pcOffset, Kind kind) - : ICEntry(pcOffset, kind) - { } - - void trace(JSTracer* trc); -}; - -class IonICEntry : public ICEntry -{ - JSScript* script_; - - public: - IonICEntry(uint32_t pcOffset, Kind kind, JSScript* script) - : ICEntry(pcOffset, kind), - script_(script) - { } - - JSScript* script() { - return script_; - } - - void trace(JSTracer* trc); -}; - -class ICMonitoredStub; -class ICMonitoredFallbackStub; -class ICUpdatedStub; - -// Constant iterator that traverses arbitrary chains of ICStubs. -// No requirements are made of the ICStub used to construct this -// iterator, aside from that the stub be part of a nullptr-terminated -// chain. -// The iterator is considered to be at its end once it has been -// incremented _past_ the last stub. Thus, if 'atEnd()' returns -// true, the '*' and '->' operations are not valid. 
-class ICStubConstIterator -{ - friend class ICStub; - friend class ICFallbackStub; - - private: - ICStub* currentStub_; - - public: - explicit ICStubConstIterator(ICStub* currentStub) : currentStub_(currentStub) {} - - static ICStubConstIterator StartingAt(ICStub* stub) { - return ICStubConstIterator(stub); - } - static ICStubConstIterator End(ICStub* stub) { - return ICStubConstIterator(nullptr); - } - - bool operator ==(const ICStubConstIterator& other) const { - return currentStub_ == other.currentStub_; - } - bool operator !=(const ICStubConstIterator& other) const { - return !(*this == other); - } - - ICStubConstIterator& operator++(); - - ICStubConstIterator operator++(int) { - ICStubConstIterator oldThis(*this); - ++(*this); - return oldThis; - } - - ICStub* operator*() const { - MOZ_ASSERT(currentStub_); - return currentStub_; - } - - ICStub* operator ->() const { - MOZ_ASSERT(currentStub_); - return currentStub_; - } - - bool atEnd() const { - return currentStub_ == nullptr; - } -}; - -// Iterator that traverses "regular" IC chains that start at an ICEntry -// and are terminated with an ICFallbackStub. -// -// The iterator is considered to be at its end once it is _at_ the -// fallback stub. Thus, unlike the ICStubConstIterator, operators -// '*' and '->' are valid even if 'atEnd()' returns true - they -// will act on the fallback stub. -// -// This iterator also allows unlinking of stubs being traversed. -// Note that 'unlink' does not implicitly advance the iterator - -// it must be advanced explicitly using '++'. -class ICStubIterator -{ - friend class ICFallbackStub; - - private: - ICEntry* icEntry_; - ICFallbackStub* fallbackStub_; - ICStub* previousStub_; - ICStub* currentStub_; - bool unlinked_; - - explicit ICStubIterator(ICFallbackStub* fallbackStub, bool end=false); - public: - - bool operator ==(const ICStubIterator& other) const { - // == should only ever be called on stubs from the same chain. 
- MOZ_ASSERT(icEntry_ == other.icEntry_); - MOZ_ASSERT(fallbackStub_ == other.fallbackStub_); - return currentStub_ == other.currentStub_; - } - bool operator !=(const ICStubIterator& other) const { - return !(*this == other); - } - - ICStubIterator& operator++(); - - ICStubIterator operator++(int) { - ICStubIterator oldThis(*this); - ++(*this); - return oldThis; - } - - ICStub* operator*() const { - return currentStub_; - } - - ICStub* operator ->() const { - return currentStub_; - } - - bool atEnd() const { - return currentStub_ == (ICStub*) fallbackStub_; - } - - void unlink(JSContext* cx); -}; - -// -// Base class for all IC stubs. -// -class ICStub -{ - friend class ICFallbackStub; - - public: - enum Kind { - INVALID = 0, -#define DEF_ENUM_KIND(kindName) kindName, - IC_BASELINE_STUB_KIND_LIST(DEF_ENUM_KIND) - IC_SHARED_STUB_KIND_LIST(DEF_ENUM_KIND) -#undef DEF_ENUM_KIND - LIMIT - }; - - static bool IsValidKind(Kind k) { - return (k > INVALID) && (k < LIMIT); - } - static bool IsCacheIRKind(Kind k) { - return k == CacheIR_Regular || k == CacheIR_Monitored || k == CacheIR_Updated; - } - - static const char* KindString(Kind k) { - switch(k) { -#define DEF_KIND_STR(kindName) case kindName: return #kindName; - IC_BASELINE_STUB_KIND_LIST(DEF_KIND_STR) - IC_SHARED_STUB_KIND_LIST(DEF_KIND_STR) -#undef DEF_KIND_STR - default: - MOZ_CRASH("Invalid kind."); - } - } - - enum Trait { - Regular = 0x0, - Fallback = 0x1, - Monitored = 0x2, - MonitoredFallback = 0x3, - Updated = 0x4 - }; - - void traceCode(JSTracer* trc, const char* name); - void updateCode(JitCode* stubCode); - void trace(JSTracer* trc); - - template - static T* New(JSContext* cx, ICStubSpace* space, JitCode* code, Args&&... args) { - if (!code) - return nullptr; - T* result = space->allocate(code, std::forward(args)...); - if (!result) - ReportOutOfMemory(cx); - return result; - } - - protected: - // The raw jitcode to call for this stub. - uint8_t* stubCode_; - - // Pointer to next IC stub. 
This is null for the last IC stub, which should - // either be a fallback or inert IC stub. - ICStub* next_; - - // A 16-bit field usable by subtypes of ICStub for subtype-specific small-info - uint16_t extra_; - - // The kind of the stub. - // High bit is 'isFallback' flag. - // Second high bit is 'isMonitored' flag. - Trait trait_ : 3; - Kind kind_ : 13; - - inline ICStub(Kind kind, JitCode* stubCode) - : stubCode_(stubCode->raw()), - next_(nullptr), - extra_(0), - trait_(Regular), - kind_(kind) - { - MOZ_ASSERT(stubCode != nullptr); - } - - inline ICStub(Kind kind, Trait trait, JitCode* stubCode) - : stubCode_(stubCode->raw()), - next_(nullptr), - extra_(0), - trait_(trait), - kind_(kind) - { - MOZ_ASSERT(stubCode != nullptr); - } - - inline Trait trait() const { - // Workaround for MSVC reading trait_ as signed value. - return (Trait)(trait_ & 0x7); - } - - public: - - inline Kind kind() const { - return static_cast(kind_); - } - - inline bool isFallback() const { - return trait() == Fallback || trait() == MonitoredFallback; - } - - inline bool isMonitored() const { - return trait() == Monitored; - } - - inline bool isUpdated() const { - return trait() == Updated; - } - - inline bool isMonitoredFallback() const { - return trait() == MonitoredFallback; - } - - inline const ICFallbackStub* toFallbackStub() const { - MOZ_ASSERT(isFallback()); - return reinterpret_cast(this); - } - - inline ICFallbackStub* toFallbackStub() { - MOZ_ASSERT(isFallback()); - return reinterpret_cast(this); - } - - inline const ICMonitoredStub* toMonitoredStub() const { - MOZ_ASSERT(isMonitored()); - return reinterpret_cast(this); - } - - inline ICMonitoredStub* toMonitoredStub() { - MOZ_ASSERT(isMonitored()); - return reinterpret_cast(this); - } - - inline const ICMonitoredFallbackStub* toMonitoredFallbackStub() const { - MOZ_ASSERT(isMonitoredFallback()); - return reinterpret_cast(this); - } - - inline ICMonitoredFallbackStub* toMonitoredFallbackStub() { - 
MOZ_ASSERT(isMonitoredFallback()); - return reinterpret_cast(this); - } - - inline const ICUpdatedStub* toUpdatedStub() const { - MOZ_ASSERT(isUpdated()); - return reinterpret_cast(this); - } - - inline ICUpdatedStub* toUpdatedStub() { - MOZ_ASSERT(isUpdated()); - return reinterpret_cast(this); - } - -#define KIND_METHODS(kindName) \ - inline bool is##kindName() const { return kind() == kindName; } \ - inline const IC##kindName* to##kindName() const { \ - MOZ_ASSERT(is##kindName()); \ - return reinterpret_cast(this); \ - } \ - inline IC##kindName* to##kindName() { \ - MOZ_ASSERT(is##kindName()); \ - return reinterpret_cast(this); \ - } - IC_BASELINE_STUB_KIND_LIST(KIND_METHODS) - IC_SHARED_STUB_KIND_LIST(KIND_METHODS) -#undef KIND_METHODS - - inline ICStub* next() const { - return next_; - } - - inline bool hasNext() const { - return next_ != nullptr; - } - - inline void setNext(ICStub* stub) { - // Note: next_ only needs to be changed under the compilation lock for - // non-type-monitor/update ICs. - next_ = stub; - } - - inline ICStub** addressOfNext() { - return &next_; - } - - inline JitCode* jitCode() { - return JitCode::FromExecutable(stubCode_); - } - - inline uint8_t* rawStubCode() const { - return stubCode_; - } - - // This method is not valid on TypeUpdate stub chains! - inline ICFallbackStub* getChainFallback() { - ICStub* lastStub = this; - while (lastStub->next_) - lastStub = lastStub->next_; - MOZ_ASSERT(lastStub->isFallback()); - return lastStub->toFallbackStub(); - } - - inline ICStubConstIterator beginHere() { - return ICStubConstIterator::StartingAt(this); - } - - static inline size_t offsetOfNext() { - return offsetof(ICStub, next_); - } - - static inline size_t offsetOfStubCode() { - return offsetof(ICStub, stubCode_); - } - - static inline size_t offsetOfExtra() { - return offsetof(ICStub, extra_); - } - - static bool NonCacheIRStubMakesGCCalls(Kind kind); - bool makesGCCalls() const; - - // Optimized stubs get purged on GC. 
But some stubs can be active on the - // stack during GC - specifically the ones that can make calls. To ensure - // that these do not get purged, all stubs that can make calls are allocated - // in the fallback stub space. - bool allocatedInFallbackSpace() const { - MOZ_ASSERT(next()); - return makesGCCalls(); - } -}; - -class ICFallbackStub : public ICStub -{ - friend class ICStubConstIterator; - protected: - // Fallback stubs need these fields to easily add new stubs to - // the linked list of stubs for an IC. - - // The IC entry for this linked list of stubs. - ICEntry* icEntry_; - - // The number of stubs kept in the IC entry. - ICState state_; - - // A pointer to the location stub pointer that needs to be - // changed to add a new "last" stub immediately before the fallback - // stub. This'll start out pointing to the icEntry's "firstStub_" - // field, and as new stubs are added, it'll point to the current - // last stub's "next_" field. - ICStub** lastStubPtrAddr_; - - ICFallbackStub(Kind kind, JitCode* stubCode) - : ICStub(kind, ICStub::Fallback, stubCode), - icEntry_(nullptr), - state_(), - lastStubPtrAddr_(nullptr) {} - - ICFallbackStub(Kind kind, Trait trait, JitCode* stubCode) - : ICStub(kind, trait, stubCode), - icEntry_(nullptr), - state_(), - lastStubPtrAddr_(nullptr) - { - MOZ_ASSERT(trait == ICStub::Fallback || - trait == ICStub::MonitoredFallback); - } - - public: - inline ICEntry* icEntry() const { - return icEntry_; - } - - inline size_t numOptimizedStubs() const { - return state_.numOptimizedStubs(); - } - - void setInvalid() { - state_.setInvalid(); - } - - bool invalid() const { - return state_.invalid(); - } - - ICState& state() { - return state_; - } - - // The icEntry and lastStubPtrAddr_ fields can't be initialized when the stub is - // created since the stub is created at compile time, and we won't know the IC entry - // address until after compile when the JitScript is created. 
This method - // allows these fields to be fixed up at that point. - void fixupICEntry(ICEntry* icEntry) { - MOZ_ASSERT(icEntry_ == nullptr); - MOZ_ASSERT(lastStubPtrAddr_ == nullptr); - icEntry_ = icEntry; - lastStubPtrAddr_ = icEntry_->addressOfFirstStub(); - } - - // Add a new stub to the IC chain terminated by this fallback stub. - void addNewStub(ICStub* stub) { - MOZ_ASSERT(!invalid()); - MOZ_ASSERT(*lastStubPtrAddr_ == this); - MOZ_ASSERT(stub->next() == nullptr); - stub->setNext(this); - *lastStubPtrAddr_ = stub; - lastStubPtrAddr_ = stub->addressOfNext(); - state_.trackAttached(); - } - - ICStubConstIterator beginChainConst() const { - return ICStubConstIterator(icEntry_->firstStub()); - } - - ICStubIterator beginChain() { - return ICStubIterator(this); - } - - bool hasStub(ICStub::Kind kind) const { - for (ICStubConstIterator iter = beginChainConst(); !iter.atEnd(); iter++) { - if (iter->kind() == kind) - return true; - } - return false; - } - - unsigned numStubsWithKind(ICStub::Kind kind) const { - unsigned count = 0; - for (ICStubConstIterator iter = beginChainConst(); !iter.atEnd(); iter++) { - if (iter->kind() == kind) - count++; - } - return count; - } - - void discardStubs(JSContext* cx); - - void unlinkStub(Zone* zone, ICStub* prev, ICStub* stub); - void unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind); -}; - -// Base class for Trait::Regular CacheIR stubs -class ICCacheIR_Regular : public ICStub -{ - const CacheIRStubInfo* stubInfo_; - - public: - ICCacheIR_Regular(JitCode* stubCode, const CacheIRStubInfo* stubInfo) - : ICStub(ICStub::CacheIR_Regular, stubCode), - stubInfo_(stubInfo) - {} - - static ICCacheIR_Regular* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub, - ICCacheIR_Regular& other); - - void notePreliminaryObject() { - extra_ = 1; - } - bool hasPreliminaryObject() const { - return extra_; - } - - const CacheIRStubInfo* stubInfo() const { - return stubInfo_; - } - - uint8_t* stubDataStart(); -}; - -// Monitored 
stubs are IC stubs that feed a single resulting value out to a -// type monitor operation. -class ICMonitoredStub : public ICStub -{ - protected: - // Pointer to the start of the type monitoring stub chain. - ICStub* firstMonitorStub_; - - ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub); - - public: - inline void updateFirstMonitorStub(ICStub* monitorStub) { - // This should only be called once: when the first optimized monitor stub - // is added to the type monitor IC chain. - MOZ_ASSERT(firstMonitorStub_ && firstMonitorStub_->isTypeMonitor_Fallback()); - firstMonitorStub_ = monitorStub; - } - inline void resetFirstMonitorStub(ICStub* monitorFallback) { - MOZ_ASSERT(monitorFallback->isTypeMonitor_Fallback()); - firstMonitorStub_ = monitorFallback; - } - inline ICStub* firstMonitorStub() const { - return firstMonitorStub_; - } - - static inline size_t offsetOfFirstMonitorStub() { - return offsetof(ICMonitoredStub, firstMonitorStub_); - } -}; - -class ICCacheIR_Monitored : public ICMonitoredStub -{ - const CacheIRStubInfo* stubInfo_; - - public: - ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub, - const CacheIRStubInfo* stubInfo) - : ICMonitoredStub(ICStub::CacheIR_Monitored, stubCode, firstMonitorStub), - stubInfo_(stubInfo) - {} - - static ICCacheIR_Monitored* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub, - ICCacheIR_Monitored& other); - - void notePreliminaryObject() { - extra_ = 1; - } - bool hasPreliminaryObject() const { - return extra_; - } - - const CacheIRStubInfo* stubInfo() const { - return stubInfo_; - } - - uint8_t* stubDataStart(); -}; - -// Updated stubs are IC stubs that use a TypeUpdate IC to track -// the status of heap typesets that need to be updated. -class ICUpdatedStub : public ICStub -{ - protected: - // Pointer to the start of the type updating stub chain. 
- ICStub* firstUpdateStub_; - - static const uint32_t MAX_OPTIMIZED_STUBS = 8; - uint32_t numOptimizedStubs_; - - ICUpdatedStub(Kind kind, JitCode* stubCode) - : ICStub(kind, ICStub::Updated, stubCode), - firstUpdateStub_(nullptr), - numOptimizedStubs_(0) - {} - - public: - MOZ_MUST_USE bool initUpdatingChain(JSContext* cx, ICStubSpace* space); - - MOZ_MUST_USE bool addUpdateStubForValue(JSContext* cx, HandleScript script, HandleObject obj, - HandleObjectGroup group, HandleId id, HandleValue val); - - void addOptimizedUpdateStub(ICStub* stub) { - if (firstUpdateStub_->isTypeUpdate_Fallback()) { - stub->setNext(firstUpdateStub_); - firstUpdateStub_ = stub; - } else { - ICStub* iter = firstUpdateStub_; - MOZ_ASSERT(iter->next() != nullptr); - while (!iter->next()->isTypeUpdate_Fallback()) - iter = iter->next(); - MOZ_ASSERT(iter->next()->next() == nullptr); - stub->setNext(iter->next()); - iter->setNext(stub); - } - - numOptimizedStubs_++; - } - - inline ICStub* firstUpdateStub() const { - return firstUpdateStub_; - } - - void resetUpdateStubChain(Zone* zone); - - bool hasTypeUpdateStub(ICStub::Kind kind) { - ICStub* stub = firstUpdateStub_; - do { - if (stub->kind() == kind) - return true; - - stub = stub->next(); - } while (stub); - - return false; - } - - inline uint32_t numOptimizedStubs() const { - return numOptimizedStubs_; - } - - static inline size_t offsetOfFirstUpdateStub() { - return offsetof(ICUpdatedStub, firstUpdateStub_); - } -}; - -class ICCacheIR_Updated : public ICUpdatedStub -{ - const CacheIRStubInfo* stubInfo_; - GCPtrObjectGroup updateStubGroup_; - GCPtrId updateStubId_; - - public: - ICCacheIR_Updated(JitCode* stubCode, const CacheIRStubInfo* stubInfo) - : ICUpdatedStub(ICStub::CacheIR_Updated, stubCode), - stubInfo_(stubInfo), - updateStubGroup_(nullptr), - updateStubId_(JSID_EMPTY) - {} - - static ICCacheIR_Updated* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub, - ICCacheIR_Updated& other); - - GCPtrObjectGroup& 
updateStubGroup() { - return updateStubGroup_; - } - GCPtrId& updateStubId() { - return updateStubId_; - } - - void notePreliminaryObject() { - extra_ = 1; - } - bool hasPreliminaryObject() const { - return extra_; - } - - const CacheIRStubInfo* stubInfo() const { - return stubInfo_; - } - - uint8_t* stubDataStart(); -}; - -// Base class for stubcode compilers. -class ICStubCompiler -{ - // Prevent GC in the middle of stub compilation. - js::gc::AutoSuppressGC suppressGC; - - public: - using Engine = ICStubEngine; - - protected: - JSContext* cx; - ICStub::Kind kind; - Engine engine_; - bool inStubFrame_; - -#ifdef DEBUG - bool entersStubFrame_; - uint32_t framePushedAtEnterStubFrame_; -#endif - - // By default the stubcode key is just the kind. - virtual int32_t getKey() const { - return static_cast(engine_) | - (static_cast(kind) << 1); - } - - virtual MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) = 0; - virtual void postGenerateStubCode(MacroAssembler& masm, Handle genCode) {} - - JitCode* getStubCode(); - - ICStubCompiler(JSContext* cx, ICStub::Kind kind, Engine engine) - : suppressGC(cx), cx(cx), kind(kind), engine_(engine), inStubFrame_(false) -#ifdef DEBUG - , entersStubFrame_(false), framePushedAtEnterStubFrame_(0) -#endif - {} - - // Push a payload specialized per compiler needed to execute stubs. - void PushStubPayload(MacroAssembler& masm, Register scratch); - void pushStubPayload(MacroAssembler& masm, Register scratch); - - // Emits a tail call to a VMFunction wrapper. - MOZ_MUST_USE bool tailCallVM(const VMFunction& fun, MacroAssembler& masm); - - // Emits a normal (non-tail) call to a VMFunction wrapper. - MOZ_MUST_USE bool callVM(const VMFunction& fun, MacroAssembler& masm); - - // A stub frame is used when a stub wants to call into the VM without - // performing a tail call. This is required for the return address - // to pc mapping to work. 
- void enterStubFrame(MacroAssembler& masm, Register scratch); - void assumeStubFrame(); - void leaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false); - - // Some stubs need to emit Gecko Profiler updates. This emits the guarding - // jitcode for those stubs. If profiling is not enabled, jumps to the - // given label. - void guardProfilingEnabled(MacroAssembler& masm, Register scratch, Label* skip); - - public: - static inline AllocatableGeneralRegisterSet availableGeneralRegs(size_t numInputs) { - AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); -#if defined(JS_CODEGEN_ARM) - MOZ_ASSERT(!regs.has(BaselineStackReg)); - MOZ_ASSERT(!regs.has(ICTailCallReg)); - regs.take(BaselineSecondScratchReg); -#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) - MOZ_ASSERT(!regs.has(BaselineStackReg)); - MOZ_ASSERT(!regs.has(ICTailCallReg)); - MOZ_ASSERT(!regs.has(BaselineSecondScratchReg)); -#elif defined(JS_CODEGEN_ARM64) - MOZ_ASSERT(!regs.has(PseudoStackPointer)); - MOZ_ASSERT(!regs.has(RealStackPointer)); - MOZ_ASSERT(!regs.has(ICTailCallReg)); -#else - MOZ_ASSERT(!regs.has(BaselineStackReg)); -#endif - regs.take(BaselineFrameReg); - regs.take(ICStubReg); -#ifdef JS_CODEGEN_X64 - regs.take(ExtractTemp0); - regs.take(ExtractTemp1); -#endif - - switch (numInputs) { - case 0: - break; - case 1: - regs.take(R0); - break; - case 2: - regs.take(R0); - regs.take(R1); - break; - default: - MOZ_CRASH("Invalid numInputs"); - } - - return regs; - } - - protected: - template - T* newStub(Args&&... 
args) { - return ICStub::New(cx, std::forward(args)...); - } - - public: - virtual ICStub* getStub(ICStubSpace* space) = 0; - - static ICStubSpace* StubSpaceForStub(bool makesGCCalls, JSScript* outerScript, Engine engine) { - if (makesGCCalls) { - if (engine == ICStubCompiler::Engine::Baseline) - return outerScript->baselineScript()->fallbackStubSpace(); - return outerScript->ionScript()->fallbackStubSpace(); - } - return outerScript->zone()->jitZone()->optimizedStubSpace(); - } - ICStubSpace* getStubSpace(JSScript* outerScript) { - return StubSpaceForStub(ICStub::NonCacheIRStubMakesGCCalls(kind), outerScript, engine_); - } -}; - -class SharedStubInfo -{ - BaselineFrame* maybeFrame_; - RootedScript outerScript_; - RootedScript innerScript_; - ICEntry* icEntry_; - - public: - SharedStubInfo(JSContext* cx, void* payload, ICEntry* entry); - - ICStubCompiler::Engine engine() const { - return maybeFrame_ - ? ICStubCompiler::Engine::Baseline - : ICStubCompiler::Engine::IonSharedIC; - } - - HandleScript script() const { - MOZ_ASSERT(innerScript_); - return innerScript_; - } - - HandleScript innerScript() const { - MOZ_ASSERT(innerScript_); - return innerScript_; - } - - HandleScript outerScript(JSContext* cx); - - jsbytecode* pc() const { - return icEntry()->pc(innerScript()); - } - - uint32_t pcOffset() const { - return script()->pcToOffset(pc()); - } - - BaselineFrame* frame() const { - MOZ_ASSERT(maybeFrame_); - return maybeFrame_; - } - - BaselineFrame* maybeFrame() const { - return maybeFrame_; - } - - ICEntry* icEntry() const { - return icEntry_; - } -}; - -// Monitored fallback stubs - as the name implies. -class ICMonitoredFallbackStub : public ICFallbackStub -{ - protected: - // Pointer to the fallback monitor stub. Created lazily by - // getFallbackMonitorStub if needed. 
- ICTypeMonitor_Fallback* fallbackMonitorStub_; - - ICMonitoredFallbackStub(Kind kind, JitCode* stubCode) - : ICFallbackStub(kind, ICStub::MonitoredFallback, stubCode), - fallbackMonitorStub_(nullptr) {} - - public: - MOZ_MUST_USE bool initMonitoringChain(JSContext* cx, JSScript* script); - MOZ_MUST_USE bool addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, - StackTypeSet* types, HandleValue val); - - ICTypeMonitor_Fallback* maybeFallbackMonitorStub() const { - return fallbackMonitorStub_; - } - ICTypeMonitor_Fallback* getFallbackMonitorStub(JSContext* cx, JSScript* script) { - if (!fallbackMonitorStub_ && !initMonitoringChain(cx, script)) - return nullptr; - MOZ_ASSERT(fallbackMonitorStub_); - return fallbackMonitorStub_; - } - - static inline size_t offsetOfFallbackMonitorStub() { - return offsetof(ICMonitoredFallbackStub, fallbackMonitorStub_); - } -}; - - -// Base class for stub compilers that can generate multiple stubcodes. -// These compilers need access to the JSOp they are compiling for. -class ICMultiStubCompiler : public ICStubCompiler -{ - protected: - JSOp op; - - // Stub keys for multi-stub kinds are composed of both the kind - // and the op they are compiled for. - virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(op) << 17); - } - - ICMultiStubCompiler(JSContext* cx, ICStub::Kind kind, JSOp op, Engine engine) - : ICStubCompiler(cx, kind, engine), op(op) {} -}; - -// TypeCheckPrimitiveSetStub -// Base class for IC stubs (TypeUpdate or TypeMonitor) that check that a given -// value's type falls within a set of primitive types. 
- -class TypeCheckPrimitiveSetStub : public ICStub -{ - friend class ICStubSpace; - protected: - inline static uint16_t TypeToFlag(JSValueType type) { - return 1u << static_cast(type); - } - - inline static uint16_t ValidFlags() { - return ((TypeToFlag(JSVAL_TYPE_OBJECT) << 1) - 1) & ~TypeToFlag(JSVAL_TYPE_MAGIC); - } - - TypeCheckPrimitiveSetStub(Kind kind, JitCode* stubCode, uint16_t flags) - : ICStub(kind, stubCode) - { - MOZ_ASSERT(kind == TypeMonitor_PrimitiveSet || kind == TypeUpdate_PrimitiveSet); - MOZ_ASSERT(flags && !(flags & ~ValidFlags())); - extra_ = flags; - } - - TypeCheckPrimitiveSetStub* updateTypesAndCode(uint16_t flags, JitCode* code) { - MOZ_ASSERT(flags && !(flags & ~ValidFlags())); - if (!code) - return nullptr; - extra_ = flags; - updateCode(code); - return this; - } - - public: - uint16_t typeFlags() const { - return extra_; - } - - bool containsType(JSValueType type) const { - MOZ_ASSERT(type <= JSVAL_TYPE_OBJECT); - MOZ_ASSERT(type != JSVAL_TYPE_MAGIC); - return extra_ & TypeToFlag(type); - } - - ICTypeMonitor_PrimitiveSet* toMonitorStub() { - return toTypeMonitor_PrimitiveSet(); - } - - ICTypeUpdate_PrimitiveSet* toUpdateStub() { - return toTypeUpdate_PrimitiveSet(); - } - - class Compiler : public ICStubCompiler { - protected: - TypeCheckPrimitiveSetStub* existingStub_; - uint16_t flags_; - - virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(flags_) << 17); - } - - public: - Compiler(JSContext* cx, Kind kind, TypeCheckPrimitiveSetStub* existingStub, - JSValueType type) - : ICStubCompiler(cx, kind, Engine::Baseline), - existingStub_(existingStub), - flags_((existingStub ? 
existingStub->typeFlags() : 0) | TypeToFlag(type)) - { - MOZ_ASSERT_IF(existingStub_, flags_ != existingStub_->typeFlags()); - } - - TypeCheckPrimitiveSetStub* updateStub() { - MOZ_ASSERT(existingStub_); - return existingStub_->updateTypesAndCode(flags_, getStubCode()); - } - }; -}; - -// TypeMonitor - -// The TypeMonitor fallback stub is not always a regular fallback stub. When -// used for monitoring the values pushed by a bytecode it doesn't hold a -// pointer to the IC entry, but rather back to the main fallback stub for the -// IC (from which a pointer to the IC entry can be retrieved). When monitoring -// the types of 'this', arguments or other values with no associated IC, there -// is no main fallback stub, and the IC entry is referenced directly. -class ICTypeMonitor_Fallback : public ICStub -{ - friend class ICStubSpace; - - static const uint32_t MAX_OPTIMIZED_STUBS = 8; - - // Pointer to the main fallback stub for the IC or to the main IC entry, - // depending on hasFallbackStub. - union { - ICMonitoredFallbackStub* mainFallbackStub_; - ICEntry* icEntry_; - }; - - // Pointer to the first monitor stub. - ICStub* firstMonitorStub_; - - // Address of the last monitor stub's field pointing to this - // fallback monitor stub. This will get updated when new - // monitor stubs are created and added. - ICStub** lastMonitorStubPtrAddr_; - - // Count of optimized type monitor stubs in this chain. - uint32_t numOptimizedMonitorStubs_ : 7; - - uint32_t invalid_ : 1; - - // Whether this has a fallback stub referring to the IC entry. - bool hasFallbackStub_ : 1; - - // Index of 'this' or argument which is being monitored, or BYTECODE_INDEX - // if this is monitoring the types of values pushed at some bytecode. 
- uint32_t argumentIndex_ : 23; - - static const uint32_t BYTECODE_INDEX = (1 << 23) - 1; - - ICTypeMonitor_Fallback(JitCode* stubCode, ICMonitoredFallbackStub* mainFallbackStub, - uint32_t argumentIndex) - : ICStub(ICStub::TypeMonitor_Fallback, stubCode), - mainFallbackStub_(mainFallbackStub), - firstMonitorStub_(thisFromCtor()), - lastMonitorStubPtrAddr_(nullptr), - numOptimizedMonitorStubs_(0), - invalid_(false), - hasFallbackStub_(mainFallbackStub != nullptr), - argumentIndex_(argumentIndex) - { } - - ICTypeMonitor_Fallback* thisFromCtor() { - return this; - } - - void addOptimizedMonitorStub(ICStub* stub) { - MOZ_ASSERT(!invalid()); - stub->setNext(this); - - MOZ_ASSERT((lastMonitorStubPtrAddr_ != nullptr) == - (numOptimizedMonitorStubs_ || !hasFallbackStub_)); - - if (lastMonitorStubPtrAddr_) - *lastMonitorStubPtrAddr_ = stub; - - if (numOptimizedMonitorStubs_ == 0) { - MOZ_ASSERT(firstMonitorStub_ == this); - firstMonitorStub_ = stub; - } else { - MOZ_ASSERT(firstMonitorStub_ != nullptr); - } - - lastMonitorStubPtrAddr_ = stub->addressOfNext(); - numOptimizedMonitorStubs_++; - } - - public: - bool hasStub(ICStub::Kind kind) { - ICStub* stub = firstMonitorStub_; - do { - if (stub->kind() == kind) - return true; - - stub = stub->next(); - } while (stub); - - return false; - } - - inline ICFallbackStub* mainFallbackStub() const { - MOZ_ASSERT(hasFallbackStub_); - return mainFallbackStub_; - } - - inline ICEntry* icEntry() const { - return hasFallbackStub_ ? 
mainFallbackStub()->icEntry() : icEntry_; - } - - inline ICStub* firstMonitorStub() const { - return firstMonitorStub_; - } - - static inline size_t offsetOfFirstMonitorStub() { - return offsetof(ICTypeMonitor_Fallback, firstMonitorStub_); - } - - inline uint32_t numOptimizedMonitorStubs() const { - return numOptimizedMonitorStubs_; - } - - void setInvalid() { - invalid_ = 1; - } - - bool invalid() const { - return invalid_; - } - - inline bool monitorsThis() const { - return argumentIndex_ == 0; - } - - inline bool monitorsArgument(uint32_t* pargument) const { - if (argumentIndex_ > 0 && argumentIndex_ < BYTECODE_INDEX) { - *pargument = argumentIndex_ - 1; - return true; - } - return false; - } - - inline bool monitorsBytecode() const { - return argumentIndex_ == BYTECODE_INDEX; - } - - // Fixup the IC entry as for a normal fallback stub, for this/arguments. - void fixupICEntry(ICEntry* icEntry) { - MOZ_ASSERT(!hasFallbackStub_); - MOZ_ASSERT(icEntry_ == nullptr); - MOZ_ASSERT(lastMonitorStubPtrAddr_ == nullptr); - icEntry_ = icEntry; - lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub(); - } - - // Create a new monitor stub for the type of the given value, and - // add it to this chain. - MOZ_MUST_USE bool addMonitorStubForValue(JSContext* cx, BaselineFrame* frame, - StackTypeSet* types, HandleValue val); - - void resetMonitorStubChain(Zone* zone); - - // Compiler for this stub kind. 
- class Compiler : public ICStubCompiler { - ICMonitoredFallbackStub* mainFallbackStub_; - uint32_t argumentIndex_; - - protected: - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - - public: - Compiler(JSContext* cx, ICMonitoredFallbackStub* mainFallbackStub) - : ICStubCompiler(cx, ICStub::TypeMonitor_Fallback, Engine::Baseline), - mainFallbackStub_(mainFallbackStub), - argumentIndex_(BYTECODE_INDEX) - { } - - Compiler(JSContext* cx, uint32_t argumentIndex) - : ICStubCompiler(cx, ICStub::TypeMonitor_Fallback, Engine::Baseline), - mainFallbackStub_(nullptr), - argumentIndex_(argumentIndex) - { } - - ICTypeMonitor_Fallback* getStub(ICStubSpace* space) override { - return newStub(space, getStubCode(), mainFallbackStub_, - argumentIndex_); - } - }; -}; - -class ICTypeMonitor_PrimitiveSet : public TypeCheckPrimitiveSetStub -{ - friend class ICStubSpace; - - ICTypeMonitor_PrimitiveSet(JitCode* stubCode, uint16_t flags) - : TypeCheckPrimitiveSetStub(TypeMonitor_PrimitiveSet, stubCode, flags) - {} - - public: - class Compiler : public TypeCheckPrimitiveSetStub::Compiler { - protected: - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - - public: - Compiler(JSContext* cx, ICTypeMonitor_PrimitiveSet* existingStub, - JSValueType type) - : TypeCheckPrimitiveSetStub::Compiler(cx, TypeMonitor_PrimitiveSet, existingStub, - type) - {} - - ICTypeMonitor_PrimitiveSet* updateStub() { - TypeCheckPrimitiveSetStub* stub = - this->TypeCheckPrimitiveSetStub::Compiler::updateStub(); - if (!stub) - return nullptr; - return stub->toMonitorStub(); - } - - ICTypeMonitor_PrimitiveSet* getStub(ICStubSpace* space) override { - MOZ_ASSERT(!existingStub_); - return newStub(space, getStubCode(), flags_); - } - }; -}; - -class ICTypeMonitor_SingleObject : public ICStub -{ - friend class ICStubSpace; - - GCPtrObject obj_; - - ICTypeMonitor_SingleObject(JitCode* stubCode, JSObject* obj); - - public: - GCPtrObject& object() { - return obj_; - } - - static size_t 
offsetOfObject() { - return offsetof(ICTypeMonitor_SingleObject, obj_); - } - - class Compiler : public ICStubCompiler { - protected: - HandleObject obj_; - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - - public: - Compiler(JSContext* cx, HandleObject obj) - : ICStubCompiler(cx, TypeMonitor_SingleObject, Engine::Baseline), - obj_(obj) - { } - - ICTypeMonitor_SingleObject* getStub(ICStubSpace* space) override { - return newStub(space, getStubCode(), obj_); - } - }; -}; - -class ICTypeMonitor_ObjectGroup : public ICStub -{ - friend class ICStubSpace; - - GCPtrObjectGroup group_; - - ICTypeMonitor_ObjectGroup(JitCode* stubCode, ObjectGroup* group); - - public: - GCPtrObjectGroup& group() { - return group_; - } - - static size_t offsetOfGroup() { - return offsetof(ICTypeMonitor_ObjectGroup, group_); - } - - class Compiler : public ICStubCompiler { - protected: - HandleObjectGroup group_; - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - - public: - Compiler(JSContext* cx, HandleObjectGroup group) - : ICStubCompiler(cx, TypeMonitor_ObjectGroup, Engine::Baseline), - group_(group) - { } - - ICTypeMonitor_ObjectGroup* getStub(ICStubSpace* space) override { - return newStub(space, getStubCode(), group_); - } - }; -}; - -class ICTypeMonitor_AnyValue : public ICStub -{ - friend class ICStubSpace; - - explicit ICTypeMonitor_AnyValue(JitCode* stubCode) - : ICStub(TypeMonitor_AnyValue, stubCode) - {} - - public: - class Compiler : public ICStubCompiler { - protected: - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - - public: - explicit Compiler(JSContext* cx) - : ICStubCompiler(cx, TypeMonitor_AnyValue, Engine::Baseline) - { } - - ICTypeMonitor_AnyValue* getStub(ICStubSpace* space) override { - return newStub(space, getStubCode()); - } - }; -}; - -// Compare -// JSOP_LT -// JSOP_LE -// JSOP_GT -// JSOP_GE -// JSOP_EQ -// JSOP_NE -// JSOP_STRICTEQ -// JSOP_STRICTNE - -class ICCompare_Fallback : public 
ICFallbackStub -{ - friend class ICStubSpace; - - explicit ICCompare_Fallback(JitCode* stubCode) - : ICFallbackStub(ICStub::Compare_Fallback, stubCode) {} - - public: - static const uint32_t MAX_OPTIMIZED_STUBS = 8; - - static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0; - void noteUnoptimizableAccess() { - extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT); - } - bool hadUnoptimizableAccess() const { - return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT); - } - - // Compiler for this stub kind. - class Compiler : public ICStubCompiler { - protected: - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - - public: - explicit Compiler(JSContext* cx, Engine engine) - : ICStubCompiler(cx, ICStub::Compare_Fallback, engine) {} - - ICStub* getStub(ICStubSpace* space) override { - return newStub(space, getStubCode()); - } - }; -}; - -// Enum for stubs handling a combination of typed arrays and typed objects. -enum TypedThingLayout { - Layout_TypedArray, - Layout_OutlineTypedObject, - Layout_InlineTypedObject -}; - -void -StripPreliminaryObjectStubs(JSContext* cx, ICFallbackStub* stub); - -void -LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result); - -void -LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result); - -class ICGetProp_Fallback : public ICMonitoredFallbackStub -{ - friend class ICStubSpace; - - explicit ICGetProp_Fallback(JitCode* stubCode) - : ICMonitoredFallbackStub(ICStub::GetProp_Fallback, stubCode) - { } - - public: - static const size_t UNOPTIMIZABLE_ACCESS_BIT = 0; - static const size_t ACCESSED_GETTER_BIT = 1; - - void noteUnoptimizableAccess() { - extra_ |= (1u << UNOPTIMIZABLE_ACCESS_BIT); - } - bool hadUnoptimizableAccess() const { - return extra_ & (1u << UNOPTIMIZABLE_ACCESS_BIT); - } - - void noteAccessedGetter() { - extra_ |= (1u << ACCESSED_GETTER_BIT); - } - bool hasAccessedGetter() const { - return extra_ & (1u << ACCESSED_GETTER_BIT); - } - - class 
Compiler : public ICStubCompiler { - protected: - CodeOffset bailoutReturnOffset_; - bool hasReceiver_; - MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm) override; - void postGenerateStubCode(MacroAssembler& masm, Handle code) override; - - virtual int32_t getKey() const override { - return static_cast(engine_) | - (static_cast(kind) << 1) | - (static_cast(hasReceiver_) << 17); - } - - public: - explicit Compiler(JSContext* cx, Engine engine, bool hasReceiver = false) - : ICStubCompiler(cx, ICStub::GetProp_Fallback, engine), - hasReceiver_(hasReceiver) - { } - - ICStub* getStub(ICStubSpace* space) override { - return newStub(space, getStubCode()); - } - }; -}; - -static inline uint32_t -SimpleTypeDescrKey(SimpleTypeDescr* descr) -{ - if (descr->is()) - return uint32_t(descr->as().type()) << 1; - return (uint32_t(descr->as().type()) << 1) | 1; -} - -inline bool -SimpleTypeDescrKeyIsScalar(uint32_t key) -{ - return !(key & 1); -} - -inline ScalarTypeDescr::Type -ScalarTypeFromSimpleTypeDescrKey(uint32_t key) -{ - MOZ_ASSERT(SimpleTypeDescrKeyIsScalar(key)); - return ScalarTypeDescr::Type(key >> 1); -} - -inline ReferenceType -ReferenceTypeFromSimpleTypeDescrKey(uint32_t key) -{ - MOZ_ASSERT(!SimpleTypeDescrKeyIsScalar(key)); - return ReferenceType(key >> 1); -} - -} // namespace jit -} // namespace js - -#endif /* jit_SharedIC_h */ diff --git a/js/src/jit/SharedICList.h b/js/src/jit/SharedICList.h deleted file mode 100644 index b442b5f549f2..000000000000 --- a/js/src/jit/SharedICList.h +++ /dev/null @@ -1,29 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sts=4 et sw=4 tw=99: - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - -#ifndef jit_SharedICList_h -#define jit_SharedICList_h - -namespace js { -namespace jit { - -// List of IC stub kinds that can run in Baseline and in IonMonkey -#define IC_SHARED_STUB_KIND_LIST(_) \ - _(BinaryArith_Fallback) \ - \ - _(Compare_Fallback) \ - \ - _(GetProp_Fallback) \ - \ - _(CacheIR_Regular) \ - _(CacheIR_Monitored) \ - _(CacheIR_Updated) \ - \ - -} // namespace jit -} // namespace js - -#endif /* jit_SharedICList_h */ diff --git a/js/src/jit/arm/SharedICHelpers-arm-inl.h b/js/src/jit/arm/SharedICHelpers-arm-inl.h index ba5f3ef55e7f..d3d097b9a82c 100644 --- a/js/src/jit/arm/SharedICHelpers-arm-inl.h +++ b/js/src/jit/arm/SharedICHelpers-arm-inl.h @@ -44,28 +44,6 @@ EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argS masm.jump(target); } -inline void -EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) -{ - // We assume during this that R0 and R1 have been pushed, and that R2 is - // unused. - MOZ_ASSERT(R2 == ValueOperand(r1, r0)); - - masm.loadPtr(Address(sp, stackSize), r0); - masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), r0); - masm.add32(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), r0); - - // Push frame descriptor and perform the tail call. - // ICTailCallReg (lr) already contains the return address (as we keep - // it there through the stub calls), but the VMWrapper code being called - // expects the return address to also be pushed on the stack. 
- MOZ_ASSERT(ICTailCallReg == lr); - masm.makeFrameDescriptor(r0, JitFrame_IonJS, ExitFrameLayout::Size()); - masm.push(r0); - masm.push(lr); - masm.jump(target); -} - inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) { diff --git a/js/src/jit/arm64/SharedICHelpers-arm64-inl.h b/js/src/jit/arm64/SharedICHelpers-arm64-inl.h index 97e4e61d869b..04873bea0f34 100644 --- a/js/src/jit/arm64/SharedICHelpers-arm64-inl.h +++ b/js/src/jit/arm64/SharedICHelpers-arm64-inl.h @@ -47,12 +47,6 @@ EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argS masm.jump(target); } -inline void -EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) -{ - MOZ_CRASH("Not implemented yet."); -} - inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) { diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h index d9e91f5ff570..2edb22dfcd4f 100644 --- a/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h +++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h @@ -42,23 +42,6 @@ EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argS masm.jump(target); } -inline void -EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) -{ - Register scratch = R2.scratchReg(); - - masm.loadPtr(Address(sp, stackSize), scratch); - masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch); - masm.addPtr(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch); - - // Push frame descriptor and perform the tail call. 
- MOZ_ASSERT(ICTailCallReg == ra); - masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size()); - masm.push(scratch); - masm.push(ICTailCallReg); - masm.jump(target); -} - inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) { diff --git a/js/src/jit/none/SharedICHelpers-none-inl.h b/js/src/jit/none/SharedICHelpers-none-inl.h index 79c563dc15ba..73deecef38b6 100644 --- a/js/src/jit/none/SharedICHelpers-none-inl.h +++ b/js/src/jit/none/SharedICHelpers-none-inl.h @@ -13,7 +13,6 @@ namespace js { namespace jit { inline void EmitBaselineTailCallVM(TrampolinePtr, MacroAssembler&, uint32_t) { MOZ_CRASH(); } -inline void EmitIonTailCallVM(TrampolinePtr, MacroAssembler&, uint32_t) { MOZ_CRASH(); } inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler&, Register, uint32_t) { MOZ_CRASH(); } inline void EmitBaselineCallVM(TrampolinePtr, MacroAssembler&) { MOZ_CRASH(); } diff --git a/js/src/jit/shared/BaselineCompiler-shared.h b/js/src/jit/shared/BaselineCompiler-shared.h index 375d08908382..81e99bdae671 100644 --- a/js/src/jit/shared/BaselineCompiler-shared.h +++ b/js/src/jit/shared/BaselineCompiler-shared.h @@ -30,7 +30,7 @@ class BaselineCompilerShared FrameInfo frame; FallbackICStubSpace stubSpace_; - js::Vector icEntries_; + js::Vector icEntries_; // Stores the native code offset for a bytecode pc. struct PCMappingEntry @@ -70,16 +70,16 @@ class BaselineCompilerShared BaselineCompilerShared(JSContext* cx, TempAllocator& alloc, JSScript* script); - BaselineICEntry* allocateICEntry(ICStub* stub, ICEntry::Kind kind) { + ICEntry* allocateICEntry(ICStub* stub, ICEntry::Kind kind) { if (!stub) return nullptr; // Create the entry and add it to the vector. 
- if (!icEntries_.append(BaselineICEntry(script->pcToOffset(pc), kind))) { + if (!icEntries_.append(ICEntry(script->pcToOffset(pc), kind))) { ReportOutOfMemory(cx); return nullptr; } - BaselineICEntry& vecEntry = icEntries_.back(); + ICEntry& vecEntry = icEntries_.back(); // Set the first stub for the IC entry to the fallback stub vecEntry.setFirstStub(stub); @@ -90,7 +90,7 @@ class BaselineCompilerShared // Append an ICEntry without a stub. bool appendICEntry(ICEntry::Kind kind, uint32_t returnOffset) { - BaselineICEntry entry(script->pcToOffset(pc), kind); + ICEntry entry(script->pcToOffset(pc), kind); entry.setReturnOffset(CodeOffset(returnOffset)); if (!icEntries_.append(entry)) { ReportOutOfMemory(cx); diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h index 6647039dbfa7..e1d3768e3302 100644 --- a/js/src/jit/shared/CodeGenerator-shared-inl.h +++ b/js/src/jit/shared/CodeGenerator-shared-inl.h @@ -222,7 +222,7 @@ int32_t CodeGeneratorShared::SlotToStackOffset(int32_t slot) const { MOZ_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount())); - int32_t offset = masm.framePushed() - frameInitialAdjustment_ - slot; + int32_t offset = masm.framePushed() - slot; MOZ_ASSERT(offset >= 0); return offset; } @@ -236,7 +236,7 @@ CodeGeneratorShared::StackOffsetToSlot(int32_t offset) const // offset = framePushed - frameInitialAdjustment - slot // offset + slot = framePushed - frameInitialAdjustment // slot = framePushed - frameInitialAdjustement - offset - return masm.framePushed() - frameInitialAdjustment_ - offset; + return masm.framePushed() - offset; } // For argument construction for calls. Argslots are Value-sized. 
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp index f627bda6eaa6..4e95fd898161 100644 --- a/js/src/jit/shared/CodeGenerator-shared.cpp +++ b/js/src/jit/shared/CodeGenerator-shared.cpp @@ -58,7 +58,6 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, Mac lastOsiPointOffset_(0), safepoints_(graph->totalSlotCount(), (gen->info().nargs() + 1) * sizeof(Value)), returnLabel_(), - stubSpace_(), nativeToBytecodeMap_(nullptr), nativeToBytecodeMapSize_(0), nativeToBytecodeTableOffset_(0), @@ -76,7 +75,6 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, Mac checkOsiPointRegisters(JitOptions.checkOsiPointRegisters), #endif frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()), - frameInitialAdjustment_(0), frameClass_(FrameSizeClass::None()) { if (gen->isProfilerInstrumentationEnabled()) diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h index 68fb1a4a4a59..1c0e22b58c81 100644 --- a/js/src/jit/shared/CodeGenerator-shared.h +++ b/js/src/jit/shared/CodeGenerator-shared.h @@ -68,8 +68,6 @@ class CodeGeneratorShared : public LElementVisitor // Label for the common return path. NonAssertingLabel returnLabel_; - FallbackICStubSpace stubSpace_; - js::Vector safepointIndices_; js::Vector osiIndices_; @@ -174,11 +172,6 @@ class CodeGeneratorShared : public LElementVisitor // spills. int32_t frameDepth_; - // In some cases, we force stack alignment to platform boundaries, see - // also CodeGeneratorShared constructor. This value records the adjustment - // we've done. - int32_t frameInitialAdjustment_; - // Frame class this frame's size falls into (see IonFrame.h). 
FrameSizeClass frameClass_; diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h index 9cd7a0637328..8b87e71f3cfd 100644 --- a/js/src/jit/shared/LIR-shared.h +++ b/js/src/jit/shared/LIR-shared.h @@ -4796,26 +4796,6 @@ class LStringReplace: public LCallInstructionHelper<1, 3, 0> } }; -class LBinarySharedStub : public LCallInstructionHelper -{ - public: - LIR_HEADER(BinarySharedStub) - - LBinarySharedStub(const LBoxAllocation& lhs, const LBoxAllocation& rhs) - : LCallInstructionHelper(classOpcode) - { - setBoxOperand(LhsInput, lhs); - setBoxOperand(RhsInput, rhs); - } - - const MBinarySharedStub* mir() const { - return mir_->toBinarySharedStub(); - } - - static const size_t LhsInput = 0; - static const size_t RhsInput = BOX_PIECES; -}; - class LBinaryCache : public LInstructionHelper { public: @@ -4864,20 +4844,6 @@ class LUnaryCache : public LInstructionHelper static const size_t Input = 0; }; -class LNullarySharedStub : public LCallInstructionHelper -{ - public: - LIR_HEADER(NullarySharedStub) - - const MNullarySharedStub* mir() const { - return mir_->toNullarySharedStub(); - } - - LNullarySharedStub() - : LCallInstructionHelper(classOpcode) - {} -}; - class LClassConstructor : public LCallInstructionHelper<1, 0, 0> { public: diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h index 766b99f1644c..217b4ae0ffa7 100644 --- a/js/src/jit/shared/Lowering-shared-inl.h +++ b/js/src/jit/shared/Lowering-shared-inl.h @@ -245,30 +245,6 @@ LIRGeneratorShared::defineInt64(LInstructionHelper* li add(lir); } -void -LIRGeneratorShared::defineSharedStubReturn(LInstruction* lir, MDefinition* mir) -{ - lir->setMir(mir); - - MOZ_ASSERT(lir->isBinarySharedStub() || lir->isNullarySharedStub()); - MOZ_ASSERT(mir->type() == MIRType::Value); - - uint32_t vreg = getVirtualRegister(); - -#if defined(JS_NUNBOX32) - lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE, - LGeneralReg(JSReturnReg_Type))); 
- lir->setDef(PAYLOAD_INDEX, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD, - LGeneralReg(JSReturnReg_Data))); - getVirtualRegister(); -#elif defined(JS_PUNBOX64) - lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg))); -#endif - - mir->setVirtualRegister(vreg); - add(lir); -} - void LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir) { diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h index acaf46d443f7..06a614f80d0e 100644 --- a/js/src/jit/shared/Lowering-shared.h +++ b/js/src/jit/shared/Lowering-shared.h @@ -176,7 +176,6 @@ class LIRGeneratorShared inline void defineSinCos(LInstructionHelper<2, Ops, Temps> *lir, MDefinition *mir, LDefinition::Policy policy = LDefinition::REGISTER); - inline void defineSharedStubReturn(LInstruction* lir, MDefinition* mir); inline void defineReturn(LInstruction* lir, MDefinition* mir); template diff --git a/js/src/jit/x64/SharedICHelpers-x64-inl.h b/js/src/jit/x64/SharedICHelpers-x64-inl.h index 1ac06ff14016..304ec3175fa6 100644 --- a/js/src/jit/x64/SharedICHelpers-x64-inl.h +++ b/js/src/jit/x64/SharedICHelpers-x64-inl.h @@ -36,27 +36,6 @@ EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argS masm.jump(target); } -inline void -EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) -{ - // For tail calls, find the already pushed JitFrame_IonJS signifying the - // end of the Ion frame. Retrieve the length of the frame and repush - // JitFrame_IonJS with the extra stacksize, rendering the original - // JitFrame_IonJS obsolete. - - ScratchRegisterScope scratch(masm); - - masm.loadPtr(Address(esp, stackSize), scratch); - masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch); - masm.addq(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch); - - // Push frame descriptor and perform the tail call. 
- masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size()); - masm.push(scratch); - masm.push(ICTailCallReg); - masm.jump(target); -} - inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) { diff --git a/js/src/jit/x86/SharedICHelpers-x86-inl.h b/js/src/jit/x86/SharedICHelpers-x86-inl.h index 7ed120d89399..c3818f45b059 100644 --- a/js/src/jit/x86/SharedICHelpers-x86-inl.h +++ b/js/src/jit/x86/SharedICHelpers-x86-inl.h @@ -36,25 +36,6 @@ EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argS masm.jump(target); } -inline void -EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) -{ - // For tail calls, find the already pushed JitFrame_IonJS signifying the - // end of the Ion frame. Retrieve the length of the frame and repush - // JitFrame_IonJS with the extra stacksize, rendering the original - // JitFrame_IonJS obsolete. - - masm.loadPtr(Address(esp, stackSize), eax); - masm.shrl(Imm32(FRAMESIZE_SHIFT), eax); - masm.addl(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), eax); - - // Push frame descriptor and perform the tail call. - masm.makeFrameDescriptor(eax, JitFrame_IonJS, ExitFrameLayout::Size()); - masm.push(eax); - masm.push(ICTailCallReg); - masm.jump(target); -} - inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) { diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index d0cdfa994ca7..e206e9cb54de 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -160,7 +160,7 @@ JS::ObjectOpResult::reportStrictErrorOrWarning(JSContext* cx, HandleObject obj, "unsigned value of OkCode must not be an error code"); MOZ_ASSERT(code_ != Uninitialized); MOZ_ASSERT(!ok()); - assertSameCompartment(cx, obj); + cx->check(obj); unsigned flags = strict ? 
JSREPORT_ERROR : (JSREPORT_WARNING | JSREPORT_STRICT); if (code_ == JSMSG_OBJECT_NOT_EXTENSIBLE) { @@ -207,7 +207,7 @@ JS::ObjectOpResult::reportStrictErrorOrWarning(JSContext* cx, HandleObject obj, MOZ_ASSERT(code_ != Uninitialized); MOZ_ASSERT(!ok()); MOZ_ASSERT(!ErrorTakesArguments(code_)); - assertSameCompartment(cx, obj); + cx->check(obj); unsigned flags = strict ? JSREPORT_ERROR : (JSREPORT_WARNING | JSREPORT_STRICT); return JS_ReportErrorFlagsAndNumberASCII(cx, flags, GetErrorMessage, nullptr, code_); @@ -359,7 +359,7 @@ JS_ValueToObject(JSContext* cx, HandleValue value, MutableHandleObject objp) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); if (value.isNullOrUndefined()) { objp.set(nullptr); return true; @@ -376,7 +376,7 @@ JS_ValueToFunction(JSContext* cx, HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); return ReportIfNotFunction(cx, value); } @@ -385,7 +385,7 @@ JS_ValueToConstructor(JSContext* cx, HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); return ReportIfNotFunction(cx, value); } @@ -394,7 +394,7 @@ JS_ValueToSource(JSContext* cx, HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); return ValueToSource(cx, value); } @@ -409,7 +409,7 @@ JS_TypeOfValue(JSContext* cx, HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); return TypeOfValue(value); } @@ -418,7 +418,7 @@ JS_StrictlyEqual(JSContext* cx, HandleValue value1, HandleValue value2, bool* eq { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value1, value2); + cx->check(value1, value2); MOZ_ASSERT(equal); return StrictlyEqual(cx, value1, value2, equal); } @@ -428,7 +428,7 @@ JS_LooselyEqual(JSContext* cx, HandleValue value1, HandleValue value2, bool* equ { 
AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value1, value2); + cx->check(value1, value2); MOZ_ASSERT(equal); return LooselyEqual(cx, value1, value2, equal); } @@ -438,7 +438,7 @@ JS_SameValue(JSContext* cx, HandleValue value1, HandleValue value2, bool* same) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value1, value2); + cx->check(value1, value2); MOZ_ASSERT(same); return SameValue(cx, value1, value2, same); } @@ -1039,7 +1039,7 @@ JS_ResolveStandardClass(JSContext* cx, HandleObject obj, HandleId id, bool* reso AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id); + cx->check(obj, id); Handle global = obj.as(); *resolved = false; @@ -1117,7 +1117,7 @@ JS_EnumerateStandardClasses(JSContext* cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); Handle global = obj.as(); return GlobalObject::initStandardClasses(cx, global); } @@ -1218,7 +1218,7 @@ JS_IdToProtoKey(JSContext* cx, HandleId id) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, id); + cx->check(id); if (!JSID_IS_ATOM(id)) return JSProto_Null; @@ -1296,7 +1296,7 @@ JS_PUBLIC_API(bool) JS::detail::ComputeThis(JSContext* cx, Value* vp, MutableHandleObject thisObject) { AssertHeapIsIdle(); - assertSameCompartment(cx, vp[0], vp[1]); + cx->check(vp[0], vp[1]); MutableHandleValue thisv = MutableHandleValue::fromMarkedLocation(&vp[1]); if (!BoxNonStrictThis(cx, thisv, thisv)) @@ -1625,7 +1625,7 @@ JS_ValueToId(JSContext* cx, HandleValue value, MutableHandleId idp) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); return ValueToId(cx, value, idp); } @@ -1634,7 +1634,7 @@ JS_StringToId(JSContext* cx, HandleString string, MutableHandleId idp) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, string); + cx->check(string); RootedValue value(cx, StringValue(string)); return ValueToId(cx, value, idp); } @@ 
-1644,9 +1644,9 @@ JS_IdToValue(JSContext* cx, jsid id, MutableHandleValue vp) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, id); + cx->check(id); vp.set(IdToValue(id)); - assertSameCompartment(cx, vp); + cx->check(vp); return true; } @@ -1655,7 +1655,7 @@ JS::ToPrimitive(JSContext* cx, HandleObject obj, JSType hint, MutableHandleValue { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); MOZ_ASSERT(obj != nullptr); MOZ_ASSERT(hint == JSTYPE_UNDEFINED || hint == JSTYPE_STRING || hint == JSTYPE_NUMBER); vp.setObject(*obj); @@ -1718,7 +1718,7 @@ JS_InitClass(JSContext* cx, HandleObject obj, HandleObject parent_proto, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, parent_proto); + cx->check(obj, parent_proto); return InitClass(cx, obj, parent_proto, Valueify(clasp), constructor, nargs, ps, fs, static_ps, static_fs); } @@ -1742,8 +1742,8 @@ JS_InstanceOf(JSContext* cx, HandleObject obj, const JSClass* clasp, CallArgs* a CHECK_REQUEST(cx); #ifdef DEBUG if (args) { - assertSameCompartment(cx, obj); - assertSameCompartment(cx, args->thisv(), args->calleev()); + cx->check(obj); + cx->check(args->thisv(), args->calleev()); } #endif if (!obj || obj->getJSClass() != clasp) { @@ -1758,7 +1758,7 @@ JS_PUBLIC_API(bool) JS_HasInstance(JSContext* cx, HandleObject obj, HandleValue value, bool* bp) { AssertHeapIsIdle(); - assertSameCompartment(cx, obj, value); + cx->check(obj, value); return HasInstance(cx, obj, value, bp); } @@ -1789,7 +1789,7 @@ JS_GetConstructor(JSContext* cx, HandleObject proto) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, proto); + cx->check(proto); RootedValue cval(cx); if (!GetProperty(cx, proto, proto, cx->names().constructor, &cval)) @@ -1929,7 +1929,7 @@ JS_FireOnNewGlobalObject(JSContext* cx, JS::HandleObject global) // to be able to throw errors during delicate global creation routines. 
// This infallibility will eat OOM and slow script, but if that happens // we'll likely run up into them again soon in a fallible context. - assertSameCompartment(cx, global); + cx->check(global); Rooted globalObject(cx, &global->as()); Debugger::onNewGlobalObject(cx, globalObject); } @@ -1957,7 +1957,7 @@ JS_NewObjectWithGivenProto(JSContext* cx, const JSClass* jsclasp, HandleObject p MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, proto); + cx->check(proto); const Class* clasp = Valueify(jsclasp); if (!clasp) @@ -1986,7 +1986,7 @@ JS_NewObjectForConstructor(JSContext* cx, const JSClass* clasp, const CallArgs& CHECK_REQUEST(cx); Value callee = args.calleev(); - assertSameCompartment(cx, callee); + cx->check(callee); RootedObject obj(cx, &callee.toObject()); return CreateThis(cx, Valueify(clasp), obj); } @@ -2010,7 +2010,7 @@ JS::AssertObjectBelongsToCurrentThread(JSObject* obj) JS_PUBLIC_API(bool) JS_GetPrototype(JSContext* cx, HandleObject obj, MutableHandleObject result) { - assertSameCompartment(cx, obj); + cx->check(obj); return GetPrototype(cx, obj, result); } @@ -2019,7 +2019,7 @@ JS_SetPrototype(JSContext* cx, HandleObject obj, HandleObject proto) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, proto); + cx->check(obj, proto); return SetPrototype(cx, obj, proto); } @@ -2028,28 +2028,28 @@ JS_PUBLIC_API(bool) JS_GetPrototypeIfOrdinary(JSContext* cx, HandleObject obj, bool* isOrdinary, MutableHandleObject result) { - assertSameCompartment(cx, obj); + cx->check(obj); return GetPrototypeIfOrdinary(cx, obj, isOrdinary, result); } JS_PUBLIC_API(bool) JS_IsExtensible(JSContext* cx, HandleObject obj, bool* extensible) { - assertSameCompartment(cx, obj); + cx->check(obj); return IsExtensible(cx, obj, extensible); } JS_PUBLIC_API(bool) JS_PreventExtensions(JSContext* cx, JS::HandleObject obj, ObjectOpResult& result) { - assertSameCompartment(cx, obj); + cx->check(obj); return 
PreventExtensions(cx, obj, result); } JS_PUBLIC_API(bool) JS_SetImmutablePrototype(JSContext *cx, JS::HandleObject obj, bool *succeeded) { - assertSameCompartment(cx, obj); + cx->check(obj); return SetImmutablePrototype(cx, obj, succeeded); } @@ -2059,7 +2059,7 @@ JS_GetOwnPropertyDescriptorById(JSContext* cx, HandleObject obj, HandleId id, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id); + cx->check(obj, id); return GetOwnPropertyDescriptor(cx, obj, id, desc); } @@ -2090,7 +2090,7 @@ JS_PUBLIC_API(bool) JS_GetPropertyDescriptorById(JSContext* cx, HandleObject obj, HandleId id, MutableHandle desc) { - assertSameCompartment(cx, obj, id); + cx->check(obj, id); return GetPropertyDescriptor(cx, obj, id, desc); } @@ -2111,7 +2111,7 @@ DefinePropertyByDescriptor(JSContext* cx, HandleObject obj, HandleId id, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id, desc); + cx->check(obj, id, desc); return DefineProperty(cx, obj, id, desc, result); } @@ -2147,7 +2147,7 @@ DefineAccessorPropertyById(JSContext* cx, HandleObject obj, HandleId id, AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id, getter, setter); + cx->check(obj, id, getter, setter); return js::DefineAccessorProperty(cx, obj, id, getter, setter, attrs); } @@ -2203,7 +2203,7 @@ DefineDataPropertyById(JSContext* cx, HandleObject obj, HandleId id, HandleValue AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id, value); + cx->check(obj, id, value); return js::DefineDataProperty(cx, obj, id, value, attrs); } @@ -2470,7 +2470,7 @@ static bool DefineDataElement(JSContext* cx, HandleObject obj, uint32_t index, HandleValue value, unsigned attrs) { - assertSameCompartment(cx, obj, value); + cx->check(obj, value); AssertHeapIsIdle(); CHECK_REQUEST(cx); RootedId id(cx); @@ -2541,7 +2541,7 @@ JS_HasPropertyById(JSContext* cx, HandleObject obj, HandleId id, bool* foundp) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - 
assertSameCompartment(cx, obj, id); + cx->check(obj, id); return HasProperty(cx, obj, id, foundp); } @@ -2582,7 +2582,7 @@ JS_HasOwnPropertyById(JSContext* cx, HandleObject obj, HandleId id, bool* foundp { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id); + cx->check(obj, id); return HasOwnProperty(cx, obj, id, foundp); } @@ -2603,7 +2603,7 @@ JS_ForwardGetPropertyTo(JSContext* cx, HandleObject obj, HandleId id, HandleValu { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id, receiver); + cx->check(obj, id, receiver); return GetProperty(cx, obj, receiver, id, vp); } @@ -2614,7 +2614,7 @@ JS_ForwardGetElementTo(JSContext* cx, HandleObject obj, uint32_t index, HandleOb { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return GetElement(cx, obj, receiver, index, vp); } @@ -2659,7 +2659,7 @@ JS_ForwardSetPropertyTo(JSContext* cx, HandleObject obj, HandleId id, HandleValu { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id, v, receiver); + cx->check(obj, id, v, receiver); return SetProperty(cx, obj, id, v, receiver, result); } @@ -2669,7 +2669,7 @@ JS_SetPropertyById(JSContext* cx, HandleObject obj, HandleId id, HandleValue v) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id, v); + cx->check(obj, id, v); RootedValue receiver(cx, ObjectValue(*obj)); ObjectOpResult ignored; @@ -2702,7 +2702,7 @@ SetElement(JSContext* cx, HandleObject obj, uint32_t index, HandleValue v) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, v); + cx->check(obj, v); RootedValue receiver(cx, ObjectValue(*obj)); ObjectOpResult ignored; @@ -2755,7 +2755,7 @@ JS_DeletePropertyById(JSContext* cx, HandleObject obj, HandleId id, ObjectOpResu { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id); + cx->check(obj, id); return DeleteProperty(cx, obj, id, result); } @@ -2764,7 +2764,7 @@ JS_PUBLIC_API(bool) 
JS_DeleteProperty(JSContext* cx, HandleObject obj, const char* name, ObjectOpResult& result) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); JSAtom* atom = Atomize(cx, name, strlen(name)); if (!atom) @@ -2778,7 +2778,7 @@ JS_DeleteUCProperty(JSContext* cx, HandleObject obj, const char16_t* name, size_ ObjectOpResult& result) { CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen)); if (!atom) @@ -2792,7 +2792,7 @@ JS_DeleteElement(JSContext* cx, HandleObject obj, uint32_t index, ObjectOpResult { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return DeleteElement(cx, obj, index, result); } @@ -2823,7 +2823,7 @@ JS_Enumerate(JSContext* cx, HandleObject obj, JS::MutableHandle props) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, props); + cx->check(obj, props); MOZ_ASSERT(props.empty()); AutoIdVector ids(cx); @@ -2852,7 +2852,7 @@ JS_CallFunctionValue(JSContext* cx, HandleObject obj, HandleValue fval, const Ha MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, fval, args); + cx->check(obj, fval, args); InvokeArgs iargs(cx); if (!FillArgumentsFromArraylike(cx, iargs, args)) @@ -2869,7 +2869,7 @@ JS_CallFunction(JSContext* cx, HandleObject obj, HandleFunction fun, const Handl MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, fun, args); + cx->check(obj, fun, args); InvokeArgs iargs(cx); if (!FillArgumentsFromArraylike(cx, iargs, args)) @@ -2887,7 +2887,7 @@ JS_CallFunctionName(JSContext* cx, HandleObject obj, const char* name, const Han MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, args); + cx->check(obj, args); JSAtom* atom = Atomize(cx, name, strlen(name)); if (!atom) @@ -2912,7 +2912,7 @@ 
JS::Call(JSContext* cx, HandleValue thisv, HandleValue fval, const JS::HandleVal { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, thisv, fval, args); + cx->check(thisv, fval, args); InvokeArgs iargs(cx); if (!FillArgumentsFromArraylike(cx, iargs, args)) @@ -2927,7 +2927,7 @@ JS::Construct(JSContext* cx, HandleValue fval, HandleObject newTarget, const JS: { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, fval, newTarget, args); + cx->check(fval, newTarget, args); if (!IsConstructor(fval)) { ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval, nullptr); @@ -2953,7 +2953,7 @@ JS::Construct(JSContext* cx, HandleValue fval, const JS::HandleValueArray& args, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, fval, args); + cx->check(fval, args); if (!IsConstructor(fval)) { ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval, nullptr); @@ -2975,7 +2975,7 @@ JS_AlreadyHasOwnPropertyById(JSContext* cx, HandleObject obj, HandleId id, bool* { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id); + cx->check(obj, id); if (!obj->isNative()) return js::HasOwnProperty(cx, obj, id, foundp); @@ -3024,7 +3024,7 @@ JS_FreezeObject(JSContext* cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return FreezeObject(cx, obj); } @@ -3042,7 +3042,7 @@ JS_DeepFreezeObject(JSContext* cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); /* Assume that non-extensible objects are already deep-frozen, to avoid divergence. 
*/ bool extensible; @@ -3119,7 +3119,7 @@ JS_DefineObject(JSContext* cx, HandleObject obj, const char* name, const JSClass { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); const Class* clasp = Valueify(jsclasp); if (!clasp) @@ -3273,7 +3273,7 @@ JS::ObjectToCompletePropertyDescriptor(JSContext* cx, { // |obj| can be in a different compartment here. The caller is responsible // for wrapping it (see JS_WrapPropertyDescriptor). - assertSameCompartment(cx, descObj); + cx->check(descObj); if (!ToPropertyDescriptor(cx, descObj, true, desc)) return false; CompletePropertyDescriptor(desc); @@ -3287,7 +3287,7 @@ JS_SetAllNonReservedSlotsToUndefined(JSContext* cx, JSObject* objArg) RootedObject obj(cx, objArg); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); if (!obj->isNative()) return; @@ -3318,7 +3318,7 @@ JS_NewArrayObject(JSContext* cx, const JS::HandleValueArray& contents) AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, contents); + cx->check(contents); return NewDenseCopiedArray(cx, contents.length(), contents.begin()); } @@ -3335,7 +3335,7 @@ JS_NewArrayObject(JSContext* cx, size_t length) inline bool IsGivenTypeObject(JSContext* cx, JS::HandleObject obj, const ESClass& typeClass, bool* isType) { - assertSameCompartment(cx, obj); + cx->check(obj); ESClass cls; if (!GetBuiltinClass(cx, obj, &cls)) @@ -3368,7 +3368,7 @@ JS_GetArrayLength(JSContext* cx, HandleObject obj, uint32_t* lengthp) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return GetLengthProperty(cx, obj, lengthp); } @@ -3377,7 +3377,7 @@ JS_SetArrayLength(JSContext* cx, HandleObject obj, uint32_t length) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return SetLengthProperty(cx, obj, length); } @@ -3473,7 +3473,7 @@ JS::GetSelfHostedFunction(JSContext* cx, const char* selfHostedName, HandleId id 
MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, id); + cx->check(id); RootedAtom name(cx, IdToFunctionName(cx, id)); if (!name) @@ -3492,7 +3492,7 @@ JS::GetSelfHostedFunction(JSContext* cx, const char* selfHostedName, HandleId id JS_PUBLIC_API(JSFunction*) JS::NewFunctionFromSpec(JSContext* cx, const JSFunctionSpec* fs, HandleId id) { - assertSameCompartment(cx, id); + cx->check(id); // Delay cloning self-hosted functions until they are called. This is // achieved by passing DefineFunction a nullptr JSNative which produces an @@ -3600,7 +3600,7 @@ CloneFunctionObject(JSContext* cx, HandleObject funobj, HandleObject env, Handle { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, env); + cx->check(env); MOZ_ASSERT(env); // Note that funobj can be in a different compartment. @@ -3705,7 +3705,7 @@ JS_GetFunctionArity(JSFunction* fun) JS_PUBLIC_API(bool) JS_GetFunctionLength(JSContext* cx, HandleFunction fun, uint16_t* length) { - assertSameCompartment(cx, fun); + cx->check(fun); return JSFunction::getLength(cx, fun, length); } @@ -3736,7 +3736,7 @@ JS_DefineFunctions(JSContext* cx, HandleObject obj, const JSFunctionSpec* fs) MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return DefineFunctions(cx, obj, fs, NotIntrinsic); } @@ -3748,7 +3748,7 @@ JS_DefineFunction(JSContext* cx, HandleObject obj, const char* name, JSNative ca MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); JSAtom* atom = Atomize(cx, name, strlen(name)); if (!atom) return nullptr; @@ -3764,7 +3764,7 @@ JS_DefineUCFunction(JSContext* cx, HandleObject obj, MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen)); if (!atom) 
return nullptr; @@ -3779,7 +3779,7 @@ JS_DefineFunctionById(JSContext* cx, HandleObject obj, HandleId id, JSNative cal MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, id); + cx->check(obj, id); return DefineFunction(cx, obj, id, call, nargs, attrs); } @@ -4376,7 +4376,7 @@ JS_BufferIsCompilableUnit(JSContext* cx, HandleObject obj, const char* utf8, siz { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); cx->clearPendingException(); @@ -4471,7 +4471,7 @@ CompileFunction(JSContext* cx, const ReadOnlyCompileOptions& optionsArg, MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, enclosingEnv); + cx->check(enclosingEnv); RootedAtom funAtom(cx); fun.set(NewScriptedFunction(cx, 0, JSFunction::INTERPRETED_NORMAL, @@ -4648,7 +4648,7 @@ JS_DecompileFunction(JSContext* cx, HandleFunction fun) MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, fun); + cx->check(fun); return FunctionToString(cx, fun, /* isToSource = */ false); } @@ -4658,7 +4658,7 @@ ExecuteScript(JSContext* cx, HandleObject scope, HandleScript script, Value* rva MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, scope, script); + cx->check(scope, script); MOZ_ASSERT_IF(!IsGlobalLexicalEnvironment(scope), script->hasNonSyntacticScope()); return Execute(cx, script, *scope, rval); } @@ -4752,7 +4752,7 @@ Evaluate(JSContext* cx, ScopeKind scopeKind, HandleObject env, MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, env); + cx->check(env); MOZ_ASSERT_IF(!IsGlobalLexicalEnvironment(env), scopeKind == ScopeKind::NonSyntactic); options.setIsRunOnce(true); @@ -4893,7 +4893,7 @@ JS::ModuleInstantiate(JSContext* cx, JS::HandleScript script) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - 
assertSameCompartment(cx, script); + cx->check(script); RootedModuleObject module(cx, script->module()); MOZ_ASSERT(module); return ModuleObject::Instantiate(cx, module); @@ -4904,7 +4904,7 @@ JS::ModuleEvaluate(JSContext* cx, JS::HandleScript script) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, script); + cx->check(script); RootedModuleObject module(cx, script->module()); MOZ_ASSERT(module); return ModuleObject::Evaluate(cx, module); @@ -4915,7 +4915,7 @@ JS::GetRequestedModules(JSContext* cx, JS::HandleScript script) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, script); + cx->check(script); RootedModuleObject module(cx, script->module()); MOZ_ASSERT(module); return &module->requestedModules(); @@ -4926,7 +4926,7 @@ JS::GetRequestedModuleSpecifier(JSContext* cx, JS::HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); JSObject* obj = &value.toObject(); return obj->as().moduleSpecifier(); } @@ -4937,7 +4937,7 @@ JS::GetRequestedModuleSourcePos(JSContext* cx, JS::HandleValue value, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); MOZ_ASSERT(lineNumber); MOZ_ASSERT(columnNumber); auto& requested = value.toObject().as(); @@ -4950,7 +4950,7 @@ JS_New(JSContext* cx, HandleObject ctor, const JS::HandleValueArray& inputArgs) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, ctor, inputArgs); + cx->check(ctor, inputArgs); RootedValue ctorVal(cx, ObjectValue(*ctor)); if (!IsConstructor(ctorVal)) { @@ -5040,7 +5040,7 @@ JS::NewPromiseObject(JSContext* cx, HandleObject executor, HandleObject proto /* MOZ_ASSERT(!cx->zone()->isAtomsZone()); AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, executor, proto); + cx->check(executor, proto); if (!executor) return PromiseObject::createSkippingExecutor(cx); @@ -5136,7 +5136,7 @@ JS::CallOriginalPromiseResolve(JSContext* cx, JS::HandleValue 
resolutionValue) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, resolutionValue); + cx->check(resolutionValue); RootedObject promise(cx, PromiseObject::unforgeableResolve(cx, resolutionValue)); MOZ_ASSERT_IF(promise, CheckedUnwrap(promise)->is()); @@ -5148,7 +5148,7 @@ JS::CallOriginalPromiseReject(JSContext* cx, JS::HandleValue rejectionValue) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, rejectionValue); + cx->check(rejectionValue); RootedObject promise(cx, PromiseObject::unforgeableReject(cx, rejectionValue)); MOZ_ASSERT_IF(promise, CheckedUnwrap(promise)->is()); @@ -5161,7 +5161,7 @@ ResolveOrRejectPromise(JSContext* cx, JS::HandleObject promiseObj, JS::HandleVal { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, promiseObj, resultOrReason_); + cx->check(promiseObj, resultOrReason_); mozilla::Maybe ar; Rooted promise(cx); @@ -5205,7 +5205,7 @@ CallOriginalPromiseThenImpl(JSContext* cx, JS::HandleObject promiseObj, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, promiseObj, onResolvedObj_, onRejectedObj_); + cx->check(promiseObj, onResolvedObj_, onRejectedObj_); MOZ_ASSERT_IF(onResolvedObj_, IsCallable(onResolvedObj_)); MOZ_ASSERT_IF(onRejectedObj_, IsCallable(onRejectedObj_)); @@ -5446,8 +5446,8 @@ JS::ReadableStreamCancel(JSContext* cx, HandleObject streamObj, HandleValue reas { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); - assertSameCompartment(cx, reason); + cx->check(streamObj); + cx->check(reason); Rooted stream(cx, &streamObj->as()); return ReadableStream::cancel(cx, stream, reason); @@ -5464,7 +5464,7 @@ JS::ReadableStreamGetReader(JSContext* cx, HandleObject streamObj, ReadableStrea { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); + cx->check(streamObj); Rooted stream(cx, &streamObj->as()); return ReadableStream::getReader(cx, stream, mode); @@ -5475,7 +5475,7 @@ 
JS::ReadableStreamGetExternalUnderlyingSource(JSContext* cx, HandleObject stream { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); + cx->check(streamObj); Rooted stream(cx, &streamObj->as()); return ReadableStream::getExternalSource(cx, stream, source); @@ -5493,7 +5493,7 @@ JS::ReadableStreamUpdateDataAvailableFromSource(JSContext* cx, JS::HandleObject { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); + cx->check(streamObj); Rooted stream(cx, &streamObj->as()); return ReadableStream::updateDataAvailableFromSource(cx, stream, availableData); @@ -5505,7 +5505,7 @@ JS::ReadableStreamTee(JSContext* cx, HandleObject streamObj, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); + cx->check(streamObj); Rooted stream(cx, &streamObj->as()); Rooted branch1Stream(cx); @@ -5531,7 +5531,7 @@ JS::ReadableStreamClose(JSContext* cx, HandleObject streamObj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); + cx->check(streamObj); Rooted stream(cx, &streamObj->as()); return ReadableStream::close(cx, stream); @@ -5542,8 +5542,8 @@ JS::ReadableStreamEnqueue(JSContext* cx, HandleObject streamObj, HandleValue chu { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); - assertSameCompartment(cx, chunk); + cx->check(streamObj); + cx->check(chunk); Rooted stream(cx, &streamObj->as()); if (stream->mode() != JS::ReadableStreamMode::Default) { @@ -5560,8 +5560,8 @@ JS::ReadableByteStreamEnqueueBuffer(JSContext* cx, HandleObject streamObj, Handl { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, streamObj); - assertSameCompartment(cx, chunkObj); + cx->check(streamObj); + cx->check(chunkObj); Rooted stream(cx, &streamObj->as()); if (stream->mode() != JS::ReadableStreamMode::Byte) { @@ -5592,8 +5592,8 @@ JS::ReadableStreamError(JSContext* cx, HandleObject streamObj, HandleValue error { AssertHeapIsIdle(); CHECK_REQUEST(cx); - 
assertSameCompartment(cx, streamObj); - assertSameCompartment(cx, error); + cx->check(streamObj); + cx->check(error); Rooted stream(cx, &streamObj->as()); return js::ReadableStream::error(cx, stream, error); @@ -5610,8 +5610,8 @@ JS::ReadableStreamReaderCancel(JSContext* cx, HandleObject reader, HandleValue r { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, reader); - assertSameCompartment(cx, reason); + cx->check(reader); + cx->check(reason); return js::ReadableStreamReaderCancel(cx, reader, reason); } @@ -5621,7 +5621,7 @@ JS::ReadableStreamReaderReleaseLock(JSContext* cx, HandleObject reader) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, reader); + cx->check(reader); return js::ReadableStreamReaderReleaseLock(cx, reader); } @@ -5631,7 +5631,7 @@ JS::ReadableStreamDefaultReaderRead(JSContext* cx, HandleObject readerObj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, readerObj); + cx->check(readerObj); Rooted reader(cx, &readerObj->as()); return js::ReadableStreamDefaultReader::read(cx, reader); @@ -5642,8 +5642,8 @@ JS::ReadableStreamBYOBReaderRead(JSContext* cx, HandleObject readerObj, HandleOb { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, readerObj); - assertSameCompartment(cx, viewObj); + cx->check(readerObj); + cx->check(viewObj); Rooted reader(cx, &readerObj->as()); Rooted view(cx, &viewObj->as()); @@ -5906,7 +5906,7 @@ JS_GetLatin1StringCharsAndLength(JSContext* cx, const JS::AutoRequireNoGC& nogc, MOZ_ASSERT(plength); AssertHeapIsIdleOrStringIsFlat(str); CHECK_REQUEST(cx); - assertSameCompartment(cx, str); + cx->check(str); JSLinearString* linear = str->ensureLinear(cx); if (!linear) return nullptr; @@ -5921,7 +5921,7 @@ JS_GetTwoByteStringCharsAndLength(JSContext* cx, const JS::AutoRequireNoGC& nogc MOZ_ASSERT(plength); AssertHeapIsIdleOrStringIsFlat(str); CHECK_REQUEST(cx); - assertSameCompartment(cx, str); + cx->check(str); JSLinearString* linear = 
str->ensureLinear(cx); if (!linear) return nullptr; @@ -5940,7 +5940,7 @@ JS_GetStringCharAt(JSContext* cx, JSString* str, size_t index, char16_t* res) { AssertHeapIsIdleOrStringIsFlat(str); CHECK_REQUEST(cx); - assertSameCompartment(cx, str); + cx->check(str); JSLinearString* linear = str->ensureLinear(cx); if (!linear) @@ -5961,7 +5961,7 @@ JS_CopyStringChars(JSContext* cx, mozilla::Range dest, JSString* str) { AssertHeapIsIdleOrStringIsFlat(str); CHECK_REQUEST(cx); - assertSameCompartment(cx, str); + cx->check(str); JSLinearString* linear = str->ensureLinear(cx); if (!linear) @@ -5997,7 +5997,7 @@ JS_FlattenString(JSContext* cx, JSString* str) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, str); + cx->check(str); JSFlatString* flat = str->ensureFlat(cx); if (!flat) return nullptr; @@ -6160,7 +6160,7 @@ JS::NewSymbol(JSContext* cx, HandleString description) AssertHeapIsIdle(); CHECK_REQUEST(cx); if (description) - assertSameCompartment(cx, description); + cx->check(description); return Symbol::new_(cx, SymbolCode::UniqueSymbol, description); } @@ -6170,7 +6170,7 @@ JS::GetSymbolFor(JSContext* cx, HandleString key) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, key); + cx->check(key); return Symbol::for_(cx, key); } @@ -6228,7 +6228,7 @@ JS_Stringify(JSContext* cx, MutableHandleValue vp, HandleObject replacer, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, replacer, space); + cx->check(replacer, space); StringBuffer sb(cx); if (!sb.ensureTwoByteChars()) return false; @@ -6245,7 +6245,7 @@ JS::ToJSONMaybeSafely(JSContext* cx, JS::HandleObject input, { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, input); + cx->check(input); StringBuffer sb(cx); if (!sb.ensureTwoByteChars()) @@ -6289,7 +6289,7 @@ JS_ParseJSONWithReviver(JSContext* cx, HandleString str, HandleValue reviver, Mu { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, str); + cx->check(str); 
AutoStableStringChars stableChars(cx); if (!stableChars.init(cx, str)) @@ -6571,7 +6571,7 @@ JS::NewDateObject(JSContext* cx, JS::ClippedTime time) JS_PUBLIC_API(bool) JS_ObjectIsDate(JSContext* cx, HandleObject obj, bool* isDate) { - assertSameCompartment(cx, obj); + cx->check(obj); ESClass cls; if (!GetBuiltinClass(cx, obj, &cls)) @@ -6615,7 +6615,7 @@ JS_SetRegExpInput(JSContext* cx, HandleObject obj, HandleString input) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, input); + cx->check(input); Handle global = obj.as(); RegExpStatics* res = GlobalObject::getRegExpStatics(cx, global); @@ -6679,7 +6679,7 @@ JS_ExecuteRegExpNoStatics(JSContext* cx, HandleObject obj, char16_t* chars, size JS_PUBLIC_API(bool) JS_ObjectIsRegExp(JSContext* cx, HandleObject obj, bool* isRegExp) { - assertSameCompartment(cx, obj); + cx->check(obj); ESClass cls; if (!GetBuiltinClass(cx, obj, &cls)) @@ -6777,7 +6777,7 @@ JS_SetPendingException(JSContext* cx, HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); cx->setPendingException(value); } @@ -6879,7 +6879,7 @@ JS_ErrorFromException(JSContext* cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); return ErrorFromException(cx, obj); } @@ -7308,7 +7308,7 @@ JS_CharsToId(JSContext* cx, JS::TwoByteChars chars, MutableHandleId idp) JS_PUBLIC_API(bool) JS_IsIdentifier(JSContext* cx, HandleString str, bool* isIdentifier) { - assertSameCompartment(cx, str); + cx->check(str); JSLinearString* linearStr = str->ensureLinear(cx); if (!linearStr) @@ -7540,7 +7540,7 @@ JS::detail::AssertArgumentsAreSane(JSContext* cx, HandleValue value) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); } #endif /* JS_DEBUG */ diff --git a/js/src/jsexn.cpp b/js/src/jsexn.cpp index 0d4a1ec81076..4a009f43dac0 100644 --- a/js/src/jsexn.cpp +++ b/js/src/jsexn.cpp @@ -1034,7 +1034,7 
@@ JS::CreateError(JSContext* cx, JSExnType type, HandleObject stack, HandleString uint32_t lineNumber, uint32_t columnNumber, JSErrorReport* report, HandleString message, MutableHandleValue rval) { - assertSameCompartment(cx, stack, fileName, message); + cx->check(stack, fileName, message); AssertObjectIsSavedFrameOrWrapper(cx, stack); js::UniquePtr rep; diff --git a/js/src/jsfriendapi.cpp b/js/src/jsfriendapi.cpp index 8ec66def2e10..c97ae6b41aea 100644 --- a/js/src/jsfriendapi.cpp +++ b/js/src/jsfriendapi.cpp @@ -76,7 +76,7 @@ JS_SetGrayGCRootsTracer(JSContext* cx, JSTraceDataOp traceOp, void* data) JS_FRIEND_API(JSObject*) JS_FindCompilationScope(JSContext* cx, HandleObject objArg) { - assertSameCompartment(cx, objArg); + cx->check(objArg); RootedObject obj(cx, objArg); @@ -111,7 +111,7 @@ JS_SplicePrototype(JSContext* cx, HandleObject obj, HandleObject proto) * does not nuke type information for the object. */ CHECK_REQUEST(cx); - assertSameCompartment(cx, obj, proto); + cx->check(obj, proto); if (!obj->isSingleton()) { /* @@ -145,7 +145,7 @@ JS_NewObjectWithUniqueType(JSContext* cx, const JSClass* clasp, HandleObject pro JS_FRIEND_API(JSObject*) JS_NewObjectWithoutMetadata(JSContext* cx, const JSClass* clasp, JS::Handle proto) { - assertSameCompartment(cx, proto); + cx->check(proto); AutoSuppressAllocationMetadataBuilder suppressMetadata(cx); return JS_NewObjectWithGivenProto(cx, clasp, proto); } @@ -253,7 +253,7 @@ JS_DefineFunctionsWithHelp(JSContext* cx, HandleObject obj, const JSFunctionSpec MOZ_ASSERT(!cx->zone()->isAtomsZone()); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); for (; fs->name; fs++) { JSAtom* atom = Atomize(cx, fs->name, strlen(fs->name)); if (!atom) @@ -333,7 +333,7 @@ js::GetBuiltinClass(JSContext* cx, HandleObject obj, ESClass* cls) JS_FRIEND_API(const char*) js::ObjectClassName(JSContext* cx, HandleObject obj) { - assertSameCompartment(cx, obj); + cx->check(obj); return GetObjectClassName(cx, obj); } @@ -393,13 
+393,13 @@ js::GetPrototypeNoProxy(JSObject* obj) JS_FRIEND_API(void) js::AssertSameCompartment(JSContext* cx, JSObject* obj) { - assertSameCompartment(cx, obj); + cx->check(obj); } JS_FRIEND_API(void) js::AssertSameCompartment(JSContext* cx, JS::HandleValue v) { - assertSameCompartment(cx, v); + cx->check(v); } #ifdef DEBUG @@ -445,7 +445,7 @@ js::DefineFunctionWithReserved(JSContext* cx, JSObject* objArg, const char* name RootedObject obj(cx, objArg); MOZ_ASSERT(!cx->zone()->isAtomsZone()); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); JSAtom* atom = Atomize(cx, name, strlen(name)); if (!atom) return nullptr; @@ -480,7 +480,7 @@ js::NewFunctionByIdWithReserved(JSContext* cx, JSNative native, unsigned nargs, MOZ_ASSERT(JSID_IS_STRING(id)); MOZ_ASSERT(!cx->zone()->isAtomsZone()); CHECK_REQUEST(cx); - assertSameCompartment(cx, id); + cx->check(id); RootedAtom atom(cx, JSID_TO_ATOM(id)); return (flags & JSFUN_CONSTRUCTOR) ? @@ -513,7 +513,7 @@ js::FunctionHasNativeReserved(JSObject* fun) JS_FRIEND_API(bool) js::GetObjectProto(JSContext* cx, JS::Handle obj, JS::MutableHandle proto) { - assertSameCompartment(cx, obj); + cx->check(obj); if (IsProxy(obj)) return JS_GetPrototype(cx, obj, proto); @@ -659,7 +659,7 @@ JS_FRIEND_API(JSObject*) JS_CloneObject(JSContext* cx, HandleObject obj, HandleObject protoArg) { // |obj| might be in a different compartment. 
- assertSameCompartment(cx, protoArg); + cx->check(protoArg); Rooted proto(cx, TaggedProto(protoArg.get())); return CloneObject(cx, obj, proto); } @@ -1120,7 +1120,7 @@ JS::ForceLexicalInitialization(JSContext *cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); bool initializedAny = false; NativeObject* nobj = &obj->as(); @@ -1446,7 +1446,7 @@ js::GetAllocationMetadata(JSObject* obj) JS_FRIEND_API(bool) js::ReportIsNotFunction(JSContext* cx, HandleValue v) { - assertSameCompartment(cx, v); + cx->check(v); return ReportIsNotFunction(cx, v, -1); } @@ -1491,7 +1491,7 @@ js::SetWindowProxy(JSContext* cx, HandleObject global, HandleObject windowProxy) AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, global, windowProxy); + cx->check(global, windowProxy); MOZ_ASSERT(IsWindowProxy(windowProxy)); global->as().setWindowProxy(windowProxy); diff --git a/js/src/jspubtd.h b/js/src/jspubtd.h index 1ee6915b07d5..2661c3043ebd 100644 --- a/js/src/jspubtd.h +++ b/js/src/jspubtd.h @@ -143,11 +143,16 @@ RuntimeHeapIsMinorCollecting() return RuntimeHeapState() == HeapState::MinorCollecting; } +static inline bool +RuntimeHeapIsCollecting(HeapState state) +{ + return state == HeapState::MajorCollecting || state == HeapState::MinorCollecting; +} + static inline bool RuntimeHeapIsCollecting() { - HeapState state = RuntimeHeapState(); - return state == HeapState::MajorCollecting || state == HeapState::MinorCollecting; + return RuntimeHeapIsCollecting(RuntimeHeapState()); } static inline bool diff --git a/js/src/moz.build b/js/src/moz.build index 106be1ef1e73..bbd3ab5532b6 100755 --- a/js/src/moz.build +++ b/js/src/moz.build @@ -323,7 +323,6 @@ UNIFIED_SOURCES += [ 'jit/shared/CodeGenerator-shared.cpp', 'jit/shared/Disassembler-shared.cpp', 'jit/shared/Lowering-shared.cpp', - 'jit/SharedIC.cpp', 'jit/Sink.cpp', 'jit/Snapshots.cpp', 'jit/StupidAllocator.cpp', diff --git a/js/src/proxy/CrossCompartmentWrapper.cpp 
b/js/src/proxy/CrossCompartmentWrapper.cpp index 5fa6d3775677..9d87574779f6 100644 --- a/js/src/proxy/CrossCompartmentWrapper.cpp +++ b/js/src/proxy/CrossCompartmentWrapper.cpp @@ -457,7 +457,7 @@ CrossCompartmentWrapper::regexp_toShared(JSContext* cx, HandleObject wrapper) co // Get an equivalent RegExpShared associated with the current compartment. RootedAtom source(cx, re->getSource()); cx->markAtom(source); - return cx->zone()->regExps.get(cx, source, re->getFlags()); + return cx->zone()->regExps().get(cx, source, re->getFlags()); } bool diff --git a/js/src/proxy/Proxy.cpp b/js/src/proxy/Proxy.cpp index 2596c268b102..d4ff4ce00370 100644 --- a/js/src/proxy/Proxy.cpp +++ b/js/src/proxy/Proxy.cpp @@ -470,7 +470,7 @@ Proxy::enumerate(JSContext* cx, HandleObject proxy) return nullptr; if (!proto) return EnumeratedIdVectorToIterator(cx, proxy, props); - assertSameCompartment(cx, proxy, proto); + cx->check(proxy, proto); AutoIdVector protoProps(cx); if (!GetPropertyKeys(cx, proto, 0, &protoProps)) diff --git a/js/src/shell/ModuleLoader.js b/js/src/shell/ModuleLoader.js index 5246992c5644..82cf6f9ba4ef 100644 --- a/js/src/shell/ModuleLoader.js +++ b/js/src/shell/ModuleLoader.js @@ -3,7 +3,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -/* global getModuleLoadPath setModuleResolveHook parseModule os */ +/* global getModuleLoadPath setModuleResolveHook parseModule instantiateModule evaluateModule os */ // A basic synchronous module loader for testing the shell. 
{ @@ -161,8 +161,8 @@ const ReflectLoader = new class { loadAndExecute(path) { let module = this.loadAndParse(path); - module.declarationInstantiation(); - return module.evaluation(); + instantiateModule(module); + return evaluateModule(module); } importRoot(path) { diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp index 11edf64524a1..6ec87ad336ae 100644 --- a/js/src/shell/js.cpp +++ b/js/src/shell/js.cpp @@ -71,6 +71,7 @@ #endif // defined(JS_BUILD_BINAST) #include "frontend/Parser.h" #include "gc/PublicIterators.h" +#include "gc/Zone.h" #include "jit/arm/Simulator-arm.h" #include "jit/InlinableNatives.h" #include "jit/Ion.h" @@ -475,8 +476,14 @@ OffThreadJob::waitUntilDone(JSContext* cx) return token; } +using ScriptObjectMap = JS::WeakCache, + HeapPtr, + MovableCellHasher>, + SystemAllocPolicy>>; + struct ShellCompartmentPrivate { - JS::Heap grayRoot; + GCPtrObject grayRoot; + UniquePtr moduleLoaderScriptObjectMap; }; struct MOZ_STACK_CLASS EnvironmentPreparer : public js::ScriptEnvironmentPreparer { @@ -650,7 +657,7 @@ TraceGrayRoots(JSTracer* trc, void* data) for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) { auto priv = static_cast(JS_GetCompartmentPrivate(comp.get())); if (priv) - JS::TraceEdge(trc, &priv->grayRoot, "test gray root"); + TraceNullableEdge(trc, &priv->grayRoot, "test gray root"); } } } @@ -3600,6 +3607,13 @@ EnsureGeckoProfilingStackInstalled(JSContext* cx, ShellContext* sc) return true; } +static void +DestroyShellCompartmentPrivate(JSFreeOp* fop, JS::Compartment* compartment) +{ + auto priv = static_cast(JS_GetCompartmentPrivate(compartment)); + js_delete(priv); +} + struct WorkerInput { JSRuntime* parentRuntime; @@ -3647,6 +3661,7 @@ WorkerMain(WorkerInput* input) JS::SetWarningReporter(cx, WarningReporter); js::SetPreserveWrapperCallback(cx, DummyPreserveWrapperCallback); JS_InitDestroyPrincipalsCallback(cx, ShellPrincipals::destroy); + JS_SetDestroyCompartmentCallback(cx, DestroyShellCompartmentPrivate); 
js::UseInternalJobQueues(cx); @@ -4269,6 +4284,89 @@ Compile(JSContext* cx, unsigned argc, Value* vp) return ok; } +static ShellCompartmentPrivate* +EnsureShellCompartmentPrivate(JSContext* cx) +{ + Compartment* comp = cx->compartment(); + auto priv = static_cast(JS_GetCompartmentPrivate(comp)); + if (!priv) { + priv = cx->new_(); + JS_SetCompartmentPrivate(cx->compartment(), priv); + } + return priv; +} + +static ScriptObjectMap* +EnsureModuleLoaderScriptObjectMap(JSContext* cx) +{ + auto priv = EnsureShellCompartmentPrivate(cx); + if (!priv) + return nullptr; + + if (priv->moduleLoaderScriptObjectMap) + return priv->moduleLoaderScriptObjectMap.get(); + + Zone* zone = cx->zone(); + auto* map = cx->new_(zone); + if (!map) + return nullptr; + + priv->moduleLoaderScriptObjectMap.reset(map); + return map; +} + +// An object used to represent a JSScript in the shell's self-hosted module +// loader since we can't pass those directly. +class ShellScriptObject : public NativeObject +{ + public: + static const Class class_; + + enum { + ScriptSlot = 0 + }; + + static JSObject* get(JSContext* cx, HandleScript script); + + JSScript* script() const; +}; + +const Class ShellScriptObject::class_ = { + "ShellScriptObject", + JSCLASS_HAS_RESERVED_SLOTS(1) +}; + +/* static */ JSObject* +ShellScriptObject::get(JSContext* cx, HandleScript script) +{ + auto map = EnsureModuleLoaderScriptObjectMap(cx); + if (!map) + return nullptr; + + auto ptr = map->lookup(script); + if (ptr) + return ptr->value(); + + JSObject* obj = NewObjectWithGivenProto(cx, &class_, nullptr); + if (!obj) + return nullptr; + + obj->as().setReservedSlot(ScriptSlot, PrivateGCThingValue(script)); + + if (!map->put(script, obj)) { + ReportOutOfMemory(cx); + return nullptr; + } + + return obj; +} + +JSScript* +ShellScriptObject::script() const +{ + return getReservedSlot(ScriptSlot).toGCThing()->as(); +} + static bool ParseModule(JSContext* cx, unsigned argc, Value* vp) { @@ -4320,10 +4418,70 @@ 
ParseModule(JSContext* cx, unsigned argc, Value* vp) if (!script) return false; - args.rval().setObject(*script->module()); + JSObject* obj = ShellScriptObject::get(cx, script); + if (!obj) + return false; + + args.rval().setObject(*obj); return true; } +static bool +ReportArgumentTypeError(JSContext* cx, HandleValue value, const char* expected) +{ + const char* typeName = InformalValueTypeName(value); + JS_ReportErrorASCII(cx, "Expected %s, got %s", expected, typeName); + return false; +} + +static bool +InstantiateModule(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + if (args.length() != 1) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_MORE_ARGS_NEEDED, + "instantiateModule", "0", "s"); + return false; + } + + if (!args[0].isObject() || !args[0].toObject().is()) + return ReportArgumentTypeError(cx, args[0], "ShellScriptObject"); + + JSScript* script = args[0].toObject().as().script(); + RootedModuleObject module(cx, script->module()); + if (!module) { + JS_ReportErrorASCII(cx, "Expected a module script"); + return false; + } + + args.rval().setUndefined(); + return ModuleObject::Instantiate(cx, module); +} + +static bool +EvaluateModule(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + if (args.length() != 1) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_MORE_ARGS_NEEDED, + "evaluateModule", "0", "s"); + return false; + } + + if (!args[0].isObject() || !args[0].toObject().is()) + return ReportArgumentTypeError(cx, args[0], "ShellScriptObject"); + + JSScript* script = args[0].toObject().as().script(); + RootedModuleObject module(cx, script->module()); + if (!module) { + JS_ReportErrorASCII(cx, "Expected a module script"); + return false; + } + + args.rval().setUndefined(); + return ModuleObject::Evaluate(cx, module); +} + static bool SetModuleResolveHook(JSContext* cx, unsigned argc, Value* vp) { @@ -4360,20 +4518,24 @@ 
CallModuleResolveHook(JSContext* cx, HandleScript script, HandleString specifier } MOZ_ASSERT(hookValue.toObject().is()); + JSObject* obj = ShellScriptObject::get(cx, script); + if (!obj) + return nullptr; + JS::AutoValueArray<2> args(cx); - args[0].setObject(*script->module()); + args[0].setObject(*obj); args[1].setString(specifier); RootedValue result(cx); if (!JS_CallFunctionValue(cx, nullptr, hookValue, args, &result)) return nullptr; - if (!result.isObject() || !result.toObject().is()) { - JS_ReportErrorASCII(cx, "Module resolve hook did not return Module object"); + if (!result.isObject() || !result.toObject().is()) { + JS_ReportErrorASCII(cx, "Module resolve hook did not return script object"); return nullptr; } - return result.toObject().as().script(); + return result.toObject().as().script(); } static bool @@ -4411,6 +4573,125 @@ GetModuleLoadPath(JSContext* cx, unsigned argc, Value* vp) return true; } +static ModuleEnvironmentObject* +GetModuleEnvironment(JSContext* cx, HandleValue scriptValue) +{ + JSScript* script = scriptValue.toObject().as().script(); + RootedModuleObject module(cx, script->module()); + if (!module) { + JS_ReportErrorASCII(cx, "Expecting a module script"); + return nullptr; + } + + if (module->hadEvaluationError()) { + JS_ReportErrorASCII(cx, "Module environment unavailable"); + return nullptr; + } + + // Use the initial environment so that tests can check bindings exist before + // they have been instantiated. 
+ RootedModuleEnvironmentObject env(cx, &module->initialEnvironment()); + MOZ_ASSERT(env); + return env; +} + +static bool +GetModuleEnvironmentNames(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + if (args.length() != 1) { + JS_ReportErrorASCII(cx, "Wrong number of arguments"); + return false; + } + + if (!args[0].isObject() || !args[0].toObject().is()) { + JS_ReportErrorASCII(cx, "First argument should be a ShellScriptObject"); + return false; + } + + RootedModuleEnvironmentObject env(cx, GetModuleEnvironment(cx, args[0])); + if (!env) + return false; + + Rooted ids(cx, IdVector(cx)); + if (!JS_Enumerate(cx, env, &ids)) + return false; + + uint32_t length = ids.length(); + RootedArrayObject array(cx, NewDenseFullyAllocatedArray(cx, length)); + if (!array) + return false; + + array->setDenseInitializedLength(length); + for (uint32_t i = 0; i < length; i++) + array->initDenseElement(i, StringValue(JSID_TO_STRING(ids[i]))); + + args.rval().setObject(*array); + return true; +} + +static bool +GetModuleEnvironmentValue(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + if (args.length() != 2) { + JS_ReportErrorASCII(cx, "Wrong number of arguments"); + return false; + } + + if (!args[0].isObject() || !args[0].toObject().is()) { + JS_ReportErrorASCII(cx, "First argument should be a ShellScriptObject"); + return false; + } + + RootedModuleEnvironmentObject env(cx, GetModuleEnvironment(cx, args[0])); + if (!env) + return false; + + RootedString name(cx, JS::ToString(cx, args[1])); + if (!name) + return false; + + RootedId id(cx); + if (!JS_StringToId(cx, name, &id)) + return false; + + if (!GetProperty(cx, env, env, id, args.rval())) + return false; + + if (args.rval().isMagic(JS_UNINITIALIZED_LEXICAL)) { + ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, id); + return false; + } + + return true; +} + +static bool +GetModuleObject(JSContext* cx, unsigned argc, Value* vp) +{ + 
CallArgs args = CallArgsFromVp(argc, vp); + if (args.length() != 1) { + JS_ReportErrorASCII(cx, "Wrong number of arguments"); + return false; + } + + if (!args[0].isObject() || !args[0].toObject().is()) { + JS_ReportErrorASCII(cx, "First argument should be a ShellScriptObject"); + return false; + } + + JSScript* script = args[0].toObject().as().script(); + RootedModuleObject module(cx, script->module()); + if (!module) { + JS_ReportErrorASCII(cx, "Expecting a module script"); + return false; + } + + args.rval().setObject(*module); + return true; +} + #if defined(JS_BUILD_BINAST) static bool @@ -4882,7 +5163,11 @@ FinishOffThreadModule(JSContext* cx, unsigned argc, Value* vp) if (!script) return false; - args.rval().setObject(*script->module()); + JSObject* obj = ShellScriptObject::get(cx, script); + if (!obj) + return false; + + args.rval().setObject(*obj); return true; } @@ -6497,9 +6782,9 @@ DumpScopeChain(JSContext* cx, unsigned argc, Value* vp) // where we can store a JSObject*, and create a new object if one doesn't // already exist. // -// Note that EnsureGrayRoot() will automatically blacken the returned object, -// so it will not actually end up marked gray until the following GC clears the -// black bit (assuming nothing is holding onto it.) +// Note that EnsureGrayRoot() will blacken the returned object, so it will not +// actually end up marked gray until the following GC clears the black bit +// (assuming nothing is holding onto it.) // // The idea is that you can set up a whole graph of objects to be marked gray, // hanging off of the object returned from grayRoot(). Then you GC to clear the @@ -6511,17 +6796,6 @@ DumpScopeChain(JSContext* cx, unsigned argc, Value* vp) // getMarks(), in the form of an array of strings with each index corresponding // to the original objects passed to addMarkObservers(). 
-static ShellCompartmentPrivate* -EnsureShellCompartmentPrivate(JSContext* cx) -{ - auto priv = static_cast(JS_GetCompartmentPrivate(cx->compartment())); - if (!priv) { - priv = cx->new_(); - JS_SetCompartmentPrivate(cx->compartment(), priv); - } - return priv; -} - static bool EnsureGrayRoot(JSContext* cx, unsigned argc, Value* vp) { @@ -6536,7 +6810,11 @@ EnsureGrayRoot(JSContext* cx, unsigned argc, Value* vp) return false; } - args.rval().setObject(*priv->grayRoot); + // Barrier to enforce the invariant that JS does not touch gray objects. + JSObject* obj = priv->grayRoot; + JS::ExposeObjectToActiveJS(obj); + + args.rval().setObject(*obj); return true; } @@ -7159,7 +7437,15 @@ static const JSFunctionSpecWithHelp shell_functions[] = { JS_FN_HELP("parseModule", ParseModule, 1, 0, "parseModule(code)", -" Parses source text as a module and returns a Module object."), +" Parses source text as a module and returns a script object."), + + JS_FN_HELP("instantiateModule", InstantiateModule, 1, 0, +"instantiateModule(moduleScript)", +" Instantiate a module script graph."), + + JS_FN_HELP("evaluateModule", EvaluateModule, 1, 0, +"evaluateModule(moduleScript)", +" Evaluate a previously instantiated module script graph."), JS_FN_HELP("setModuleResolveHook", SetModuleResolveHook, 1, 0, "setModuleResolveHook(function(module, specifier) {})", @@ -7172,6 +7458,18 @@ static const JSFunctionSpecWithHelp shell_functions[] = { " Return any --module-load-path argument passed to the shell. 
Used by the\n" " module loader.\n"), + JS_FN_HELP("getModuleEnvironmentNames", GetModuleEnvironmentNames, 1, 0, +"getModuleEnvironmentNames(module)", +" Get the list of a module environment's bound names for a specified module.\n"), + + JS_FN_HELP("getModuleEnvironmentValue", GetModuleEnvironmentValue, 2, 0, +"getModuleEnvironmentValue(module, name)", +" Get the value of a bound name in a module environment.\n"), + + JS_FN_HELP("getModuleObject", GetModuleObject, 1, 0, +"getModuleObject(module)", +" Get the internal JS object that holds module metadata for a module script.\n"), + #if defined(JS_BUILD_BINAST) JS_FN_HELP("parseBin", BinParse, 1, 0, @@ -8881,13 +9179,8 @@ SetContextOptions(JSContext* cx, const OptionParser& op) return OptionFailure("ion-scalar-replacement", str); } - if (const char* str = op.getStringOption("ion-shared-stubs")) { - if (strcmp(str, "on") == 0) - jit::JitOptions.disableSharedStubs = false; - else if (strcmp(str, "off") == 0) - jit::JitOptions.disableSharedStubs = true; - else - return OptionFailure("ion-shared-stubs", str); + if (op.getStringOption("ion-shared-stubs")) { + // Dead option, preserved for now for potential fuzzer interaction. 
} if (const char* str = op.getStringOption("ion-gvn")) { @@ -9611,6 +9904,7 @@ main(int argc, char** argv, char** envp) JS_SetTrustedPrincipals(cx, &ShellPrincipals::fullyTrusted); JS_SetSecurityCallbacks(cx, &ShellPrincipals::securityCallbacks); JS_InitDestroyPrincipalsCallback(cx, ShellPrincipals::destroy); + JS_SetDestroyCompartmentCallback(cx, DestroyShellCompartmentPrivate); JS_AddInterruptCallback(cx, ShellInterruptCallback); JS::SetBuildIdOp(cx, ShellBuildId); diff --git a/js/src/shell/jsshell.h b/js/src/shell/jsshell.h index 9dbdcb8b5358..8964bf951af9 100644 --- a/js/src/shell/jsshell.h +++ b/js/src/shell/jsshell.h @@ -14,6 +14,7 @@ #include "jsapi.h" +#include "gc/WeakMap.h" #include "js/GCVector.h" #include "threading/ConditionVariable.h" #include "threading/LockGuard.h" @@ -175,6 +176,7 @@ struct ShellContext UniquePtr geckoProfilingStack; JS::UniqueChars moduleLoadPath; + UniquePtr markObservers; // Off-thread parse state. diff --git a/js/src/tests/jstests.list b/js/src/tests/jstests.list index e04344adce53..fee51686f4ff 100644 --- a/js/src/tests/jstests.list +++ b/js/src/tests/jstests.list @@ -378,34 +378,23 @@ skip script test262/language/expressions/prefix-decrement/target-cover-newtarget skip script test262/language/expressions/prefix-decrement/non-simple.js skip script test262/language/asi/S7.9_A5.7_T1.js -# Dependent on evalInWorker, setSharedArrayBuffer, and getSharedArrayBuffer; plus: -# https://bugzilla.mozilla.org/show_bug.cgi?id=1349863 - Enable test262 agent tests in browser -# https://bugzilla.mozilla.org/show_bug.cgi?id=1470490 - Rename Atomics.wake to Atomics.notify -skip include test262/built-ins/Atomics/notify/jstests.list -skip script test262/built-ins/Atomics/wait/false-for-timeout-agent.js -skip script test262/built-ins/Atomics/wait/nan-for-timeout.js -skip script test262/built-ins/Atomics/wait/negative-timeout-agent.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-no-operation.js -skip script 
test262/built-ins/Atomics/wait/no-spurious-wakeup-on-add.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-and.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-compareExchange.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-exchange.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-or.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-store.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-sub.js -skip script test262/built-ins/Atomics/wait/no-spurious-wakeup-on-xor.js -skip script test262/built-ins/Atomics/wait/null-for-timeout-agent.js -skip script test262/built-ins/Atomics/wait/object-for-timeout-agent.js -skip script test262/built-ins/Atomics/wait/poisoned-object-for-timeout-throws-agent.js -skip script test262/built-ins/Atomics/wait/symbol-for-index-throws-agent.js -skip script test262/built-ins/Atomics/wait/symbol-for-timeout-throws-agent.js -skip script test262/built-ins/Atomics/wait/symbol-for-value-throws-agent.js -skip script test262/built-ins/Atomics/wait/true-for-timeout-agent.js -skip script test262/built-ins/Atomics/wait/undefined-for-timeout.js -skip script test262/built-ins/Atomics/wait/undefined-index-defaults-to-zero.js -skip script test262/built-ins/Atomics/wait/wait-index-value-not-equal.js -skip script test262/built-ins/Atomics/wait/waiterlist-block-indexedposition-wake.js -skip script test262/built-ins/Atomics/wait/waiterlist-order-of-operations-is-fifo.js +# Dependent on evalInWorker, setSharedArrayBuffer, and +# getSharedArrayBuffer, plus the test cases can't actually run in the +# browser even if that were fixed, https://bugzil.la/1349863 +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wait/negative-timeout.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wait/was-woken.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wait/did-timeout.js +skip-if(!xulRuntime.shell) script 
test262/built-ins/Atomics/wait/good-views.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wait/no-spurious-wakeup.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wait/nan-timeout.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-all.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-zero.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-negative.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-nan.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-two.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-in-order.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-one.js +skip-if(!xulRuntime.shell) script test262/built-ins/Atomics/wake/wake-all-on-loc.js # https://bugzilla.mozilla.org/show_bug.cgi?id=1346081 skip script test262/intl402/NumberFormat/prototype/format/format-fraction-digits.js diff --git a/js/src/tests/shell/futex.js b/js/src/tests/shell/futex.js index 94373e143283..f3ad8c0a3e9a 100644 --- a/js/src/tests/shell/futex.js +++ b/js/src/tests/shell/futex.js @@ -20,6 +20,8 @@ var hasSharedArrayBuffer = !!(this.SharedArrayBuffer && // Only run if helper threads are available. if (hasSharedArrayBuffer && helperThreadCount() !== 0) { +var mem = new Int32Array(new SharedArrayBuffer(1024)); + //////////////////////////////////////////////////////////// // wait() returns "not-equal" if the value is not the expected one. 
@@ -55,7 +57,7 @@ dprint("Sleeping for 2 seconds"); sleep(2); dprint("Waking the main thread now"); setSharedObject(null); -assertEq(Atomics.wake(mem, 0, 1), 1); // Can fail spuriously but very unlikely +assertEq(Atomics.notify(mem, 0, 1), 1); // Can fail spuriously but very unlikely `); var then = Date.now(); @@ -66,14 +68,14 @@ assertEq(getSharedObject(), null); // The worker's clearing of the mbx is visibl //////////////////////////////////////////////////////////// -// Test the default argument to atomics.wake() +// Test the default argument to Atomics.notify() setSharedObject(mem.buffer); evalInWorker(` var mem = new Int32Array(getSharedObject()); sleep(2); // Probably long enough to avoid a spurious error next -assertEq(Atomics.wake(mem, 0), 1); // Last argument to wake should default to +Infinity +assertEq(Atomics.notify(mem, 0), 1); // Last argument to notify should default to +Infinity `); var then = Date.now(); diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp index f3b6893a8a6f..ed80e6f0804f 100644 --- a/js/src/vm/ArrayBufferObject.cpp +++ b/js/src/vm/ArrayBufferObject.cpp @@ -479,7 +479,7 @@ NoteViewBufferWasDetached(ArrayBufferViewObject* view, ArrayBufferObject::detach(JSContext* cx, Handle buffer, BufferContents newContents) { - assertSameCompartment(cx, buffer); + cx->check(buffer); MOZ_ASSERT(!buffer->isPreparedForAsmJS()); // When detaching buffers where we don't know all views, the new data must @@ -1350,7 +1350,7 @@ ArrayBufferObject::stealContents(JSContext* cx, Handle buffe // stealContents() is used internally by the impl of memory growth. 
MOZ_ASSERT_IF(hasStealableContents, buffer->hasStealableContents() || (buffer->isWasm() && !buffer->isPreparedForAsmJS())); - assertSameCompartment(cx, buffer); + cx->check(buffer); BufferContents oldContents = buffer->contents(); @@ -1813,7 +1813,7 @@ JS_DetachArrayBuffer(JSContext* cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); if (!obj->is()) { JS_ReportErrorASCII(cx, "ArrayBuffer object required"); @@ -1930,7 +1930,7 @@ JS_ExternalizeArrayBufferContents(JSContext* cx, HandleObject obj) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, obj); + cx->check(obj); if (!obj->is()) { JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS); @@ -1962,7 +1962,7 @@ JS_StealArrayBufferContents(JSContext* cx, HandleObject objArg) { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, objArg); + cx->check(objArg); JSObject* obj = CheckedUnwrap(objArg); if (!obj) @@ -2050,7 +2050,7 @@ JS_GetArrayBufferViewBuffer(JSContext* cx, HandleObject objArg, bool* isSharedMe { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, objArg); + cx->check(objArg); JSObject* obj = CheckedUnwrap(objArg); if (!obj) diff --git a/js/src/vm/BytecodeUtil.cpp b/js/src/vm/BytecodeUtil.cpp index 4b5a69a904ca..369a6b0ca1c3 100644 --- a/js/src/vm/BytecodeUtil.cpp +++ b/js/src/vm/BytecodeUtil.cpp @@ -2100,7 +2100,7 @@ ExpressionDecompiler::decompilePC(const OffsetAndDefIndex& offsetAndDefIndex) bool ExpressionDecompiler::init() { - assertSameCompartment(cx, script); + cx->check(script); return sprinter.init(); } diff --git a/js/src/vm/Debugger.cpp b/js/src/vm/Debugger.cpp index 2d90cfcb6f14..c1581c6a67ee 100644 --- a/js/src/vm/Debugger.cpp +++ b/js/src/vm/Debugger.cpp @@ -688,7 +688,7 @@ Debugger::Debugger(JSContext* cx, NativeObject* dbg) traceLoggerScriptedCallsLastDrainedSize(0), traceLoggerScriptedCallsLastDrainedIteration(0) { - assertSameCompartment(cx, dbg); 
+ cx->check(dbg); #ifdef JS_TRACE_LOGGING TraceLoggerThread* logger = TraceLoggerForCurrentThread(cx); @@ -1154,7 +1154,7 @@ Debugger::wrapEnvironment(JSContext* cx, Handle env, bool Debugger::wrapDebuggeeValue(JSContext* cx, MutableHandleValue vp) { - assertSameCompartment(cx, object.get()); + cx->check(object.get()); if (vp.isObject()) { RootedObject obj(cx, &vp.toObject()); @@ -1282,7 +1282,7 @@ Debugger::unwrapDebuggeeObject(JSContext* cx, MutableHandleObject obj) bool Debugger::unwrapDebuggeeValue(JSContext* cx, MutableHandleValue vp) { - assertSameCompartment(cx, object.get(), vp); + cx->check(object.get(), vp); if (vp.isObject()) { RootedObject dobj(cx, &vp.toObject()); if (!unwrapDebuggeeObject(cx, &dobj)) @@ -1671,8 +1671,8 @@ Debugger::newCompletionValue(JSContext* cx, ResumeMode resumeMode, const Value& { // We must be in the debugger's compartment, since that's where we want // to construct the completion value. - assertSameCompartment(cx, object.get()); - assertSameCompartment(cx, value_); + cx->check(object.get()); + cx->check(value_); RootedId key(cx); RootedValue value(cx, value_); @@ -2346,7 +2346,7 @@ Debugger::slowPathPromiseHook(JSContext* cx, Hook hook, Handle p if (hook == OnNewPromise) ar.emplace(cx, promise); - assertSameCompartment(cx, promise); + cx->check(promise); RootedValue rval(cx); ResumeMode resumeMode = dispatchHook( @@ -5284,7 +5284,7 @@ class DebuggerScriptSetPrivateMatcher NativeObject* Debugger::newDebuggerScript(JSContext* cx, Handle referent) { - assertSameCompartment(cx, object.get()); + cx->check(object.get()); RootedObject proto(cx, &object->getReservedSlot(JSSLOT_DEBUG_SCRIPT_PROTO).toObject()); MOZ_ASSERT(proto); @@ -5304,7 +5304,7 @@ JSObject* Debugger::wrapVariantReferent(JSContext* cx, Map& map, Handle key, Handle referent) { - assertSameCompartment(cx, object); + cx->check(object); Handle untaggedReferent = referent.template as(); MOZ_ASSERT(cx->compartment() != untaggedReferent->compartment()); @@ -7078,7 +7078,7 @@ 
class SetDebuggerSourcePrivateMatcher NativeObject* Debugger::newDebuggerSource(JSContext* cx, Handle referent) { - assertSameCompartment(cx, object.get()); + cx->check(object.get()); RootedObject proto(cx, &object->getReservedSlot(JSSLOT_DEBUG_SOURCE_PROTO).toObject()); MOZ_ASSERT(proto); @@ -8031,7 +8031,7 @@ EvaluateInEnv(JSContext* cx, Handle env, AbstractFramePtr frame, mozilla::Range chars, const char* filename, unsigned lineno, MutableHandleValue rval) { - assertSameCompartment(cx, env, frame); + cx->check(env, frame); CompileOptions options(cx); options.setIsRunOnce(true) diff --git a/js/src/vm/EnvironmentObject.cpp b/js/src/vm/EnvironmentObject.cpp index 27579dc8dc99..5e2aab337f15 100644 --- a/js/src/vm/EnvironmentObject.cpp +++ b/js/src/vm/EnvironmentObject.cpp @@ -227,7 +227,7 @@ CallObject* CallObject::create(JSContext* cx, AbstractFramePtr frame) { MOZ_ASSERT(frame.isFunctionFrame()); - assertSameCompartment(cx, frame); + cx->check(frame); RootedObject envChain(cx, frame.environmentChain()); RootedFunction callee(cx, frame.callee()); @@ -913,7 +913,7 @@ LexicalEnvironmentObject::createTemplateObject(JSContext* cx, HandleShape shape, LexicalEnvironmentObject::create(JSContext* cx, Handle scope, HandleObject enclosing, gc::InitialHeap heap) { - assertSameCompartment(cx, enclosing); + cx->check(enclosing); MOZ_ASSERT(scope->hasEnvironment()); RootedShape shape(cx, scope->environmentShape()); @@ -1249,7 +1249,7 @@ EnvironmentIter::EnvironmentIter(JSContext* cx, AbstractFramePtr frame, jsbyteco env_(cx, frame.environmentChain()), frame_(frame) { - assertSameCompartment(cx, frame); + cx->check(frame); settle(); MOZ_GUARD_OBJECT_NOTIFIER_INIT; } @@ -1260,7 +1260,7 @@ EnvironmentIter::EnvironmentIter(JSContext* cx, JSObject* env, Scope* scope, Abs env_(cx, env), frame_(frame) { - assertSameCompartment(cx, frame); + cx->check(frame); settle(); MOZ_GUARD_OBJECT_NOTIFIER_INIT; } @@ -2705,7 +2705,7 @@ DebugEnvironments::takeFrameSnapshot(JSContext* cx, 
Handlecheck(frame); DebugEnvironments* envs = cx->realm()->debugEnvs(); if (!envs) @@ -2747,7 +2747,7 @@ DebugEnvironments::onPopCall(JSContext* cx, AbstractFramePtr frame) void DebugEnvironments::onPopLexical(JSContext* cx, AbstractFramePtr frame, jsbytecode* pc) { - assertSameCompartment(cx, frame); + cx->check(frame); DebugEnvironments* envs = cx->realm()->debugEnvs(); if (!envs) @@ -2795,7 +2795,7 @@ DebugEnvironments::onPopLexical(JSContext* cx, const EnvironmentIter& ei) void DebugEnvironments::onPopVar(JSContext* cx, AbstractFramePtr frame, jsbytecode* pc) { - assertSameCompartment(cx, frame); + cx->check(frame); DebugEnvironments* envs = cx->realm()->debugEnvs(); if (!envs) @@ -3115,7 +3115,7 @@ GetDebugEnvironment(JSContext* cx, const EnvironmentIter& ei) JSObject* js::GetDebugEnvironmentForFunction(JSContext* cx, HandleFunction fun) { - assertSameCompartment(cx, fun); + cx->check(fun); MOZ_ASSERT(CanUseDebugEnvironmentMaps(cx)); if (!DebugEnvironments::updateLiveEnvironments(cx)) return nullptr; @@ -3129,7 +3129,7 @@ js::GetDebugEnvironmentForFunction(JSContext* cx, HandleFunction fun) JSObject* js::GetDebugEnvironmentForFrame(JSContext* cx, AbstractFramePtr frame, jsbytecode* pc) { - assertSameCompartment(cx, frame); + cx->check(frame); if (CanUseDebugEnvironmentMaps(cx) && !DebugEnvironments::updateLiveEnvironments(cx)) return nullptr; @@ -3155,7 +3155,7 @@ js::CreateObjectsForEnvironmentChain(JSContext* cx, AutoObjectVector& chain, { #ifdef DEBUG for (size_t i = 0; i < chain.length(); ++i) { - assertSameCompartment(cx, chain[i]); + cx->check(chain[i]); MOZ_ASSERT(!chain[i]->is() && !chain[i]->is()); } diff --git a/js/src/vm/ErrorObject.cpp b/js/src/vm/ErrorObject.cpp index d8e202a7be1b..5235a3956d7f 100644 --- a/js/src/vm/ErrorObject.cpp +++ b/js/src/vm/ErrorObject.cpp @@ -46,7 +46,7 @@ js::ErrorObject::init(JSContext* cx, Handle obj, JSExnType type, HandleString message) { AssertObjectIsSavedFrameOrWrapper(cx, stack); - assertSameCompartment(cx, obj, 
stack); + cx->check(obj, stack); // Null out early in case of error, for exn_finalize's sake. obj->initReservedSlot(ERROR_REPORT_SLOT, PrivateValue(nullptr)); diff --git a/js/src/vm/GlobalObject.cpp b/js/src/vm/GlobalObject.cpp index 4bf8d169c898..707b59b38a41 100644 --- a/js/src/vm/GlobalObject.cpp +++ b/js/src/vm/GlobalObject.cpp @@ -816,7 +816,7 @@ GlobalObject::getDebuggers() const /* static */ GlobalObject::DebuggerVector* GlobalObject::getOrCreateDebuggers(JSContext* cx, Handle global) { - assertSameCompartment(cx, global); + cx->check(global); DebuggerVector* debuggers = global->getDebuggers(); if (debuggers) return debuggers; @@ -835,7 +835,7 @@ GlobalObject::getOrCreateDebuggers(JSContext* cx, Handle global) /* static */ NativeObject* GlobalObject::getOrCreateForOfPICObject(JSContext* cx, Handle global) { - assertSameCompartment(cx, global); + cx->check(global); NativeObject* forOfPIC = global->getForOfPICObject(); if (forOfPIC) return forOfPIC; diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp index 5fff407b7fb7..be76be195d67 100644 --- a/js/src/vm/HelperThreads.cpp +++ b/js/src/vm/HelperThreads.cpp @@ -279,7 +279,7 @@ IonBuilderMatches(const CompilationSelector& selector, jit::IonBuilder* builder) bool match(JSScript* script) { return script == builder_->script(); } bool match(Realm* realm) { return realm == builder_->script()->realm(); } - bool match(Zone* zone) { return zone == builder_->script()->zone(); } + bool match(Zone* zone) { return zone == builder_->script()->zoneFromAnyThread(); } bool match(JSRuntime* runtime) { return runtime == builder_->script()->runtimeFromAnyThread(); } bool match(AllCompilations all) { return true; } bool match(ZonesInState zbs) { @@ -1665,7 +1665,7 @@ GlobalHelperThreadState::finishParseTask(JSContext* cx, ParseTaskKind kind, bool ok = finishCallback(parseTask.get().get()); for (auto& script : parseTask->scripts) - releaseAssertSameCompartment(cx, script); + cx->releaseCheck(script); if 
(!parseTask->finish(cx) || !ok) return false; diff --git a/js/src/vm/Interpreter-inl.h b/js/src/vm/Interpreter-inl.h index d12e933047b3..6f5e023fc69a 100644 --- a/js/src/vm/Interpreter-inl.h +++ b/js/src/vm/Interpreter-inl.h @@ -527,7 +527,7 @@ GetObjectElementOperation(JSContext* cx, JSOp op, JS::HandleObject obj, JS::Hand return false; } while (false); - assertSameCompartmentDebugOnly(cx, res); + cx->debugOnlyCheck(res); return true; } @@ -574,7 +574,7 @@ GetPrimitiveElementOperation(JSContext* cx, JSOp op, JS::HandleValue receiver, return false; } while (false); - assertSameCompartmentDebugOnly(cx, res); + cx->debugOnlyCheck(res); return true; } diff --git a/js/src/vm/Interpreter.cpp b/js/src/vm/Interpreter.cpp index 0355a673291a..9666f3bd50db 100644 --- a/js/src/vm/Interpreter.cpp +++ b/js/src/vm/Interpreter.cpp @@ -442,13 +442,13 @@ CallJSNative(JSContext* cx, Native native, const CallArgs& args) #ifdef DEBUG bool alreadyThrowing = cx->isExceptionPending(); #endif - assertSameCompartment(cx, args); + cx->check(args); MOZ_ASSERT(!args.callee().is()); AutoRealm ar(cx, &args.callee()); bool ok = native(cx, args.length(), args.base()); if (ok) { - assertSameCompartment(cx, args.rval()); + cx->check(args.rval()); MOZ_ASSERT_IF(!alreadyThrowing, !cx->isExceptionPending()); } return ok; @@ -801,7 +801,7 @@ js::Execute(JSContext* cx, HandleScript script, JSObject& envChainArg, Value* rv #ifdef DEBUG JSObject* s = envChain; do { - assertSameCompartment(cx, s); + cx->check(s); MOZ_ASSERT_IF(!s->enclosingEnvironment(), s->is()); } while ((s = s->enclosingEnvironment())); #endif @@ -1503,7 +1503,7 @@ HandleError(JSContext* cx, InterpreterRegs& regs) } #define REGS (activation.regs()) -#define PUSH_COPY(v) do { *REGS.sp++ = (v); assertSameCompartmentDebugOnly(cx, REGS.sp[-1]); } while (0) +#define PUSH_COPY(v) do { *REGS.sp++ = (v); cx->debugOnlyCheck(REGS.sp[-1]); } while (0) #define PUSH_COPY_SKIP_CHECK(v) *REGS.sp++ = (v) #define PUSH_NULL() REGS.sp++->setNull() #define 
PUSH_UNDEFINED() REGS.sp++->setUndefined() @@ -1511,9 +1511,9 @@ HandleError(JSContext* cx, InterpreterRegs& regs) #define PUSH_DOUBLE(d) REGS.sp++->setDouble(d) #define PUSH_INT32(i) REGS.sp++->setInt32(i) #define PUSH_SYMBOL(s) REGS.sp++->setSymbol(s) -#define PUSH_STRING(s) do { REGS.sp++->setString(s); assertSameCompartmentDebugOnly(cx, REGS.sp[-1]); } while (0) -#define PUSH_OBJECT(obj) do { REGS.sp++->setObject(obj); assertSameCompartmentDebugOnly(cx, REGS.sp[-1]); } while (0) -#define PUSH_OBJECT_OR_NULL(obj) do { REGS.sp++->setObjectOrNull(obj); assertSameCompartmentDebugOnly(cx, REGS.sp[-1]); } while (0) +#define PUSH_STRING(s) do { REGS.sp++->setString(s); cx->debugOnlyCheck(REGS.sp[-1]); } while (0) +#define PUSH_OBJECT(obj) do { REGS.sp++->setObject(obj); cx->debugOnlyCheck(REGS.sp[-1]); } while (0) +#define PUSH_OBJECT_OR_NULL(obj) do { REGS.sp++->setObjectOrNull(obj); cx->debugOnlyCheck(REGS.sp[-1]); } while (0) #define PUSH_MAGIC(magic) REGS.sp++->setMagic(magic) #define POP_COPY_TO(v) (v) = *--REGS.sp #define POP_RETURN_VALUE() REGS.fp()->setReturnValue(*--REGS.sp) @@ -2961,7 +2961,7 @@ CASE(JSOP_CALLPROP) goto error; TypeScript::Monitor(cx, script, REGS.pc, lval); - assertSameCompartmentDebugOnly(cx, lval); + cx->debugOnlyCheck(lval); } END_CASE(JSOP_GETPROP) @@ -2975,7 +2975,7 @@ CASE(JSOP_GETPROP_SUPER) goto error; TypeScript::Monitor(cx, script, REGS.pc, rref); - assertSameCompartmentDebugOnly(cx, rref); + cx->debugOnlyCheck(rref); REGS.sp--; } @@ -2990,7 +2990,7 @@ CASE(JSOP_GETBOUNDNAME) goto error; TypeScript::Monitor(cx, script, REGS.pc, rval); - assertSameCompartmentDebugOnly(cx, rval); + cx->debugOnlyCheck(rval); } END_CASE(JSOP_GETBOUNDNAME) @@ -3706,7 +3706,7 @@ CASE(JSOP_GETLOCAL) * a use of the variable. 
*/ if (REGS.pc[JSOP_GETLOCAL_LENGTH] != JSOP_POP) - assertSameCompartmentDebugOnly(cx, REGS.sp[-1]); + cx->debugOnlyCheck(REGS.sp[-1]); } END_CASE(JSOP_GETLOCAL) diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp index dce3e99be12c..c8c5207d5969 100644 --- a/js/src/vm/Iteration.cpp +++ b/js/src/vm/Iteration.cpp @@ -935,7 +935,7 @@ js::GetIterator(JSContext* cx, HandleObject obj) return nullptr; PropertyIteratorObject* iterobj = &res->as(); - assertSameCompartment(cx, iterobj); + cx->check(iterobj); // Cache the iterator object. if (numGuards > 0) { diff --git a/js/src/vm/JSContext-inl.h b/js/src/vm/JSContext-inl.h index 5cc586b21c54..8fd37221bb83 100644 --- a/js/src/vm/JSContext-inl.h +++ b/js/src/vm/JSContext-inl.h @@ -20,38 +20,56 @@ namespace js { -class CompartmentChecker +class ContextChecks { - JS::Compartment* compartment; + JSContext* cx; + + JS::Realm* realm() const { + return cx->realm(); + } + JS::Compartment* compartment() const { + return cx->compartment(); + } + JS::Zone* zone() const { + return cx->zone(); + } public: - explicit CompartmentChecker(JSContext* cx) - : compartment(cx->compartment()) + explicit ContextChecks(JSContext* cx) + : cx(cx) { } /* - * Set a breakpoint here (break js::CompartmentChecker::fail) to debug - * compartment mismatches. + * Set a breakpoint here (break js::ContextChecks::fail) to debug + * realm/compartment/zone mismatches. */ + static void fail(JS::Realm* r1, JS::Realm* r2, int argIndex) { + MOZ_CRASH_UNSAFE_PRINTF("*** Realm mismatch %p vs. %p at argument %d\n", + r1, r2, argIndex); + } static void fail(JS::Compartment* c1, JS::Compartment* c2, int argIndex) { MOZ_CRASH_UNSAFE_PRINTF("*** Compartment mismatch %p vs. %p at argument %d\n", - (void*) c1, (void*) c2, argIndex); + c1, c2, argIndex); } - static void fail(JS::Zone* z1, JS::Zone* z2, int argIndex) { MOZ_CRASH_UNSAFE_PRINTF("*** Zone mismatch %p vs. 
%p at argument %d\n", - (void*) z1, (void*) z2, argIndex); + z1, z2, argIndex); + } + + void check(JS::Realm* r, int argIndex) { + if (r && r != realm()) + fail(realm(), r, argIndex); } void check(JS::Compartment* c, int argIndex) { - if (c && c != compartment) - fail(compartment, c, argIndex); + if (c && c != compartment()) + fail(compartment(), c, argIndex); } - void checkZone(JS::Zone* z, int argIndex) { - if (compartment && z != compartment->zone()) - fail(compartment->zone(), z, argIndex); + void check(JS::Zone* z, int argIndex) { + if (zone() && z != zone()) + fail(zone(), z, argIndex); } void check(JSObject* obj, int argIndex) { @@ -62,21 +80,6 @@ class CompartmentChecker } } - template - void check(const Rooted& rooted, int argIndex) { - check(rooted.get(), argIndex); - } - - template - void check(Handle handle, int argIndex) { - check(handle.get(), argIndex); - } - - template - void check(MutableHandle handle, int argIndex) { - check(handle.get(), argIndex); - } - template void checkAtom(T* thing, int argIndex) { static_assert(mozilla::IsSame::value || @@ -86,11 +89,10 @@ class CompartmentChecker #ifdef DEBUG // Atoms which move across zone boundaries need to be marked in the new // zone, see JS_MarkCrossZoneId. 
- if (compartment) { - JSRuntime* rt = compartment->runtimeFromAnyThread(); - if (!rt->gc.atomMarking.atomIsMarked(compartment->zone(), thing)) { + if (zone()) { + if (!cx->runtime()->gc.atomMarking.atomIsMarked(zone(), thing)) { MOZ_CRASH_UNSAFE_PRINTF("*** Atom not marked for zone %p at argument %d\n", - compartment->zone(), argIndex); + zone(), argIndex); } } #endif @@ -101,7 +103,7 @@ class CompartmentChecker if (str->isAtom()) checkAtom(&str->asAtom(), argIndex); else - checkZone(str->zone(), argIndex); + check(str->zone(), argIndex); } void check(JS::Symbol* symbol, int argIndex) { @@ -153,10 +155,9 @@ class CompartmentChecker void check(JSScript* script, int argIndex) { MOZ_ASSERT(JS::CellIsNotGray(script)); if (script) - check(script->compartment(), argIndex); + check(script->realm(), argIndex); } - void check(InterpreterFrame* fp, int argIndex); void check(AbstractFramePtr frame, int argIndex); void check(Handle desc, int argIndex) { @@ -173,89 +174,41 @@ class CompartmentChecker } }; -/* - * Don't perform these checks when called from a finalizer. The checking - * depends on other objects not having been swept yet. - */ -#define START_ASSERT_SAME_COMPARTMENT() \ - if (JS::RuntimeHeapIsCollecting()) \ - return; \ - CompartmentChecker c(cx) +} // namespace js -template inline void -releaseAssertSameCompartment(JSContext* cx, const T1& t1) +template inline void +JSContext::checkImpl(int argIndex, const Head& head, const Tail&... tail) { - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); + js::ContextChecks(this).check(head, argIndex); + checkImpl(argIndex + 1, tail...); } -template inline void -assertSameCompartment(JSContext* cx, const T1& t1) +template inline void +JSContext::check(const Args&... 
args) { #ifdef JS_CRASH_DIAGNOSTICS - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); + if (contextChecksEnabled()) + checkImpl(0, args...); #endif } -template inline void -assertSameCompartmentDebugOnly(JSContext* cx, const T1& t1) +template inline void +JSContext::releaseCheck(const Args&... args) +{ + if (contextChecksEnabled()) + checkImpl(0, args...); +} + +template MOZ_ALWAYS_INLINE void +JSContext::debugOnlyCheck(const Args&... args) { #if defined(DEBUG) && defined(JS_CRASH_DIAGNOSTICS) - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); + if (contextChecksEnabled()) + checkImpl(0, args...); #endif } -template inline void -assertSameCompartment(JSContext* cx, const T1& t1, const T2& t2) -{ -#ifdef JS_CRASH_DIAGNOSTICS - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); - c.check(t2, 3); -#endif -} - -template inline void -assertSameCompartment(JSContext* cx, const T1& t1, const T2& t2, const T3& t3) -{ -#ifdef JS_CRASH_DIAGNOSTICS - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); - c.check(t2, 3); - c.check(t3, 4); -#endif -} - -template inline void -assertSameCompartment(JSContext* cx, - const T1& t1, const T2& t2, const T3& t3, const T4& t4) -{ -#ifdef JS_CRASH_DIAGNOSTICS - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); - c.check(t2, 3); - c.check(t3, 4); - c.check(t4, 5); -#endif -} - -template inline void -assertSameCompartment(JSContext* cx, - const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5) -{ -#ifdef JS_CRASH_DIAGNOSTICS - START_ASSERT_SAME_COMPARTMENT(); - c.check(t1, 2); - c.check(t2, 3); - c.check(t3, 4); - c.check(t4, 5); - c.check(t5, 6); -#endif -} - -#undef START_ASSERT_SAME_COMPARTMENT +namespace js { STATIC_PRECONDITION_ASSUME(ubound(args.argv_) >= argc) MOZ_ALWAYS_INLINE bool @@ -264,10 +217,10 @@ CallNativeImpl(JSContext* cx, NativeImpl impl, const CallArgs& args) #ifdef DEBUG bool alreadyThrowing = cx->isExceptionPending(); #endif - assertSameCompartment(cx, args); + cx->check(args); bool ok = impl(cx, 
args); if (ok) { - assertSameCompartment(cx, args.rval()); + cx->check(args.rval()); MOZ_ASSERT_IF(!alreadyThrowing, !cx->isExceptionPending()); } return ok; @@ -280,10 +233,10 @@ CallJSGetterOp(JSContext* cx, GetterOp op, HandleObject obj, HandleId id, if (!CheckRecursionLimit(cx)) return false; - assertSameCompartment(cx, obj, id, vp); + cx->check(obj, id, vp); bool ok = op(cx, obj, id, vp); if (ok) - assertSameCompartment(cx, vp); + cx->check(vp); return ok; } @@ -294,7 +247,7 @@ CallJSSetterOp(JSContext* cx, SetterOp op, HandleObject obj, HandleId id, Handle if (!CheckRecursionLimit(cx)) return false; - assertSameCompartment(cx, obj, id, v); + cx->check(obj, id, v); return op(cx, obj, id, v, result); } @@ -305,7 +258,7 @@ CallJSAddPropertyOp(JSContext* cx, JSAddPropertyOp op, HandleObject obj, HandleI if (!CheckRecursionLimit(cx)) return false; - assertSameCompartment(cx, obj, id, v); + cx->check(obj, id, v); return op(cx, obj, id, v); } @@ -316,7 +269,7 @@ CallJSDeletePropertyOp(JSContext* cx, JSDeletePropertyOp op, HandleObject receiv if (!CheckRecursionLimit(cx)) return false; - assertSameCompartment(cx, receiver, id); + cx->check(receiver, id); if (op) return op(cx, receiver, id, result); return result.succeed(); @@ -388,9 +341,7 @@ JSContext::setPendingException(JS::HandleValue v) this->overRecursed_ = false; this->throwing = true; this->unwrappedException() = v; - // We don't use assertSameCompartment here to allow - // js::SetPendingExceptionCrossContext to work. 
- MOZ_ASSERT_IF(v.isObject(), v.toObject().compartment() == compartment()); + check(v); } inline bool diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp index 08c34634bad9..ffb7f4c6b2bd 100644 --- a/js/src/vm/JSContext.cpp +++ b/js/src/vm/JSContext.cpp @@ -1354,7 +1354,7 @@ JSContext::getPendingException(MutableHandleValue rval) clearPendingException(); if (!compartment()->wrap(this, rval)) return false; - assertSameCompartment(this, rval); + this->check(rval); setPendingException(rval); overRecursed_ = wasOverRecursed; return true; @@ -1562,17 +1562,10 @@ JS::AutoCheckRequestDepth::~AutoCheckRequestDepth() #ifdef JS_CRASH_DIAGNOSTICS void -CompartmentChecker::check(InterpreterFrame* fp, int argIndex) -{ - if (fp) - check(fp->environmentChain(), argIndex); -} - -void -CompartmentChecker::check(AbstractFramePtr frame, int argIndex) +ContextChecks::check(AbstractFramePtr frame, int argIndex) { if (frame) - check(frame.environmentChain(), argIndex); + check(frame.realm(), argIndex); } #endif diff --git a/js/src/vm/JSContext.h b/js/src/vm/JSContext.h index 7cf32c3b9d80..8bef2eb5d839 100644 --- a/js/src/vm/JSContext.h +++ b/js/src/vm/JSContext.h @@ -954,6 +954,26 @@ struct JSContext : public JS::RootingContext, js::HandleObject incumbentGlobal); void addUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise); void removeUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise); + + private: + // Base case for the recursive function below. + inline void checkImpl(int argIndex) {} + + template + inline void checkImpl(int argIndex, const Head& head, const Tail&... tail); + + bool contextChecksEnabled() const { + // Don't perform these checks when called from a finalizer. The checking + // depends on other objects not having been swept yet. + return !RuntimeHeapIsCollecting(runtime()->heapState()); + } + + public: + // Assert the arguments are in this context's realm (for scripts), + // compartment (for objects) or zone (for strings, symbols). 
+ template inline void check(const Args&... args); + template inline void releaseCheck(const Args&... args); + template MOZ_ALWAYS_INLINE void debugOnlyCheck(const Args&... args); }; /* struct JSContext */ inline JS::Result<> diff --git a/js/src/vm/JSFunction.cpp b/js/src/vm/JSFunction.cpp index 422cbb36162e..516c5c7c4539 100644 --- a/js/src/vm/JSFunction.cpp +++ b/js/src/vm/JSFunction.cpp @@ -731,7 +731,7 @@ bool JS::OrdinaryHasInstance(JSContext* cx, HandleObject objArg, HandleValue v, bool* bp) { AssertHeapIsIdle(); - assertSameCompartment(cx, objArg, v); + cx->check(objArg, v); RootedObject obj(cx, objArg); diff --git a/js/src/vm/JSObject-inl.h b/js/src/vm/JSObject-inl.h index 530a79a954fe..daf874941088 100644 --- a/js/src/vm/JSObject-inl.h +++ b/js/src/vm/JSObject-inl.h @@ -817,7 +817,7 @@ InitClass(JSContext* cx, HandleObject obj, HandleObject parent_proto, MOZ_ALWAYS_INLINE const char* GetObjectClassName(JSContext* cx, HandleObject obj) { - assertSameCompartment(cx, obj); + cx->check(obj); if (obj->is()) return Proxy::className(cx, obj); diff --git a/js/src/vm/JSObject.cpp b/js/src/vm/JSObject.cpp index ebf37f376d31..c6887baf0821 100644 --- a/js/src/vm/JSObject.cpp +++ b/js/src/vm/JSObject.cpp @@ -140,7 +140,7 @@ JS::FromPropertyDescriptor(JSContext* cx, Handle desc, Mutab { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, desc); + cx->check(desc); // Step 1. if (!desc.object()) { @@ -476,7 +476,7 @@ GetSealedOrFrozenAttributes(unsigned attrs, IntegrityLevel level) bool js::SetIntegrityLevel(JSContext* cx, HandleObject obj, IntegrityLevel level) { - assertSameCompartment(cx, obj); + cx->check(obj); // Steps 3-5. (Steps 1-2 are redundant assertions.) if (!PreventExtensions(cx, obj)) @@ -1139,7 +1139,7 @@ JS_CopyPropertyFrom(JSContext* cx, HandleId id, HandleObject target, HandleObject obj, PropertyCopyBehavior copyBehavior) { // |obj| and |cx| are generally not same-compartment with |target| here. 
- assertSameCompartment(cx, obj, id); + cx->check(obj, id); Rooted desc(cx); if (!GetOwnPropertyDescriptor(cx, obj, id, &desc)) @@ -1376,7 +1376,7 @@ InitializePropertiesFromCompatibleNativeObject(JSContext* cx, HandleNativeObject dst, HandleNativeObject src) { - assertSameCompartment(cx, src, dst); + cx->check(src, dst); MOZ_ASSERT(src->getClass() == dst->getClass()); MOZ_ASSERT(dst->lastProperty()->getObjectFlags() == 0); MOZ_ASSERT(!src->isSingleton()); @@ -1445,7 +1445,7 @@ js::XDRObjectLiteral(XDRState* xdr, MutableHandleObject obj) /* NB: Keep this in sync with DeepCloneObjectLiteral. */ JSContext* cx = xdr->cx(); - assertSameCompartment(cx, obj); + cx->check(obj); // Distinguish between objects and array classes. uint32_t isArray = 0; diff --git a/js/src/vm/JSScript.cpp b/js/src/vm/JSScript.cpp index 5baa753ddf1e..f9811ad6fd8e 100644 --- a/js/src/vm/JSScript.cpp +++ b/js/src/vm/JSScript.cpp @@ -1386,7 +1386,7 @@ ScriptSourceObject::create(JSContext* cx, ScriptSource* source) ScriptSourceObject::initFromOptions(JSContext* cx, HandleScriptSourceObject source, const ReadOnlyCompileOptions& options) { - releaseAssertSameCompartment(cx, source); + cx->releaseCheck(source); MOZ_ASSERT(source->getReservedSlot(ELEMENT_SLOT).isMagic(JS_GENERIC_MAGIC)); MOZ_ASSERT(source->getReservedSlot(ELEMENT_PROPERTY_SLOT).isMagic(JS_GENERIC_MAGIC)); MOZ_ASSERT(source->getReservedSlot(INTRODUCTION_SCRIPT_SLOT).isMagic(JS_GENERIC_MAGIC)); @@ -2753,7 +2753,7 @@ JSScript::partiallyInit(JSContext* cx, HandleScript script, uint32_t nscopes, uint32_t nconsts, uint32_t nobjects, uint32_t ntrynotes, uint32_t nscopenotes, uint32_t nyieldoffsets, uint32_t nTypeSets) { - assertSameCompartment(cx, script); + cx->check(script); size_t size = ScriptDataSize(nscopes, nconsts, nobjects, ntrynotes, nscopenotes, nyieldoffsets); @@ -3824,7 +3824,7 @@ JSScript::setNewStepMode(FreeOp* fop, uint32_t newValue) bool JSScript::incrementStepModeCount(JSContext* cx) { - assertSameCompartment(cx, this); + 
cx->check(this); MOZ_ASSERT(cx->realm()->isDebuggee()); AutoRealm ar(cx, this); @@ -4282,7 +4282,7 @@ LazyScript::CreateRaw(JSContext* cx, HandleFunction fun, uint64_t packedFields, uint32_t sourceStart, uint32_t sourceEnd, uint32_t toStringStart, uint32_t lineno, uint32_t column) { - assertSameCompartment(cx, fun); + cx->check(fun); MOZ_ASSERT(sourceObject); union { diff --git a/js/src/vm/PIC.cpp b/js/src/vm/PIC.cpp index 883fdb4e93e1..eb0fd5c365de 100644 --- a/js/src/vm/PIC.cpp +++ b/js/src/vm/PIC.cpp @@ -281,7 +281,7 @@ const Class ForOfPIC::class_ = { /* static */ NativeObject* js::ForOfPIC::createForOfPICObject(JSContext* cx, Handle global) { - assertSameCompartment(cx, global); + cx->check(global); NativeObject* obj = NewNativeObjectWithGivenProto(cx, &ForOfPIC::class_, nullptr); if (!obj) return nullptr; diff --git a/js/src/vm/Realm.cpp b/js/src/vm/Realm.cpp index 6b42c8c4e1a3..5f2cf1d567ee 100644 --- a/js/src/vm/Realm.cpp +++ b/js/src/vm/Realm.cpp @@ -642,12 +642,12 @@ void Realm::setNewObjectMetadata(JSContext* cx, HandleObject obj) { MOZ_ASSERT(obj->maybeCCWRealm() == this); - assertSameCompartment(cx, compartment(), obj); + cx->check(compartment(), obj); AutoEnterOOMUnsafeRegion oomUnsafe; if (JSObject* metadata = allocationMetadataBuilder_->build(cx, obj, oomUnsafe)) { MOZ_ASSERT(metadata->maybeCCWRealm() == obj->maybeCCWRealm()); - assertSameCompartment(cx, metadata); + cx->check(metadata); if (!objects_.objectMetadataTable) { auto table = cx->make_unique(cx); diff --git a/js/src/vm/RegExpObject.cpp b/js/src/vm/RegExpObject.cpp index 80aba2331b64..17587be43721 100644 --- a/js/src/vm/RegExpObject.cpp +++ b/js/src/vm/RegExpObject.cpp @@ -287,7 +287,7 @@ RegExpObject::createShared(JSContext* cx, Handle regexp) { MOZ_ASSERT(!regexp->hasShared()); RootedAtom source(cx, regexp->getSource()); - RegExpShared* shared = cx->zone()->regExps.get(cx, source, regexp->getFlags()); + RegExpShared* shared = cx->zone()->regExps().get(cx, source, regexp->getFlags()); if 
(!shared) return nullptr; diff --git a/js/src/vm/RegExpShared.h b/js/src/vm/RegExpShared.h index 6348221deda6..6f935a670cfe 100644 --- a/js/src/vm/RegExpShared.h +++ b/js/src/vm/RegExpShared.h @@ -19,6 +19,7 @@ #include "gc/Barrier.h" #include "gc/Heap.h" #include "gc/Marking.h" +#include "gc/Zone.h" #include "js/AllocPolicy.h" #include "js/UbiNode.h" #include "js/Vector.h" diff --git a/js/src/vm/RegExpStatics.cpp b/js/src/vm/RegExpStatics.cpp index ba84f1245dec..8a5c34e36c4b 100644 --- a/js/src/vm/RegExpStatics.cpp +++ b/js/src/vm/RegExpStatics.cpp @@ -82,7 +82,7 @@ RegExpStatics::executeLazy(JSContext* cx) /* Retrieve or create the RegExpShared in this zone. */ RootedAtom source(cx, lazySource); - RootedRegExpShared shared(cx, cx->zone()->regExps.get(cx, source, lazyFlags)); + RootedRegExpShared shared(cx, cx->zone()->regExps().get(cx, source, lazyFlags)); if (!shared) return false; diff --git a/js/src/vm/Runtime.cpp b/js/src/vm/Runtime.cpp index 28f0b7fd8a6a..1b07361fe046 100644 --- a/js/src/vm/Runtime.cpp +++ b/js/src/vm/Runtime.cpp @@ -499,7 +499,7 @@ JSContext::requestInterrupt(InterruptReason reason) // not regularly polled. 
FutexThread::lock(); if (fx.isWaiting()) - fx.wake(FutexThread::WakeForJSInterrupt); + fx.notify(FutexThread::NotifyForJSInterrupt); fx.unlock(); wasm::InterruptRunningCode(this); } diff --git a/js/src/vm/SavedStacks.cpp b/js/src/vm/SavedStacks.cpp index 77480a12cf1e..74c2bde2517f 100644 --- a/js/src/vm/SavedStacks.cpp +++ b/js/src/vm/SavedStacks.cpp @@ -554,7 +554,7 @@ SavedFrame::initFromLookup(JSContext* cx, SavedFrame::HandleLookup lookup) SavedFrame::create(JSContext* cx) { RootedGlobalObject global(cx, cx->global()); - assertSameCompartment(cx, global); + cx->check(global); // Ensure that we don't try to capture the stack again in the // `SavedStacksMetadataBuilder` for this new SavedFrame object, and @@ -564,7 +564,7 @@ SavedFrame::create(JSContext* cx) RootedNativeObject proto(cx, GlobalObject::getOrCreateSavedFramePrototype(cx, global)); if (!proto) return nullptr; - assertSameCompartment(cx, proto); + cx->check(proto); return NewObjectWithGivenProto(cx, proto, TenuredObject); } @@ -1089,7 +1089,7 @@ BuildStackString(JSContext* cx, JSPrincipals* principals, HandleObject stack, JSString* str = sb.finishString(); if (!str) return false; - assertSameCompartment(cx, str); + cx->check(str); stringp.set(str); return true; } @@ -1699,7 +1699,7 @@ SavedStacks::getLocation(JSContext* cx, const FrameIter& iter, // the cache because our compartment's sweep method isn't called when their // compartment gets collected. MOZ_DIAGNOSTIC_ASSERT(&cx->realm()->savedStacks() == this); - assertSameCompartment(cx, iter.compartment()); + cx->check(iter.compartment()); // When we have a |JSScript| for this frame, use a potentially memoized // location from our PCLocationMap and copy it into |locationp|. 
When we do diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp index fa4887afc382..fb8f208c6fe2 100644 --- a/js/src/vm/SelfHosting.cpp +++ b/js/src/vm/SelfHosting.cpp @@ -1559,7 +1559,7 @@ intrinsic_SetOverlappingTypedElements(JSContext* cx, unsigned argc, Value* vp) MOZ_ASSERT(args.length() == 3); Rooted target(cx, &args[0].toObject().as()); - assertSameCompartment(cx, target); + cx->check(target); MOZ_ASSERT(!target->hasDetachedBuffer(), "shouldn't set elements if underlying buffer is detached"); diff --git a/js/src/vm/StringType.cpp b/js/src/vm/StringType.cpp index e48f578f392a..ae01b75ec7df 100644 --- a/js/src/vm/StringType.cpp +++ b/js/src/vm/StringType.cpp @@ -2186,7 +2186,7 @@ js::ValueToSource(JSContext* cx, HandleValue v) { if (!CheckRecursionLimit(cx)) return nullptr; - assertSameCompartment(cx, v); + cx->check(v); if (v.isUndefined()) return cx->names().void0; diff --git a/js/src/vm/StructuredClone.cpp b/js/src/vm/StructuredClone.cpp index 919780ac85b9..a474b739add5 100644 --- a/js/src/vm/StructuredClone.cpp +++ b/js/src/vm/StructuredClone.cpp @@ -1640,7 +1640,7 @@ JSStructuredCloneWriter::traverseSavedFrame(HandleObject obj) bool JSStructuredCloneWriter::startWrite(HandleValue v) { - assertSameCompartment(context(), v); + context()->check(v); if (v.isString()) { return writeString(SCTAG_STRING, v.toString()); @@ -1943,7 +1943,7 @@ JSStructuredCloneWriter::write(HandleValue v) while (!counts.empty()) { obj = &objs.back().toObject(); - assertSameCompartment(context(), obj); + context()->check(obj); if (counts.back()) { counts.back()--; key = entries.back(); @@ -2941,7 +2941,7 @@ JS_WriteStructuredClone(JSContext* cx, HandleValue value, JSStructuredCloneData* { AssertHeapIsIdle(); CHECK_REQUEST(cx); - assertSameCompartment(cx, value); + cx->check(value); const JSStructuredCloneCallbacks* callbacks = optionalCallbacks; return WriteStructuredClone(cx, value, bufp, scope, cloneDataPolicy, callbacks, closure, @@ -3150,7 +3150,7 @@ 
JS_PUBLIC_API(bool) JS_WriteTypedArray(JSStructuredCloneWriter* w, HandleValue v) { MOZ_ASSERT(v.isObject()); - assertSameCompartment(w->context(), v); + w->context()->check(v); RootedObject obj(w->context(), &v.toObject()); // Note: writeTypedArray also does a CheckedUnwrap but it assumes this diff --git a/js/src/vm/TypeInference-inl.h b/js/src/vm/TypeInference-inl.h index fbe73166efb8..eac735ea1b42 100644 --- a/js/src/vm/TypeInference-inl.h +++ b/js/src/vm/TypeInference-inl.h @@ -636,7 +636,7 @@ TypeScript::MonitorAssign(JSContext* cx, HandleObject obj, jsid id) /* static */ inline void TypeScript::SetThis(JSContext* cx, JSScript* script, TypeSet::Type type) { - assertSameCompartment(cx, script, type); + cx->check(script, type); AutoSweepTypeScript sweep(script); StackTypeSet* types = ThisTypes(script); @@ -661,7 +661,7 @@ TypeScript::SetThis(JSContext* cx, JSScript* script, const js::Value& value) /* static */ inline void TypeScript::SetArgument(JSContext* cx, JSScript* script, unsigned arg, TypeSet::Type type) { - assertSameCompartment(cx, script, type); + cx->check(script, type); AutoSweepTypeScript sweep(script); StackTypeSet* types = ArgTypes(script, arg); diff --git a/js/src/vm/TypeInference.cpp b/js/src/vm/TypeInference.cpp index d45c8893f494..2f4ac9f0416c 100644 --- a/js/src/vm/TypeInference.cpp +++ b/js/src/vm/TypeInference.cpp @@ -3415,7 +3415,7 @@ js::FillBytecodeTypeMap(JSScript* script, uint32_t* bytecodeMap) void js::TypeMonitorResult(JSContext* cx, JSScript* script, jsbytecode* pc, TypeSet::Type type) { - assertSameCompartment(cx, script, type); + cx->check(script, type); AutoEnterAnalysis enter(cx); @@ -3433,7 +3433,7 @@ void js::TypeMonitorResult(JSContext* cx, JSScript* script, jsbytecode* pc, StackTypeSet* types, TypeSet::Type type) { - assertSameCompartment(cx, script, type); + cx->check(script, type); AutoEnterAnalysis enter(cx); @@ -3468,7 +3468,7 @@ bool JSScript::makeTypes(JSContext* cx) { MOZ_ASSERT(!types_); - assertSameCompartment(cx, 
this); + cx->check(this); AutoEnterAnalysis enter(cx); @@ -4632,7 +4632,7 @@ Zone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t* compartmentsPrivateData) { *typePool += types.typeLifoAlloc().sizeOfExcludingThis(mallocSizeOf); - *regexpZone += regExps.sizeOfExcludingThis(mallocSizeOf); + *regexpZone += regExps().sizeOfExcludingThis(mallocSizeOf); if (jitZone_) jitZone_->addSizeOfIncludingThis(mallocSizeOf, jitZone, baselineStubsOptimized, cachedCFG); *uniqueIdMap += uniqueIds().shallowSizeOfExcludingThis(mallocSizeOf); diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp index ea81319e1df5..f8c18730eca7 100644 --- a/js/src/wasm/AsmJS.cpp +++ b/js/src/wasm/AsmJS.cpp @@ -106,6 +106,46 @@ enum AsmJSMathBuiltinFunction AsmJSMathBuiltin_clz32 }; +// LitValPOD is a restricted version of LitVal suitable for asm.js that is +// always POD. + +struct LitValPOD +{ + PackedTypeCode valType_; + union U { + uint32_t u32_; + uint64_t u64_; + float f32_; + double f64_; + } u; + + LitValPOD() = default; + + explicit LitValPOD(uint32_t u32) : valType_(ValType(ValType::I32).packed()) { u.u32_ = u32; } + explicit LitValPOD(uint64_t u64) : valType_(ValType(ValType::I64).packed()) { u.u64_ = u64; } + + explicit LitValPOD(float f32) : valType_(ValType(ValType::F32).packed()) { u.f32_ = f32; } + explicit LitValPOD(double f64) : valType_(ValType(ValType::F64).packed()) { u.f64_ = f64; } + + LitVal asLitVal() const { + switch (UnpackTypeCodeType(valType_)) { + case TypeCode::I32: + return LitVal(u.u32_); + case TypeCode::I64: + return LitVal(u.u64_); + case TypeCode::F32: + return LitVal(u.f32_); + case TypeCode::F64: + return LitVal(u.f64_); + default: + MOZ_CRASH("Can't happen"); + } + } +}; + +static_assert(std::is_pod::value, + "must be POD to be simply serialized/deserialized"); + // An AsmJSGlobal represents a JS global variable in the asm.js module function. 
class AsmJSGlobal { @@ -121,9 +161,8 @@ class AsmJSGlobal struct { VarInitKind initKind_; union U { - ValType importType_; - LitVal val_; - U() : val_(LitVal()) {} + PackedTypeCode importValType_; + LitValPOD val_; } u; } var; uint32_t ffiIndex_; @@ -133,7 +172,6 @@ class AsmJSGlobal ConstantKind kind_; double value_; } constant; - V() : ffiIndex_(0) {} } u; } pod; CacheableChars field_; @@ -157,7 +195,7 @@ class AsmJSGlobal MOZ_ASSERT(pod.which_ == Variable); return pod.u.var.initKind_; } - LitVal varInitVal() const { + LitValPOD varInitVal() const { MOZ_ASSERT(pod.which_ == Variable); MOZ_ASSERT(pod.u.var.initKind_ == InitConstant); return pod.u.var.u.val_; @@ -165,7 +203,7 @@ class AsmJSGlobal ValType varInitImportType() const { MOZ_ASSERT(pod.which_ == Variable); MOZ_ASSERT(pod.u.var.initKind_ == InitImport); - return pod.u.var.u.importType_; + return ValType(pod.u.var.u.importValType_); } uint32_t ffiIndex() const { MOZ_ASSERT(pod.which_ == FFI); @@ -851,16 +889,16 @@ class NumLit return false; } - LitVal value() const { + LitValPOD value() const { switch (which_) { case NumLit::Fixnum: case NumLit::NegativeInt: case NumLit::BigUnsigned: - return LitVal(toUint32()); + return LitValPOD(toUint32()); case NumLit::Float: - return LitVal(toFloat()); + return LitValPOD(toFloat()); case NumLit::Double: - return LitVal(toDouble()); + return LitValPOD(toDouble()); case NumLit::OutOfRangeInt:; } MOZ_CRASH("bad literal"); @@ -1691,7 +1729,7 @@ class MOZ_STACK_CLASS JS_HAZ_ROOTED ModuleValidator AsmJSGlobal g(AsmJSGlobal::Variable, std::move(fieldChars)); g.pod.u.var.initKind_ = AsmJSGlobal::InitImport; - g.pod.u.var.u.importType_ = valType; + g.pod.u.var.u.importValType_ = valType.packed(); return asmJSMetadata_->asmJSGlobals.append(std::move(g)); } bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) { @@ -5791,7 +5829,7 @@ HasPureCoercion(JSContext* cx, HandleValue v) static bool ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& 
global, HandleValue importVal, - Maybe* val) + Maybe* val) { switch (global.varInitKind()) { case AsmJSGlobal::InitConstant: @@ -6013,10 +6051,10 @@ GetImports(JSContext* cx, const AsmJSMetadata& metadata, HandleValue globalVal, for (const AsmJSGlobal& global : metadata.asmJSGlobals) { switch (global.which()) { case AsmJSGlobal::Variable: { - Maybe litVal; + Maybe litVal; if (!ValidateGlobalVariable(cx, global, importVal, &litVal)) return false; - if (!valImports.append(Val(*litVal))) + if (!valImports.append(Val(litVal->asLitVal()))) return false; break; } diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp index 7373aabf79e2..350191905976 100644 --- a/js/src/wasm/WasmBaselineCompile.cpp +++ b/js/src/wasm/WasmBaselineCompile.cpp @@ -1012,7 +1012,7 @@ BaseLocalIter::BaseLocalIter(const ValTypeVector& locals, size_t argsLength, boo index_(0), localSize_(debugEnabled ? DebugFrame::offsetOfFrame() : 0), reservedSize_(localSize_), - frameOffset_(0), + frameOffset_(UINT32_MAX), mirType_(MIRType::Undefined), done_(false) { diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp index 49871f133236..fe9b233ef39d 100644 --- a/js/src/wasm/WasmInstance.cpp +++ b/js/src/wasm/WasmInstance.cpp @@ -373,7 +373,7 @@ Instance::wake(Instance* instance, uint32_t byteOffset, int32_t count) return -1; } - int64_t woken = atomics_wake_impl(instance->sharedMemoryBuffer(), byteOffset, int64_t(count)); + int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(), byteOffset, int64_t(count)); if (woken > INT32_MAX) { JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_WAKE_OVERFLOW); diff --git a/js/src/wasm/WasmProcess.cpp b/js/src/wasm/WasmProcess.cpp index 5214fb28e15a..84bf5db1e8a7 100644 --- a/js/src/wasm/WasmProcess.cpp +++ b/js/src/wasm/WasmProcess.cpp @@ -43,6 +43,18 @@ typedef Vector CodeSegmentVector; Atomic wasm::CodeExists(false); +// Because of profiling, the thread running wasm might need to know to 
which +// CodeSegment the current PC belongs, during a call to lookup(). A lookup +// is a read-only operation, and we don't want to take a lock then +// (otherwise, we could have a deadlock situation if an async lookup +// happened on a given thread that was holding mutatorsMutex_ while getting +// sampled). Since the writer could be modifying the data that is getting +// looked up, the writer functions use spin-locks to know if there are any +// observers (i.e. calls to lookup()) of the atomic data. + +static Atomic sNumObservers(0); +static Atomic sShuttingDown(false); + class ProcessCodeSegmentMap { // Since writes (insertions or removals) can happen on any background @@ -53,17 +65,6 @@ class ProcessCodeSegmentMap CodeSegmentVector segments1_; CodeSegmentVector segments2_; - // Because of profiling, the thread running wasm might need to know to which - // CodeSegment the current PC belongs, during a call to lookup(). A lookup - // is a read-only operation, and we don't want to take a lock then - // (otherwise, we could have a deadlock situation if an async lookup - // happened on a given thread that was holding mutatorsMutex_ while getting - // sampled). Since the writer could be modifying the data that is getting - // looked up, the writer functions use spin-locks to know if there are any - // observers (i.e. calls to lookup()) of the atomic data. - - Atomic observers_; - // Except during swapAndWait(), there are no lookup() observers of the // vector pointed to by mutableCodeSegments_ @@ -84,7 +85,7 @@ class ProcessCodeSegmentMap }; void swapAndWait() { - // Both vectors are consistent for look up at this point, although their + // Both vectors are consistent for lookup at this point although their // contents are different: there is no way for the looked up PC to be // in the code segment that is getting registered, because the code // segment is not even fully created yet. 
@@ -110,13 +111,12 @@ class ProcessCodeSegmentMap // A lookup could have happened on any of the two vectors. Wait for // observers to be done using any vector before mutating. - while (observers_); + while (sNumObservers > 0) {} } public: ProcessCodeSegmentMap() : mutatorsMutex_(mutexid::WasmCodeSegmentMap), - observers_(0), mutableCodeSegments_(&segments1_), readonlyCodeSegments_(&segments2_) { @@ -192,13 +192,6 @@ class ProcessCodeSegmentMap } const CodeSegment* lookup(const void* pc) { - auto decObserver = mozilla::MakeScopeExit([&] { - observers_--; - }); - observers_++; - - // Once atomically-read, the readonly vector is valid as long as - // observers_ has been incremented (see swapAndWait()). const CodeSegmentVector* readonly = readonlyCodeSegments_; size_t index; @@ -213,25 +206,44 @@ class ProcessCodeSegmentMap } }; -static ProcessCodeSegmentMap processCodeSegmentMap; +static ProcessCodeSegmentMap sProcessCodeSegmentMap; bool wasm::RegisterCodeSegment(const CodeSegment* cs) { MOZ_ASSERT(cs->codeTier().code().initialized()); - return processCodeSegmentMap.insert(cs); + return sProcessCodeSegmentMap.insert(cs); } void wasm::UnregisterCodeSegment(const CodeSegment* cs) { - processCodeSegmentMap.remove(cs); + sProcessCodeSegmentMap.remove(cs); } const CodeSegment* wasm::LookupCodeSegment(const void* pc, const CodeRange** codeRange /*= nullptr */) { - if (const CodeSegment* found = processCodeSegmentMap.lookup(pc)) { + // Avoid accessing an uninitialized sProcessCodeSegmentMap if there is a + // crash early in startup. Returning null will allow the crash to propagate + // properly to breakpad. + if (!CodeExists) + return nullptr; + + // Ensure the observer count is above 0 throughout the entire lookup to + // ensure swapAndWait() waits for the lookup to complete. 
+ auto decObserver = mozilla::MakeScopeExit([&] { + MOZ_ASSERT(sNumObservers > 0); + sNumObservers--; + }); + sNumObservers++; + + // Check sShuttingDown with sNumObservers > 0 to ensure the spinloop in + // wasm::ShutDown() is effective. + if (sShuttingDown) + return nullptr; + + if (const CodeSegment* found = sProcessCodeSegmentMap.lookup(pc)) { if (codeRange) { *codeRange = found->isModule() ? found->asModule()->lookupRange(pc) @@ -239,8 +251,10 @@ wasm::LookupCodeSegment(const void* pc, const CodeRange** codeRange /*= nullptr } return found; } + if (codeRange) *codeRange = nullptr; + return nullptr; } @@ -261,6 +275,10 @@ wasm::ShutDown() if (JSRuntime::hasLiveRuntimes()) return; + // After signalling shutdown, wait for currently-active observers to finish. + sShuttingDown = true; + while (sNumObservers > 0) {} + ReleaseBuiltinThunks(); - processCodeSegmentMap.freeAll(); + sProcessCodeSegmentMap.freeAll(); } diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp index f3445adac267..045fe453f945 100644 --- a/js/src/wasm/WasmStubs.cpp +++ b/js/src/wasm/WasmStubs.cpp @@ -53,12 +53,6 @@ AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBefor masm.assertStackAlignment(alignment, addBeforeAssert); } -static unsigned -StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, unsigned bytesToPush) -{ - return StackDecrementForCall(alignment, sizeof(Frame) + masm.framePushed(), bytesToPush); -} - template static unsigned StackArgBytes(const VectorT& args) @@ -69,14 +63,6 @@ StackArgBytes(const VectorT& args) return iter.stackBytesConsumedSoFar(); } -template -static unsigned -StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, const VectorT& args, - unsigned extraBytes = 0) -{ - return StackDecrementForCall(masm, alignment, StackArgBytes(args) + extraBytes); -} - static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe, Register argv, Register scratch) { @@ -563,7 +549,9 @@ 
GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex, const FuncExport& // Note the jit caller ensures the stack is aligned *after* the call // instruction. - unsigned frameSize = StackDecrementForCall(WasmStackAlignment, 0, bytesNeeded); + unsigned frameSize = StackDecrementForCall(WasmStackAlignment, + masm.framePushed(), + bytesNeeded); #ifdef ENABLE_WASM_GC unsigned savedTlsOffset = frameSize - sizeof(void*); @@ -998,7 +986,10 @@ GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, FuncType GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets); - unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.funcType().args()); + MOZ_ASSERT(masm.framePushed() == 0); + unsigned framePushed = StackDecrementForCall(WasmStackAlignment, + sizeof(Frame), // pushed by prologue + StackArgBytes(fi.funcType().args())); masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0)); MOZ_ASSERT(masm.framePushed() == framePushed); @@ -1085,7 +1076,9 @@ GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t fu // padding between argv and retaddr ensures that sp is aligned. 
unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double)); unsigned argBytes = Max(1, fi.funcType().args().length()) * sizeof(Value); - unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes); + unsigned framePushed = StackDecrementForCall(ABIStackAlignment, + sizeof(Frame), // pushed by prologue + argOffset + argBytes); GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp, offsets); @@ -1195,17 +1188,17 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa masm.setFramePushed(0); // JIT calls use the following stack layout (sp grows to the left): - // | retaddr | descriptor | callee | argc | this | arg1..N | - // After the JIT frame, the global register (if present) is saved since the - // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs, - // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing - // the return address. + // | WasmToJSJitFrameLayout | this | arg1..N | + // Unlike most ABIs, the JIT ABI requires that sp be JitStackAlignment- + // aligned *after* pushing the return address. 
static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes"); const unsigned sizeOfRetAddr = sizeof(void*); const unsigned sizeOfPreFrame = WasmToJSJitFrameLayout::Size() - sizeOfRetAddr; const unsigned sizeOfThisAndArgs = (1 + fi.funcType().args().length()) * sizeof(Value); const unsigned totalJitFrameBytes = sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs; - const unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) - + const unsigned jitFramePushed = StackDecrementForCall(JitStackAlignment, + sizeof(Frame), // pushed by prologue + totalJitFrameBytes) - sizeOfRetAddr; const unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame; @@ -1460,7 +1453,9 @@ wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType, ExitRe masm.setFramePushed(0); ABIFunctionArgs args(abiType); - uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args); + uint32_t framePushed = StackDecrementForCall(ABIStackAlignment, + sizeof(Frame), // pushed by prologue + StackArgBytes(args)); GenerateExitPrologue(masm, framePushed, exitReason, offsets); diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp index 2be6e46a73c9..c220d53aeb47 100644 --- a/js/src/wasm/WasmTypes.cpp +++ b/js/src/wasm/WasmTypes.cpp @@ -189,6 +189,21 @@ FuncType::serialize(uint8_t* cursor) const return cursor; } +namespace js { namespace wasm { + +// ExprType is not POD while ReadScalar requires POD, so specialize. 
+template <> +inline const uint8_t* +ReadScalar(const uint8_t* src, ExprType* dst) +{ + static_assert(sizeof(PackedTypeCode) == sizeof(ExprType), + "ExprType must carry only a PackedTypeCode"); + memcpy(dst->packedPtr(), src, sizeof(PackedTypeCode)); + return src + sizeof(*dst); +} + +}} + const uint8_t* FuncType::deserialize(const uint8_t* cursor) { diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h index fd2454f0cd4f..75a0535c9a52 100644 --- a/js/src/wasm/WasmTypes.h +++ b/js/src/wasm/WasmTypes.h @@ -168,6 +168,9 @@ struct ShareableBase : AtomicRefCounted enum class PackedTypeCode : uint32_t {}; +static_assert(std::is_pod::value, + "must be POD to be simply serialized/deserialized"); + const uint32_t NoTypeCode = 0xFF; // Only use these const uint32_t NoRefTypeIndex = 0xFFFFFF; // with PackedTypeCode @@ -299,6 +302,10 @@ class ExprType return tc_; } + PackedTypeCode* packedPtr() { + return &tc_; + } + Code code() const { return Code(UnpackTypeCodeType(tc_)); } diff --git a/layout/generic/BlockReflowInput.h b/layout/generic/BlockReflowInput.h index 2aa91c672f44..61cc0f2db773 100644 --- a/layout/generic/BlockReflowInput.h +++ b/layout/generic/BlockReflowInput.h @@ -381,6 +381,10 @@ public: // being N^2. nsFloatCacheFreeList mBelowCurrentLineFloats; + // The list of floats that are waiting on a break opportunity in order to be + // placed, since we're on a nowrap context. 
+ nsTArray mNoWrapFloats; + nscoord mMinLineHeight; int32_t mLineNumber; diff --git a/layout/generic/nsBlockFrame.cpp b/layout/generic/nsBlockFrame.cpp index ad6339c478e7..ecae7b9f0b07 100644 --- a/layout/generic/nsBlockFrame.cpp +++ b/layout/generic/nsBlockFrame.cpp @@ -3962,6 +3962,7 @@ nsBlockFrame::ReflowInlineFrames(BlockReflowInput& aState, aState.FloatManager()->PopState(&floatManagerState); // Clear out float lists aState.mCurrentLineFloats.DeleteAll(); + MOZ_ASSERT(aState.mNoWrapFloats.IsEmpty()); aState.mBelowCurrentLineFloats.DeleteAll(); } @@ -4587,6 +4588,9 @@ nsBlockFrame::PlaceLine(BlockReflowInput& aState, nscoord& aAvailableSpaceBSize, bool* aKeepReflowGoing) { + // Try to position the floats in a nowrap context. + aLineLayout.FlushNoWrapFloats(); + // Trim extra white-space from the line before placing the frames aLineLayout.TrimTrailingWhiteSpace(); diff --git a/layout/generic/nsLineLayout.cpp b/layout/generic/nsLineLayout.cpp index abd176053f6c..1245ab38c916 100644 --- a/layout/generic/nsLineLayout.cpp +++ b/layout/generic/nsLineLayout.cpp @@ -264,6 +264,7 @@ nsLineLayout::EndLineReflow() (!mSpansAllocated && !mSpansFreed && !mSpanFreeList && !mFramesAllocated && !mFramesFreed && !mFrameFreeList), "Allocated frames or spans on non-base line layout?"); + MOZ_ASSERT(mRootSpan == mCurrentSpan); UnlinkFrame(mRootSpan->mFrame); mCurrentSpan = mRootSpan = nullptr; @@ -457,6 +458,12 @@ nsLineLayout::EndSpan(nsIFrame* aFrame) printf(": EndSpan width=%d\n", mCurrentSpan->mICoord - mCurrentSpan->mIStart); #endif PerSpanData* psd = mCurrentSpan; + MOZ_ASSERT(psd->mParent, "We never call this on the root"); + + if (psd->mNoWrap && !psd->mParent->mNoWrap) { + FlushNoWrapFloats(); + } + nscoord iSizeResult = psd->mLastFrame ? 
(psd->mICoord - psd->mIStart) : 0; mSpanDepth--; @@ -954,27 +961,14 @@ nsLineLayout::ReflowFrame(nsIFrame* aFrame, pfd->mSkipWhenTrimmingWhitespace = true; nsIFrame* outOfFlowFrame = nsLayoutUtils::GetFloatFromPlaceholder(aFrame); if (outOfFlowFrame) { - // Add mTrimmableISize to the available width since if the line ends - // here, the width of the inline content will be reduced by - // mTrimmableISize. - nscoord availableISize = psd->mIEnd - (psd->mICoord - mTrimmableISize); - if (psd->mNoWrap) { - // If we place floats after inline content where there's - // no break opportunity, we don't know how much additional - // width is required for the non-breaking content after the float, - // so we can't know whether the float plus that content will fit - // on the line. So for now, don't place floats after inline - // content where there's no break opportunity. This is incorrect - // but hopefully rare. Fixing it will require significant - // restructuring of line layout. - // We might as well allow zero-width floats to be placed, though. - availableISize = 0; + if (psd->mNoWrap && + !LineIsEmpty() && // We can always place floats in an empty line. + !GetOutermostLineLayout()->mBlockRI->mFlags.mCanHaveTextOverflow) { + // We'll do this at the next break opportunity. + RecordNoWrapFloat(outOfFlowFrame); + } else { + placedFloat = TryToPlaceFloat(outOfFlowFrame); } - placedFloat = GetOutermostLineLayout()-> - AddFloat(outOfFlowFrame, availableISize); - NS_ASSERTION(!(outOfFlowFrame->IsLetterFrame() && - GetFirstLetterStyleOK()), - "FirstLetterStyle set on line with floating first letter"); } } else if (isText) { @@ -1119,8 +1113,8 @@ nsLineLayout::ReflowFrame(nsIFrame* aFrame, VerticalAlignFrames(span); } - if (!continuingTextRun) { - if (!psd->mNoWrap && (!LineIsEmpty() || placedFloat)) { + if (!continuingTextRun && !psd->mNoWrap) { + if (!LineIsEmpty() || placedFloat) { // record soft break opportunity after this content that can't be // part of a text run. 
This is not a text frame so we know // that offset INT32_MAX means "after the content". @@ -1505,6 +1499,60 @@ nsLineLayout::DumpPerSpanData(PerSpanData* psd, int32_t aIndent) } #endif +void +nsLineLayout::RecordNoWrapFloat(nsIFrame* aFloat) +{ + GetOutermostLineLayout()->mBlockRI->mNoWrapFloats.AppendElement(aFloat); +} + +void +nsLineLayout::FlushNoWrapFloats() +{ + auto& noWrapFloats = GetOutermostLineLayout()->mBlockRI->mNoWrapFloats; + for (nsIFrame* floatedFrame : noWrapFloats) { + TryToPlaceFloat(floatedFrame); + } + noWrapFloats.Clear(); +} + +bool +nsLineLayout::TryToPlaceFloat(nsIFrame* aFloat) +{ + // Add mTrimmableISize to the available width since if the line ends here, the + // width of the inline content will be reduced by mTrimmableISize. + nscoord availableISize = mCurrentSpan->mIEnd - (mCurrentSpan->mICoord - mTrimmableISize); + NS_ASSERTION(!(aFloat->IsLetterFrame() && GetFirstLetterStyleOK()), + "FirstLetterStyle set on line with floating first letter"); + return GetOutermostLineLayout()->AddFloat(aFloat, availableISize); +} + +bool +nsLineLayout::NotifyOptionalBreakPosition(nsIFrame* aFrame, + int32_t aOffset, + bool aFits, + gfxBreakPriority aPriority) +{ + MOZ_ASSERT(!aFits || !mNeedBackup, + "Shouldn't be updating the break position with a break that fits " + "after we've already flagged an overrun"); + MOZ_ASSERT(mCurrentSpan, "Should be doing line layout"); + if (mCurrentSpan->mNoWrap) { + FlushNoWrapFloats(); + } + + // Remember the last break position that fits; if there was no break that fit, + // just remember the first break + if ((aFits && aPriority >= mLastOptionalBreakPriority) || + !mLastOptionalBreakFrame) { + mLastOptionalBreakFrame = aFrame; + mLastOptionalBreakFrameOffset = aOffset; + mLastOptionalBreakPriority = aPriority; + } + return aFrame && mForceBreakFrame == aFrame && + mForceBreakFrameOffset == aOffset; +} + + #define VALIGN_OTHER 0 #define VALIGN_TOP 1 #define VALIGN_BOTTOM 2 diff --git 
a/layout/generic/nsLineLayout.h b/layout/generic/nsLineLayout.h index cd57173cc8e6..38eab887c255 100644 --- a/layout/generic/nsLineLayout.h +++ b/layout/generic/nsLineLayout.h @@ -251,21 +251,21 @@ public: * @return true if we are actually reflowing with forced break position and we * should break here */ - bool NotifyOptionalBreakPosition(nsIFrame* aFrame, int32_t aOffset, - bool aFits, gfxBreakPriority aPriority) { - NS_ASSERTION(!aFits || !mNeedBackup, - "Shouldn't be updating the break position with a break that fits after we've already flagged an overrun"); - // Remember the last break position that fits; if there was no break that fit, - // just remember the first break - if ((aFits && aPriority >= mLastOptionalBreakPriority) || - !mLastOptionalBreakFrame) { - mLastOptionalBreakFrame = aFrame; - mLastOptionalBreakFrameOffset = aOffset; - mLastOptionalBreakPriority = aPriority; - } - return aFrame && mForceBreakFrame == aFrame && - mForceBreakFrameOffset == aOffset; - } + bool NotifyOptionalBreakPosition(nsIFrame* aFrame, + int32_t aOffset, + bool aFits, + gfxBreakPriority aPriority); + + // Tries to place a float, and records whether the float actually was placed. + bool TryToPlaceFloat(nsIFrame* aFloat); + + // Records a floating frame in a nowrap context for it to be placed on the + // next break opportunity. + void RecordNoWrapFloat(nsIFrame* aFloat); + + // Tries to place the floats from the nowrap context. 
+ void FlushNoWrapFloats(); + /** * Like NotifyOptionalBreakPosition, but here it's OK for mNeedBackup * to be set, because the caller is merely pruning some saved break position(s) diff --git a/layout/reftests/bugs/reftest.list b/layout/reftests/bugs/reftest.list index 56d2ab021e37..5d4345dd80e0 100644 --- a/layout/reftests/bugs/reftest.list +++ b/layout/reftests/bugs/reftest.list @@ -1151,7 +1151,7 @@ fuzzy-if(skiaContent,0-1,0-3280) == 438987-2c.html 438987-2-ref.html fuzzy-if(skiaContent,0-1,0-1) == 440112.html 440112-ref.html == 440149-1.html 440149-1-ref.html == 441259-1.html 441259-1-ref.html -fails == 441259-2.html 441259-2-ref.html # bug 441400 +== 441259-2.html 441259-2-ref.html fuzzy-if(skiaContent,0-1,0-3) == 442542-1.html 442542-1-ref.html == 444015-1.html 444015-1-ref.html == 444375-1.html 444375-1-ref.html diff --git a/layout/reftests/css-grid/grid-container-baselines-003-ref.html b/layout/reftests/css-grid/grid-container-baselines-003-ref.html index aadbb6bda275..0aa0ea8953e0 100644 --- a/layout/reftests/css-grid/grid-container-baselines-003-ref.html +++ b/layout/reftests/css-grid/grid-container-baselines-003-ref.html @@ -87,7 +87,7 @@ A
A
B
A
B
+ class="a f" style="float:right; width:30px; position:relative; left:-4px; padding-bottom:22px;">A
B
C
D
E
F
diff --git a/layout/reftests/floats/float-nowrap-1-notref.html b/layout/reftests/floats/float-nowrap-1-notref.html new file mode 100644 index 000000000000..3a5f7f1eea3e --- /dev/null +++ b/layout/reftests/floats/float-nowrap-1-notref.html @@ -0,0 +1,18 @@ + + +
+ + Some text that overflows my parent. +
diff --git a/layout/reftests/floats/float-nowrap-1.html b/layout/reftests/floats/float-nowrap-1.html new file mode 100644 index 000000000000..c1a34c2e1a21 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-1.html @@ -0,0 +1,18 @@ + + +
+ Some text that overflows my parent. + +
diff --git a/layout/reftests/floats/float-nowrap-2.html b/layout/reftests/floats/float-nowrap-2.html new file mode 100644 index 000000000000..3946b2eda1fb --- /dev/null +++ b/layout/reftests/floats/float-nowrap-2.html @@ -0,0 +1,19 @@ + + +
+ Some text that + + overflows my parent. +
diff --git a/layout/reftests/floats/float-nowrap-3-ref.html b/layout/reftests/floats/float-nowrap-3-ref.html new file mode 100644 index 000000000000..1ef631be3e05 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-3-ref.html @@ -0,0 +1,23 @@ + + +
+ Some + + text that overflows my parent. + + +
diff --git a/layout/reftests/floats/float-nowrap-3.html b/layout/reftests/floats/float-nowrap-3.html new file mode 100644 index 000000000000..4d50adb4f4b5 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-3.html @@ -0,0 +1,22 @@ + + +
+ Some + + text that overflows my parent. + +
diff --git a/layout/reftests/floats/float-nowrap-4-ref.html b/layout/reftests/floats/float-nowrap-4-ref.html new file mode 100644 index 000000000000..00e87b2c5fa3 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-4-ref.html @@ -0,0 +1,23 @@ + + +
+ Some + + + text that overflows my parent. + +
diff --git a/layout/reftests/floats/float-nowrap-4.html b/layout/reftests/floats/float-nowrap-4.html new file mode 100644 index 000000000000..db5ec926a7a0 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-4.html @@ -0,0 +1,22 @@ + + +
+ Some + + text that overflows my parent. + +
diff --git a/layout/reftests/floats/float-nowrap-5-ref.html b/layout/reftests/floats/float-nowrap-5-ref.html new file mode 100644 index 000000000000..c1321bd56d03 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-5-ref.html @@ -0,0 +1,2 @@ + +
Hello Kittie diff --git a/layout/reftests/floats/float-nowrap-5.html b/layout/reftests/floats/float-nowrap-5.html new file mode 100644 index 000000000000..2d940910b087 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-5.html @@ -0,0 +1,10 @@ + + +
KittieHello  diff --git a/layout/reftests/floats/float-nowrap-6.html b/layout/reftests/floats/float-nowrap-6.html new file mode 100644 index 000000000000..11cc83eaa18b --- /dev/null +++ b/layout/reftests/floats/float-nowrap-6.html @@ -0,0 +1,10 @@ + + +
Hello Kittie diff --git a/layout/reftests/floats/float-nowrap-7.html b/layout/reftests/floats/float-nowrap-7.html new file mode 100644 index 000000000000..530909e0a256 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-7.html @@ -0,0 +1,20 @@ + + +
+ S
ome text that overflows my parent.
+
diff --git a/layout/reftests/floats/float-nowrap-8.html b/layout/reftests/floats/float-nowrap-8.html new file mode 100644 index 000000000000..5b3e5b15dc35 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-8.html @@ -0,0 +1,18 @@ + + +
+ Some + text that overflows my parent. +
diff --git a/layout/reftests/floats/float-nowrap-9.html b/layout/reftests/floats/float-nowrap-9.html new file mode 100644 index 000000000000..b857ff6d1086 --- /dev/null +++ b/layout/reftests/floats/float-nowrap-9.html @@ -0,0 +1,22 @@ + + +
+ Some + + text that overflows my parent. + +
diff --git a/layout/reftests/floats/reftest.list b/layout/reftests/floats/reftest.list index 58eab284efac..78c9391ad1fd 100644 --- a/layout/reftests/floats/reftest.list +++ b/layout/reftests/floats/reftest.list @@ -117,3 +117,13 @@ random-if(/^Windows\x20NT\x206\.1/.test(http.oscpu)) == float-in-rtl-slr-2d.html == float-in-rtl-slr-4b.html float-in-rtl-slr-4-ref.html == float-in-rtl-slr-4c.html float-in-rtl-slr-4-ref.html == float-in-rtl-slr-4d.html float-in-rtl-slr-4-ref.html +!= float-nowrap-1.html float-nowrap-1-notref.html +== float-nowrap-2.html float-nowrap-1.html +== float-nowrap-3.html float-nowrap-3-ref.html +!= float-nowrap-3.html float-nowrap-4.html +== float-nowrap-4.html float-nowrap-4-ref.html +== float-nowrap-5.html float-nowrap-5-ref.html +== float-nowrap-6.html float-nowrap-5-ref.html +== float-nowrap-7.html float-nowrap-1.html +== float-nowrap-8.html float-nowrap-1.html +== float-nowrap-9.html float-nowrap-3-ref.html diff --git a/mozglue/misc/TimeStamp_windows.cpp b/mozglue/misc/TimeStamp_windows.cpp index 6af8a1b28e74..95af0e7fb46b 100644 --- a/mozglue/misc/TimeStamp_windows.cpp +++ b/mozglue/misc/TimeStamp_windows.cpp @@ -428,6 +428,12 @@ BaseTimeDurationPlatformUtils::ResolutionInTicks() static bool HasStableTSC() { +#if defined(_M_ARM64) + // AArch64 defines that its system counter run at a constant rate + // regardless of the current clock frequency of the system. See "The + // Generic Timer", section D7, in the ARMARM for ARMv8. 
+ return true; +#else union { int regs[4]; @@ -463,6 +469,7 @@ HasStableTSC() // if bit 8 is set than TSC will run at a constant rate // in all ACPI P-states, C-states and T-states return regs[3] & (1 << 8); +#endif } static bool gInitialized = false; diff --git a/security/sandbox/common/SandboxSettings.h b/security/sandbox/common/SandboxSettings.h index 82cd591832e6..48290d805d25 100644 --- a/security/sandbox/common/SandboxSettings.h +++ b/security/sandbox/common/SandboxSettings.h @@ -21,5 +21,9 @@ bool IsContentSandboxEnabled(); int ClampFlashSandboxLevel(const int aLevel); #endif +#if defined(__OpenBSD__) +bool StartOpenBSDSandbox(GeckoProcessType type); +#endif + } #endif // mozilla_SandboxPolicies_h diff --git a/servo/components/style/invalidation/element/state_and_attributes.rs b/servo/components/style/invalidation/element/state_and_attributes.rs index 1eb022b298a7..fa0ccdd8be9a 100644 --- a/servo/components/style/invalidation/element/state_and_attributes.rs +++ b/servo/components/style/invalidation/element/state_and_attributes.rs @@ -159,13 +159,18 @@ where // force a restyle here. Matching doesn't depend on the actual visited // state at all, so we can't look at matching results to decide what to // do for this case. - if state_changes.intersects(ElementState::IN_VISITED_OR_UNVISITED_STATE) { + if state_changes.intersects(ElementState::IN_VISITED_OR_UNVISITED_STATE) && + self.shared_context.visited_styles_enabled + { trace!(" > visitedness change, force subtree restyle"); - // If we get here with visited links disabled, we should probably - // just avoid the restyle and remove the state change here, not only - // as an optimization, but also because it kind of would kill the + // We shouldn't get here with visited links disabled, but it's hard + // to assert in cases where you record a visitedness change and + // afterwards you change some of the stuff (like the pref) that + // changes whether visited styles are enabled. 
+ // + // So just avoid the restyle here, because it kind of would kill the // point of disabling visited links. - debug_assert!(self.shared_context.visited_styles_enabled); + // // We can't just return here because there may also be attribute // changes as well that imply additional hints for siblings. self.data.hint.insert(RestyleHint::restyle_subtree()); diff --git a/taskcluster/docs/actions.rst b/taskcluster/docs/actions.rst index 72c3c09b4cc7..5526f6038df2 100644 --- a/taskcluster/docs/actions.rst +++ b/taskcluster/docs/actions.rst @@ -79,6 +79,9 @@ the entire task-group (result-set or push in Treeherder terminology). To create an action that shows up in the context menu for a task we would specify the ``context`` parameter. +The ``order`` value is the sort key defining the order of actions in the +resulting ``actions.json`` file. If multiple actions have the same name and +match the same task, the action with the smallest ``order`` will be used. Setting the Action Context .......................... 
diff --git a/taskcluster/taskgraph/actions/add_new_jobs.py b/taskcluster/taskgraph/actions/add_new_jobs.py index 0da5a4521a45..75b257261acc 100644 --- a/taskcluster/taskgraph/actions/add_new_jobs.py +++ b/taskcluster/taskgraph/actions/add_new_jobs.py @@ -18,7 +18,7 @@ from .util import (create_tasks, fetch_graph_and_labels) generic=True, symbol='add-new', description="Add new jobs using task labels.", - order=10000, + order=100, context=[], schema={ 'type': 'object', diff --git a/taskcluster/taskgraph/actions/add_talos.py b/taskcluster/taskgraph/actions/add_talos.py index 91f4e6f75bfb..cae1a52290dc 100644 --- a/taskcluster/taskgraph/actions/add_talos.py +++ b/taskcluster/taskgraph/actions/add_talos.py @@ -21,7 +21,7 @@ logger = logging.getLogger(__name__) generic=True, symbol='raT', description="Add all Talos tasks to a push.", - order=100, # Useful for sheriffs, but not top of the list + order=150, context=[], schema={ 'type': 'object', diff --git a/taskcluster/taskgraph/actions/backfill.py b/taskcluster/taskgraph/actions/backfill.py index 85a807ce5671..7650ae443805 100644 --- a/taskcluster/taskgraph/actions/backfill.py +++ b/taskcluster/taskgraph/actions/backfill.py @@ -31,7 +31,7 @@ logger = logging.getLogger(__name__) description=('Take the label of the current task, ' 'and trigger the task with that label ' 'on previous pushes in the same project.'), - order=0, + order=200, context=[{}], # This will be available for all tasks schema={ 'type': 'object', diff --git a/taskcluster/taskgraph/actions/cancel.py b/taskcluster/taskgraph/actions/cancel.py index 6b8c8de3ccd5..da8cf0b2b552 100644 --- a/taskcluster/taskgraph/actions/cancel.py +++ b/taskcluster/taskgraph/actions/cancel.py @@ -19,7 +19,7 @@ from .registry import register_callback_action description=( 'Cancel the given task' ), - order=100, + order=350, context=[{}] ) def cancel_action(parameters, graph_config, input, task_group_id, task_id, task): diff --git a/taskcluster/taskgraph/actions/cancel_all.py 
b/taskcluster/taskgraph/actions/cancel_all.py index 50efb8da3e22..8b532769a71c 100644 --- a/taskcluster/taskgraph/actions/cancel_all.py +++ b/taskcluster/taskgraph/actions/cancel_all.py @@ -47,7 +47,7 @@ def list_group(task_group_id, session): 'Cancel all running and pending tasks created by the decision task ' 'this action task is associated with.' ), - order=100, + order=400, context=[] ) def cancel_all_action(parameters, graph_config, input, task_group_id, task_id, task): diff --git a/taskcluster/taskgraph/actions/create_interactive.py b/taskcluster/taskgraph/actions/create_interactive.py index 758258dc69d5..ca7f0b89c7e5 100644 --- a/taskcluster/taskgraph/actions/create_interactive.py +++ b/taskcluster/taskgraph/actions/create_interactive.py @@ -34,7 +34,7 @@ task. You may need to wait for it to begin running. description=( 'Create a a copy of the task that you can interact with' ), - order=1, + order=50, context=[{'worker-implementation': 'docker-worker'}], schema={ 'type': 'object', diff --git a/taskcluster/taskgraph/actions/mochitest_retrigger.py b/taskcluster/taskgraph/actions/mochitest_retrigger.py deleted file mode 100644 index d85bbafeb413..000000000000 --- a/taskcluster/taskgraph/actions/mochitest_retrigger.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- - -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- -from __future__ import absolute_import, print_function, unicode_literals - -import json -import logging - -from slugid import nice as slugid - -from .util import (create_task_from_def, fetch_graph_and_labels) -from .registry import register_callback_action -from taskgraph.util.parameterization import resolve_task_references - -TASKCLUSTER_QUEUE_URL = "https://queue.taskcluster.net/v1/task" - -logger = logging.getLogger(__name__) - - -@register_callback_action( - name='retrigger-mochitest-reftest-with-options', - title='Mochitest/Reftest Retrigger', - kind='hook', - generic=True, - symbol='tr', - description="Retriggers the specified mochitest/reftest job with additional options", - context=[{'test-type': 'mochitest'}, - {'test-type': 'reftest'}], - order=0, - schema={ - 'type': 'object', - 'properties': { - 'path': { - 'type': 'string', - 'maxLength': 255, - 'default': '', - 'title': 'Path name', - 'description': 'Path of test to retrigger' - }, - 'logLevel': { - 'type': 'string', - 'enum': ['debug', 'info', 'warning', 'error', 'critical'], - 'default': 'debug', - 'title': 'Log level', - 'description': 'Log level for output (default is DEBUG, which is highest)' - }, - 'runUntilFail': { - 'type': 'boolean', - 'default': True, - 'title': 'Run until failure', - 'description': ('Runs the specified set of tests repeatedly ' - 'until failure (or 30 times)') - }, - 'repeat': { - 'type': 'integer', - 'default': 30, - 'minimum': 1, - 'title': 'Run tests N times', - 'description': ('Run tests repeatedly (usually used in ' - 'conjunction with runUntilFail)') - }, - 'environment': { - 'type': 'object', - 'default': {'MOZ_LOG': ''}, - 'title': 'Extra environment variables', - 'description': 'Extra environment variables to use for this run', - 'additionalProperties': {'type': 'string'} - }, - 'preferences': { - 'type': 'object', - 'default': {'mygeckopreferences.pref': 'myvalue2'}, - 'title': 'Extra gecko (about:config) preferences', - 'description': 'Extra gecko 
(about:config) preferences to use for this run', - 'additionalProperties': {'type': 'string'} - } - }, - 'additionalProperties': False, - 'required': ['path'] - } -) -def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task): - decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( - parameters, graph_config) - - pre_task = full_task_graph.tasks[task['metadata']['name']] - - # fix up the task's dependencies, similar to how optimization would - # have done in the decision - dependencies = {name: label_to_taskid[label] - for name, label in pre_task.dependencies.iteritems()} - new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies) - new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues()) - - # don't want to run mozharness tests, want a custom mach command instead - new_task_definition['payload']['command'] += ['--no-run-tests'] - - custom_mach_command = [task['tags']['test-type']] - - # mochitests may specify a flavor - if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'): - custom_mach_command += [ - '--keep-open=false', - '-f', - new_task_definition['payload']['env']['MOCHITEST_FLAVOR'] - ] - - enable_e10s = json.loads(new_task_definition['payload']['env'].get( - 'ENABLE_E10S', 'true')) - if not enable_e10s: - custom_mach_command += ['--disable-e10s'] - - custom_mach_command += ['--log-tbpl=-', - '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))] - if input.get('runUntilFail'): - custom_mach_command += ['--run-until-failure'] - if input.get('repeat'): - custom_mach_command += ['--repeat', str(input.get('repeat', 30))] - - # add any custom gecko preferences - for (key, val) in input.get('preferences', {}).iteritems(): - custom_mach_command += ['--setpref', '{}={}'.format(key, val)] - - custom_mach_command += [input['path']] - new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join( - custom_mach_command) - - 
# update environment - new_task_definition['payload']['env'].update(input.get('environment', {})) - - # tweak the treeherder symbol - new_task_definition['extra']['treeherder']['symbol'] += '-custom' - - logging.info("New task definition: %s", new_task_definition) - - # actually create the new task - new_task_id = slugid() - create_task_from_def(new_task_id, new_task_definition, parameters['level']) diff --git a/taskcluster/taskgraph/actions/purge_caches.py b/taskcluster/taskgraph/actions/purge_caches.py index 715e84869b00..03ccc15fd36a 100644 --- a/taskcluster/taskgraph/actions/purge_caches.py +++ b/taskcluster/taskgraph/actions/purge_caches.py @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) 'Purge any caches associated with this task ' 'across all workers of the same workertype as the task.' ), - order=100, + order=450, context=[{'worker-implementation': 'docker-worker'}] ) def purge_caches_action(parameters, graph_config, input, task_group_id, task_id, task): diff --git a/taskcluster/taskgraph/actions/release_promotion.py b/taskcluster/taskgraph/actions/release_promotion.py index 0751b0f5d915..9770dcfd6fbc 100644 --- a/taskcluster/taskgraph/actions/release_promotion.py +++ b/taskcluster/taskgraph/actions/release_promotion.py @@ -56,7 +56,7 @@ def get_flavors(graph_config, param): title='Release Promotion', symbol='${input.release_promotion_flavor}', description="Promote a release.", - order=10000, + order=500, context=[], available=is_release_promotion_available, schema=lambda graph_config: { diff --git a/taskcluster/taskgraph/actions/rerun.py b/taskcluster/taskgraph/actions/rerun.py index 53e8c22f57d4..9e314cdfd026 100644 --- a/taskcluster/taskgraph/actions/rerun.py +++ b/taskcluster/taskgraph/actions/rerun.py @@ -32,7 +32,7 @@ RERUN_STATES = ('exception', 'failed') 'This only works on failed or exception tasks in the original taskgraph,' ' and is CoT friendly.' 
), - order=1, + order=300, context=[{}], schema={ 'type': 'object', diff --git a/taskcluster/taskgraph/actions/retrigger.py b/taskcluster/taskgraph/actions/retrigger.py index 259f0a0acf4f..6885d6e11bed 100644 --- a/taskcluster/taskgraph/actions/retrigger.py +++ b/taskcluster/taskgraph/actions/retrigger.py @@ -6,18 +6,174 @@ from __future__ import absolute_import, print_function, unicode_literals +import json import logging +import textwrap +from slugid import nice as slugid from .util import ( combine_task_graph_files, create_tasks, - fetch_graph_and_labels + fetch_graph_and_labels, + relativize_datestamps, + create_task_from_def, ) +from ..util.parameterization import resolve_task_references from .registry import register_callback_action logger = logging.getLogger(__name__) +@register_callback_action( + name='retrigger', + cb_name='retrigger-mochitest', + title='Retrigger Mochitest/Reftest', + symbol='rt', + kind='hook', + generic=True, + description="Retriggers the specified mochitest/reftest job with additional options", + context=[{'test-type': 'mochitest'}, + {'test-type': 'reftest'}], + order=10, + schema={ + 'type': 'object', + 'properties': { + 'path': { + 'type': 'string', + 'maxLength': 255, + 'default': '', + 'title': 'Path name', + 'description': 'Path of test to retrigger' + }, + 'logLevel': { + 'type': 'string', + 'enum': ['debug', 'info', 'warning', 'error', 'critical'], + 'default': 'debug', + 'title': 'Log level', + 'description': 'Log level for output (default is DEBUG, which is highest)' + }, + 'runUntilFail': { + 'type': 'boolean', + 'default': True, + 'title': 'Run until failure', + 'description': ('Runs the specified set of tests repeatedly ' + 'until failure (or 30 times)') + }, + 'repeat': { + 'type': 'integer', + 'default': 30, + 'minimum': 1, + 'title': 'Run tests N times', + 'description': ('Run tests repeatedly (usually used in ' + 'conjunction with runUntilFail)') + }, + 'environment': { + 'type': 'object', + 'default': {'MOZ_LOG': ''}, 
+ 'title': 'Extra environment variables', + 'description': 'Extra environment variables to use for this run', + 'additionalProperties': {'type': 'string'} + }, + 'preferences': { + 'type': 'object', + 'default': {'mygeckopreferences.pref': 'myvalue2'}, + 'title': 'Extra gecko (about:config) preferences', + 'description': 'Extra gecko (about:config) preferences to use for this run', + 'additionalProperties': {'type': 'string'} + } + }, + 'additionalProperties': False, + 'required': ['path'] + } +) +def mochitest_retrigger_action(parameters, graph_config, input, task_group_id, task_id, task): + decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( + parameters, graph_config) + + pre_task = full_task_graph.tasks[task['metadata']['name']] + + # fix up the task's dependencies, similar to how optimization would + # have done in the decision + dependencies = {name: label_to_taskid[label] + for name, label in pre_task.dependencies.iteritems()} + new_task_definition = resolve_task_references(pre_task.label, pre_task.task, dependencies) + new_task_definition.setdefault('dependencies', []).extend(dependencies.itervalues()) + + # don't want to run mozharness tests, want a custom mach command instead + new_task_definition['payload']['command'] += ['--no-run-tests'] + + custom_mach_command = [task['tags']['test-type']] + + # mochitests may specify a flavor + if new_task_definition['payload']['env'].get('MOCHITEST_FLAVOR'): + custom_mach_command += [ + '--keep-open=false', + '-f', + new_task_definition['payload']['env']['MOCHITEST_FLAVOR'] + ] + + enable_e10s = json.loads(new_task_definition['payload']['env'].get( + 'ENABLE_E10S', 'true')) + if not enable_e10s: + custom_mach_command += ['--disable-e10s'] + + custom_mach_command += ['--log-tbpl=-', + '--log-tbpl-level={}'.format(input.get('logLevel', 'debug'))] + if input.get('runUntilFail'): + custom_mach_command += ['--run-until-failure'] + if input.get('repeat'): + custom_mach_command += ['--repeat', 
str(input.get('repeat', 30))] + + # add any custom gecko preferences + for (key, val) in input.get('preferences', {}).iteritems(): + custom_mach_command += ['--setpref', '{}={}'.format(key, val)] + + custom_mach_command += [input['path']] + new_task_definition['payload']['env']['CUSTOM_MACH_COMMAND'] = ' '.join( + custom_mach_command) + + # update environment + new_task_definition['payload']['env'].update(input.get('environment', {})) + + # tweak the treeherder symbol + new_task_definition['extra']['treeherder']['symbol'] += '-custom' + + logging.info("New task definition: %s", new_task_definition) + + # actually create the new task + new_task_id = slugid() + create_task_from_def(new_task_id, new_task_definition, parameters['level']) + + +@register_callback_action( + title='Retrigger', + name='retrigger', + symbol='rt', + kind='hook', + cb_name='retrigger-decision', + description=textwrap.dedent('''\ + Create a clone of the task (retriggering decision, action, and cron tasks requires + special scopes).'''), + order=11, + context=[ + {'kind': 'decision-task'}, + {'kind': 'action-callback'}, + {'kind': 'cron-task'}, + ], +) +def retrigger_decision_action(parameters, graph_config, input, task_group_id, task_id, task): + decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels( + parameters, graph_config) + """For a single task, we try to just run exactly the same task once more. + It's quite possible that we don't have the scopes to do so (especially for + an action), but this is best-effort.""" + + # make all of the timestamps relative; they will then be turned back into + # absolute timestamps relative to the current time. + task = relativize_datestamps(task) + create_task_from_def(slugid(), task, parameters['level']) + + @register_callback_action( title='Retrigger', name='retrigger', @@ -25,9 +181,9 @@ logger = logging.getLogger(__name__) kind='hook', generic=True, description=( - 'Create a clone of the task.\n\n' + 'Create a clone of the task.' 
), - order=1, + order=19, # must be greater than other orders in this file, as this is the fallback version context=[{}], schema={ 'type': 'object', @@ -56,6 +212,7 @@ def retrigger_action(parameters, graph_config, input, task_group_id, task_id, ta parameters, graph_config) label = task['metadata']['name'] + with_downstream = ' ' to_run = [label] diff --git a/taskcluster/taskgraph/actions/run_missing_tests.py b/taskcluster/taskgraph/actions/run_missing_tests.py index b084ad5bfed9..9b4f02815726 100644 --- a/taskcluster/taskgraph/actions/run_missing_tests.py +++ b/taskcluster/taskgraph/actions/run_missing_tests.py @@ -27,7 +27,7 @@ logger = logging.getLogger(__name__) "This action is for use on pushes that will be merged into another branch," "to check that optimization hasn't hidden any failures." ), - order=100, # Useful for sheriffs, but not top of the list + order=250, context=[], # Applies to decision task ) def run_missing_tests(parameters, graph_config, input, task_group_id, task_id, task): diff --git a/taskcluster/taskgraph/actions/util.py b/taskcluster/taskgraph/actions/util.py index f6e2aba54e2d..8656fa1a7be4 100644 --- a/taskcluster/taskgraph/actions/util.py +++ b/taskcluster/taskgraph/actions/util.py @@ -10,6 +10,7 @@ import copy import logging import requests import os +import re from requests.exceptions import HTTPError @@ -17,7 +18,13 @@ from taskgraph import create from taskgraph.decision import read_artifact, write_artifact from taskgraph.taskgraph import TaskGraph from taskgraph.optimize import optimize_task_graph -from taskgraph.util.taskcluster import get_session, find_task_id, get_artifact, list_tasks +from taskgraph.util.taskcluster import ( + get_session, + find_task_id, + get_artifact, + list_tasks, + parse_time, +) logger = logging.getLogger(__name__) @@ -164,3 +171,30 @@ def combine_task_graph_files(suffixes): for suffix in suffixes: all.update(read_artifact('task-graph-{}.json'.format(suffix))) write_artifact('task-graph.json', all) + + 
+def relativize_datestamps(task_def): + """ + Given a task definition as received from the queue, convert all datestamps + to {relative_datestamp: ..} format, with the task creation time as "now". + The result is useful for handing to ``create_task``. + """ + base = parse_time(task_def['created']) + # borrowed from https://github.com/epoberezkin/ajv/blob/master/lib/compile/formats.js + ts_pattern = re.compile( + r'^\d\d\d\d-[0-1]\d-[0-3]\d[t\s]' + r'(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?' + r'(?:z|[+-]\d\d:\d\d)$', re.I) + + def recurse(value): + if isinstance(value, basestring): + if ts_pattern.match(value): + value = parse_time(value) + diff = value - base + return {'relative-datestamp': '{} seconds'.format(int(diff.total_seconds()))} + if isinstance(value, list): + return [recurse(e) for e in value] + if isinstance(value, dict): + return {k: recurse(v) for k, v in value.items()} + return value + return recurse(task_def) diff --git a/taskcluster/taskgraph/taskgraph.py b/taskcluster/taskgraph/taskgraph.py index b772dd0805b8..e0d98a2f4020 100644 --- a/taskcluster/taskgraph/taskgraph.py +++ b/taskcluster/taskgraph/taskgraph.py @@ -30,6 +30,9 @@ class TaskGraph(object): "Get a task by label" return self.tasks[label] + def __contains__(self, label): + return label in self.tasks + def __iter__(self): "Iterate over tasks in undefined order" return self.tasks.itervalues() diff --git a/taskcluster/taskgraph/test/python.ini b/taskcluster/taskgraph/test/python.ini index dba67c507817..4482f8846f42 100644 --- a/taskcluster/taskgraph/test/python.ini +++ b/taskcluster/taskgraph/test/python.ini @@ -2,6 +2,7 @@ subsuite = taskgraph skip-if = python == 3 +[test_actions_util.py] [test_create.py] [test_cron_util.py] [test_decision.py] @@ -21,6 +22,7 @@ skip-if = python == 3 [test_util_python_path.py] [test_util_runnable_jobs.py] [test_util_schema.py] +[test_util_taskcluster.py] [test_util_templates.py] [test_util_time.py] [test_util_treeherder.py] diff --git 
a/taskcluster/taskgraph/test/test_actions_util.py b/taskcluster/taskgraph/test/test_actions_util.py new file mode 100644 index 000000000000..0fbdda9fb7c6 --- /dev/null +++ b/taskcluster/taskgraph/test/test_actions_util.py @@ -0,0 +1,46 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function, unicode_literals + +import unittest +from mozunit import main +from taskgraph.actions.util import ( + relativize_datestamps +) + +TASK_DEF = { + 'created': '2017-10-10T18:33:03.460Z', + # note that this is not an even number of seconds off! + 'deadline': '2017-10-11T18:33:03.461Z', + 'dependencies': [], + 'expires': '2018-10-10T18:33:04.461Z', + 'payload': { + 'artifacts': { + 'public': { + 'expires': '2018-10-10T18:33:03.463Z', + 'path': '/builds/worker/artifacts', + 'type': 'directory', + }, + }, + 'maxRunTime': 1800, + }, +} + + +class TestRelativize(unittest.TestCase): + + def test_relativize(self): + rel = relativize_datestamps(TASK_DEF) + import pprint + pprint.pprint(rel) + assert rel['created'] == {'relative-datestamp': '0 seconds'} + assert rel['deadline'] == {'relative-datestamp': '86400 seconds'} + assert rel['expires'] == {'relative-datestamp': '31536001 seconds'} + assert rel['payload']['artifacts']['public']['expires'] == \ + {'relative-datestamp': '31536000 seconds'} + + +if __name__ == '__main__': + main() diff --git a/taskcluster/taskgraph/test/test_taskgraph.py b/taskcluster/taskgraph/test/test_taskgraph.py index 21ea24d77e3f..3f461ab34bd5 100644 --- a/taskcluster/taskgraph/test/test_taskgraph.py +++ b/taskcluster/taskgraph/test/test_taskgraph.py @@ -74,6 +74,27 @@ class TestTaskGraph(unittest.TestCase): tasks, new_graph = TaskGraph.from_json(graph.to_json()) self.assertEqual(graph, new_graph) + simple_graph = TaskGraph(tasks={ + 'a': Task( + kind='fancy', 
+ label='a', + attributes={}, + dependencies={'prereq': 'b'}, # must match edges, below + optimization={'seta': None}, + task={'task': 'def'}), + 'b': Task( + kind='pre', + label='b', + attributes={}, + dependencies={}, + optimization={'seta': None}, + task={'task': 'def2'}), + }, graph=Graph(nodes={'a', 'b'}, edges={('a', 'b', 'prereq')})) + + def test_contains(self): + assert 'a' in self.simple_graph + assert 'c' not in self.simple_graph + if __name__ == '__main__': main() diff --git a/taskcluster/taskgraph/test/test_util_taskcluster.py b/taskcluster/taskgraph/test/test_util_taskcluster.py new file mode 100644 index 000000000000..0d355a5d8fdc --- /dev/null +++ b/taskcluster/taskgraph/test/test_util_taskcluster.py @@ -0,0 +1,24 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +from __future__ import absolute_import, print_function, unicode_literals + +import datetime +import unittest + +import mozunit +from taskgraph.util.taskcluster import ( + parse_time +) + + +class TestTCUtils(unittest.TestCase): + + def test_parse_time(self): + exp = datetime.datetime(2018, 10, 10, 18, 33, 3, 463000) + assert parse_time('2018-10-10T18:33:03.463Z') == exp + + +if __name__ == '__main__': + mozunit.main() diff --git a/taskcluster/taskgraph/util/taskcluster.py b/taskcluster/taskgraph/util/taskcluster.py index aeed2afc6c72..c4f2b28fee32 100644 --- a/taskcluster/taskgraph/util/taskcluster.py +++ b/taskcluster/taskgraph/util/taskcluster.py @@ -17,10 +17,10 @@ from requests.adapters import HTTPAdapter from taskgraph.task import Task _PUBLIC_TC_ARTIFACT_LOCATION = \ - 'https://queue.taskcluster.net/v1/task/{task_id}/artifacts/{artifact_prefix}/{postfix}' + 'https://queue.taskcluster.net/v1/task/{task_id}/artifacts/{artifact_prefix}/{postfix}' _PRIVATE_TC_ARTIFACT_LOCATION = \ - 
'http://taskcluster/queue/v1/task/{task_id}/artifacts/{artifact_prefix}/{postfix}' + 'http://taskcluster/queue/v1/task/{task_id}/artifacts/{artifact_prefix}/{postfix}' logger = logging.getLogger(__name__) @@ -151,10 +151,15 @@ def list_tasks(index_path, use_proxy=False): # all of these tasks should be created with the same expires time so they end up in # order from earliest to latest action. If more correctness is needed, consider # fetching each task and sorting on the created date. - results.sort(key=lambda t: datetime.datetime.strptime(t['expires'], '%Y-%m-%dT%H:%M:%S.%fZ')) + results.sort(key=lambda t: parse_time(t['expires'])) return [t['taskId'] for t in results] +def parse_time(timestamp): + """Turn a "JSON timestamp" as used in TC APIs into a datetime""" + return datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ') + + def get_task_url(task_id, use_proxy=False): if use_proxy: TASK_URL = 'http://taskcluster/queue/v1/task/{}' diff --git a/testing/geckodriver/CHANGES.md b/testing/geckodriver/CHANGES.md index 619b6df98b4b..cf2665e3cb12 100644 --- a/testing/geckodriver/CHANGES.md +++ b/testing/geckodriver/CHANGES.md @@ -12,6 +12,14 @@ Unreleased - Support for WebDriver web element, web frame, and web window identifiers from Firefox. +### Changed + +- The HTTP status codes used for [`ScriptTimeout`] and [`Timeout`] + errors has changed from Request Timeout (408) to Internal Server + Error (500) in order to not break HTTP/1.1 `Keep-Alive` support, + as HTTP clients interpret the old status code to mean they should + duplicate the request. 
+ 0.21.0 (2018-06-15) ------------------- diff --git a/testing/geckodriver/Cargo.toml b/testing/geckodriver/Cargo.toml index 3ac2678bbc08..1ba280300354 100644 --- a/testing/geckodriver/Cargo.toml +++ b/testing/geckodriver/Cargo.toml @@ -12,7 +12,7 @@ publish = false base64 = "0.6" chrono = "^0.2" clap = { version = "^2.19", default-features = false, features = ["suggestions", "wrap_help"] } -hyper = "0.10" +hyper = "0.12" lazy_static = "1.0" log = { version = "0.4", features = ["std"] } mozprofile = { path = "../mozbase/rust/mozprofile" } diff --git a/testing/geckodriver/src/marionette.rs b/testing/geckodriver/src/marionette.rs index c85bc4553a2b..ab9bde2c11ce 100644 --- a/testing/geckodriver/src/marionette.rs +++ b/testing/geckodriver/src/marionette.rs @@ -1,5 +1,5 @@ use base64; -use hyper::method::Method; +use hyper::Method; use mozprofile::preferences::Pref; use mozprofile::profile::Profile; use mozrunner::runner::{FirefoxProcess, FirefoxRunner, Runner, RunnerProcess}; @@ -67,32 +67,32 @@ const LEGACY_ELEMENT_KEY: &'static str = "ELEMENT"; pub fn extension_routes() -> Vec<(Method, &'static str, GeckoExtensionRoute)> { return vec![ ( - Method::Get, + Method::GET, "/session/{sessionId}/moz/context", GeckoExtensionRoute::GetContext, ), ( - Method::Post, + Method::POST, "/session/{sessionId}/moz/context", GeckoExtensionRoute::SetContext, ), ( - Method::Post, + Method::POST, "/session/{sessionId}/moz/xbl/{elementId}/anonymous_children", GeckoExtensionRoute::XblAnonymousChildren, ), ( - Method::Post, + Method::POST, "/session/{sessionId}/moz/xbl/{elementId}/anonymous_by_attribute", GeckoExtensionRoute::XblAnonymousByAttribute, ), ( - Method::Post, + Method::POST, "/session/{sessionId}/moz/addon/install", GeckoExtensionRoute::InstallAddon, ), ( - Method::Post, + Method::POST, "/session/{sessionId}/moz/addon/uninstall", GeckoExtensionRoute::UninstallAddon, ), diff --git a/testing/talos/talos/results.py b/testing/talos/talos/results.py index 88d198c6d670..c5b649e3a78a 
100755 --- a/testing/talos/talos/results.py +++ b/testing/talos/talos/results.py @@ -415,13 +415,17 @@ class BrowserLogResults(object): def xperf(self, counter_results): """record xperf counters in counter_results dictionary""" + session_store_counter = 'time_to_session_store_window_restored_ms' + counters = ['main_startup_fileio', 'main_startup_netio', 'main_normal_fileio', 'main_normal_netio', 'nonmain_startup_fileio', 'nonmain_normal_fileio', - 'nonmain_normal_netio'] + 'nonmain_normal_netio', + session_store_counter, + ] mainthread_counter_keys = ['readcount', 'readbytes', 'writecount', 'writebytes'] @@ -491,6 +495,15 @@ class BrowserLogResults(object): .append([int(values[mainthread_counter_keys[i]]), values['filename']]) + if session_store_counter in counter_results.keys(): + filename = 'etl_output_session_restore_stats.csv' + # This file is a csv but it only contains one field, so we'll just + # obtain the value by converting the second line in the file. + with open(filename, 'r') as contents: + lines = contents.read().splitlines() + value = float(lines[1].strip()) + counter_results.setdefault(session_store_counter, []).append(value) + def mainthread_io(self, counter_results): """record mainthread IO counters in counter_results dictionary""" diff --git a/testing/talos/talos/test.py b/testing/talos/talos/test.py index 0c1bb5c434cf..70afa73c77e0 100644 --- a/testing/talos/talos/test.py +++ b/testing/talos/talos/test.py @@ -512,7 +512,9 @@ class tp5n(PageloaderTest): 'nonmain_startup_fileio', 'nonmain_normal_fileio', 'nonmain_normal_netio', 'mainthread_readcount', 'mainthread_readbytes', 'mainthread_writecount', - 'mainthread_writebytes'] + 'mainthread_writebytes', + 'time_to_session_store_window_restored_ms', + ] xperf_providers = ['PROC_THREAD', 'LOADER', 'HARD_FAULTS', 'FILENAME', 'FILE_IO', 'FILE_IO_INIT'] xperf_user_providers = ['Mozilla Generic Provider', diff --git a/testing/talos/talos/xtalos/etlparser.py b/testing/talos/talos/xtalos/etlparser.py index 
34682c1139fd..b63b19d725b8 100644 --- a/testing/talos/talos/xtalos/etlparser.py +++ b/testing/talos/talos/xtalos/etlparser.py @@ -13,7 +13,6 @@ import shutil import subprocess import sys -import mozfile import xtalos EVENTNAME_INDEX = 0 @@ -334,8 +333,6 @@ def etlparser(xperf_path, etl_filename, processID, approot=None, if debug: uploadFile(csvname) - else: - mozfile.remove(csvname) output = "thread, stage, counter, value\n" for cntr in sorted(io.iterkeys()): diff --git a/testing/talos/talos/xtalos/parse_xperf.py b/testing/talos/talos/xtalos/parse_xperf.py index 8937ece03cb6..05ed39465fd5 100644 --- a/testing/talos/talos/xtalos/parse_xperf.py +++ b/testing/talos/talos/xtalos/parse_xperf.py @@ -10,7 +10,39 @@ import subprocess import sys import etlparser +import mozfile import xtalos +from xperf_analyzer import (ProcessStart, SessionStoreWindowRestored, + XPerfAttribute, XPerfFile, XPerfInterval) + + +def run_session_restore_analysis(debug=False, **kwargs): + required = ('csv_filename', 'outputFile') + for r in required: + if r not in kwargs: + raise xtalos.XTalosError('%s required' % r) + + final_output_file = "%s_session_restore_stats%s" % os.path.splitext( + kwargs['outputFile']) + + output = 'time_to_session_store_window_restored_ms\n' + + with XPerfFile(csvfile=kwargs['csv_filename'], debug=debug) as xperf: + fx_start = ProcessStart('firefox.exe') + ss_window_restored = SessionStoreWindowRestored() + + interval = XPerfInterval(fx_start, ss_window_restored) + xperf.add_attr(interval) + + xperf.analyze() + + output += "%.3f\n" % (interval.get_results()[XPerfAttribute.RESULT]) + + with open(final_output_file, 'w') as out: + out.write(output) + + if debug: + etlparser.uploadFile(final_output_file) def stop(xperf_path, debug=False): @@ -57,6 +89,12 @@ def stop_from_config(config_file=None, debug=False, **kwargs): error_filename=kwargs['error_filename'], processID=kwargs['processID']) + csv_base = '%s.csv' % kwargs['etl_filename'] + 
run_session_restore_analysis(csv_filename=csv_base, debug=debug, **kwargs) + + if not debug: + mozfile.remove(csv_base) + def main(args=sys.argv[1:]): @@ -64,7 +102,7 @@ def main(args=sys.argv[1:]): parser = xtalos.XtalosOptions() args = parser.parse_args(args) - # start xperf + # stop xperf try: stop_from_config(config_file=args.configFile, debug=args.debug_level >= xtalos.DEBUG_INFO, diff --git a/testing/web-platform/meta/MANIFEST.json b/testing/web-platform/meta/MANIFEST.json index 1a471895ac37..a1a7377f1fc1 100644 --- a/testing/web-platform/meta/MANIFEST.json +++ b/testing/web-platform/meta/MANIFEST.json @@ -168511,6 +168511,18 @@ {} ] ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-002.xhtml": [ + [ + "/css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-002.xhtml", + [ + [ + "/css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-002-ref.xhtml", + "==" + ] + ], + {} + ] + ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-001a.xhtml": [ [ "/css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-001a.xhtml", @@ -168535,6 +168547,18 @@ {} ] ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-002.xhtml": [ + [ + "/css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-002.xhtml", + [ + [ + "/css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-002-ref.xhtml", + "==" + ] + ], + {} + ] + ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-wmvert-001.xhtml": [ [ "/css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-wmvert-001.xhtml", @@ -183463,12 +183487,12 @@ {} ] ], - "html/rendering/non-replaced-elements/the-fieldset-element-0/min-width-not-important.html": [ + 
"html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html": [ [ - "/html/rendering/non-replaced-elements/the-fieldset-element-0/min-width-not-important.html", + "/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html", [ [ - "/html/rendering/non-replaced-elements/the-fieldset-element-0/ref.html", + "/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative-ref.html", "==" ] ], @@ -195399,6 +195423,21 @@ {} ] ], + "async-local-storage/helpers/als-tests.js": [ + [ + {} + ] + ], + "async-local-storage/helpers/class-assert.js": [ + [ + {} + ] + ], + "async-local-storage/helpers/equality-asserters.js": [ + [ + {} + ] + ], "audio-output/META.yml": [ [ {} @@ -241704,7 +241743,7 @@ {} ] ], - "css/compositing/support/parsing-testcommon.js": [ + "css/compositing/parsing/support/parsing-testcommon.js": [ [ {} ] @@ -242389,6 +242428,11 @@ {} ] ], + "css/css-backgrounds/parsing/support/parsing-testcommon.js": [ + [ + {} + ] + ], "css/css-backgrounds/reference/60x60-green-background.html": [ [ {} @@ -242884,11 +242928,6 @@ {} ] ], - "css/css-backgrounds/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/css-backgrounds/support/pattern-grg-rgr-grg.png": [ [ {} @@ -243059,7 +243098,7 @@ {} ] ], - "css/css-box/support/parsing-testcommon.js": [ + "css/css-box/parsing/support/parsing-testcommon.js": [ [ {} ] @@ -243089,12 +243128,12 @@ {} ] ], - "css/css-cascade/reference/ref-filled-green-100px-square.xht": [ + "css/css-cascade/parsing/support/parsing-testcommon.js": [ [ {} ] ], - "css/css-cascade/support/parsing-testcommon.js": [ + "css/css-cascade/reference/ref-filled-green-100px-square.xht": [ [ {} ] @@ -243159,12 +243198,12 @@ {} ] ], - "css/css-color/rebeccapurple-ref.html": [ + "css/css-color/parsing/support/parsing-testcommon.js": [ [ {} ] ], - "css/css-color/support/parsing-testcommon.js": [ + "css/css-color/rebeccapurple-ref.html": [ [ {} ] @@ -254219,6 +254258,11 @@ {} ] 
], + "css/css-images/parsing/support/parsing-testcommon.js": [ + [ + {} + ] + ], "css/css-images/support/1x1-green.gif": [ [ {} @@ -254314,11 +254358,6 @@ {} ] ], - "css/css-images/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/css-images/support/pattern-grg-rgr-grg.png": [ [ {} @@ -254904,7 +254943,7 @@ {} ] ], - "css/css-masking/support/parsing-testcommon.js": [ + "css/css-masking/parsing/support/parsing-testcommon.js": [ [ {} ] @@ -257489,6 +257528,11 @@ {} ] ], + "css/css-shapes/parsing/support/parsing-testcommon.js": [ + [ + {} + ] + ], "css/css-shapes/shape-outside/formatting-context/reference/shape-outside-formatting-context-ref.html": [ [ {} @@ -257874,11 +257918,6 @@ {} ] ], - "css/css-shapes/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/css-shapes/support/pattern-grg-rgr-grg.png": [ [ {} @@ -260819,6 +260858,11 @@ {} ] ], + "css/css-transforms/parsing/support/parsing-testcommon.js": [ + [ + {} + ] + ], "css/css-transforms/patternTransform/reference/svg-patternTransform-combination-ref.html": [ [ {} @@ -261189,11 +261233,6 @@ {} ] ], - "css/css-transforms/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/css-transforms/support/pattern-grg-rgr-grg.png": [ [ {} @@ -262404,6 +262443,11 @@ {} ] ], + "css/css-ui/parsing/support/parsing-testcommon.js": [ + [ + {} + ] + ], "css/css-ui/reference/box-sizing-001-ref.html": [ [ {} @@ -263684,11 +263728,6 @@ {} ] ], - "css/css-ui/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/css-ui/support/r1-1.svg": [ [ {} @@ -264589,6 +264628,11 @@ {} ] ], + "css/css-writing-modes/parsing/support/parsing-testcommon.js": [ + [ + {} + ] + ], "css/css-writing-modes/reference/available-size-001-ref.html": [ [ {} @@ -266169,11 +266213,6 @@ {} ] ], - "css/css-writing-modes/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/css-writing-modes/support/pass-cdts-abs-pos-non-replaced.png": [ [ {} @@ -267484,6 +267523,11 @@ {} ] ], + "css/filter-effects/parsing/support/parsing-testcommon.js": [ + [ + 
{} + ] + ], "css/filter-effects/reference/filters-opacity-001-ref.html": [ [ {} @@ -267589,11 +267633,6 @@ {} ] ], - "css/filter-effects/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/filter-effects/support/pattern-grg-rgr-grg.png": [ [ {} @@ -267804,7 +267843,7 @@ {} ] ], - "css/motion/support/parsing-testcommon.js": [ + "css/motion/parsing/support/parsing-testcommon.js": [ [ {} ] @@ -269804,11 +269843,6 @@ {} ] ], - "css/support/parsing-testcommon.js": [ - [ - {} - ] - ], "css/support/pattern-grg-rgr-grg.png": [ [ {} @@ -271339,11 +271373,21 @@ {} ] ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-002-ref.xhtml": [ + [ + {} + ] + ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-001-ref.xhtml": [ [ {} ] ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-002-ref.xhtml": [ + [ + {} + ] + ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-wmvert-001-ref.xhtml": [ [ {} @@ -274579,6 +274623,11 @@ {} ] ], + "docs/_appendix/reverting.md": [ + [ + {} + ] + ], "docs/_appendix/test-templates.md": [ [ {} @@ -285564,7 +285613,7 @@ {} ] ], - "html/rendering/non-replaced-elements/the-fieldset-element-0/ref.html": [ + "html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative-ref.html": [ [ {} ] @@ -288819,6 +288868,11 @@ {} ] ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/global-variables-frame.html": [ + [ + {} + ] + ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/set-document-domain.html": [ [ {} @@ -291634,6 +291688,66 @@ {} ] ], + "network-error-logging/META.yml": [ + [ + {} + ] + ], + "network-error-logging/README.md": [ + [ + {} + ] + ], + "network-error-logging/support/clear-policy-pass.png": [ + [ + {} + ] + ], + "network-error-logging/support/clear-policy-pass.png.sub.headers": [ + [ + {} + ] + ], + 
"network-error-logging/support/lock.py": [ + [ + {} + ] + ], + "network-error-logging/support/nel.sub.js": [ + [ + {} + ] + ], + "network-error-logging/support/no-policy-pass.png": [ + [ + {} + ] + ], + "network-error-logging/support/pass.png": [ + [ + {} + ] + ], + "network-error-logging/support/pass.png.sub.headers": [ + [ + {} + ] + ], + "network-error-logging/support/report.py": [ + [ + {} + ] + ], + "network-error-logging/support/subdomains-pass.png": [ + [ + {} + ] + ], + "network-error-logging/support/subdomains-pass.png.sub.headers": [ + [ + {} + ] + ], "notifications/META.yml": [ [ {} @@ -298604,6 +298718,11 @@ {} ] ], + "service-workers/cache-storage/resources/vary.py": [ + [ + {} + ] + ], "service-workers/cache-storage/script-tests/cache-abort.js": [ [ {} @@ -322955,9 +323074,39 @@ {} ] ], - "async-local-storage/storage-smoke-test.https.tentative.html": [ + "async-local-storage/api-surface.tentative.https.html": [ [ - "/async-local-storage/storage-smoke-test.https.tentative.html", + "/async-local-storage/api-surface.tentative.https.html", + {} + ] + ], + "async-local-storage/key-types.tentative.https.html": [ + [ + "/async-local-storage/key-types.tentative.https.html", + {} + ] + ], + "async-local-storage/non-secure-context-dynamic-import.tentative.html": [ + [ + "/async-local-storage/non-secure-context-dynamic-import.tentative.html", + {} + ] + ], + "async-local-storage/non-secure-context-import-statement.tentative.html": [ + [ + "/async-local-storage/non-secure-context-import-statement.tentative.html", + {} + ] + ], + "async-local-storage/non-secure-context-script-element.tentative.html": [ + [ + "/async-local-storage/non-secure-context-script-element.tentative.html", + {} + ] + ], + "async-local-storage/storage-smoke-test.tentative.https.html": [ + [ + "/async-local-storage/storage-smoke-test.tentative.https.html", {} ] ], @@ -328415,12 +328564,6 @@ {} ] ], - "css/css-backgrounds/box-shadow-syntax-001.html": [ - [ - 
"/css/css-backgrounds/box-shadow-syntax-001.html", - {} - ] - ], "css/css-backgrounds/parsing/background-attachment-invalid.html": [ [ "/css/css-backgrounds/parsing/background-attachment-invalid.html", @@ -341944,6 +342087,10 @@ "/encoding/idlharness.any.html", {} ], + [ + "/encoding/idlharness.any.serviceworker.html", + {} + ], [ "/encoding/idlharness.any.sharedworker.html", {} @@ -341951,10 +342098,6 @@ [ "/encoding/idlharness.any.worker.html", {} - ], - [ - "/encoding/idlharness.https.any.serviceworker.html", - {} ] ], "encoding/iso-2022-jp-decoder.any.js": [ @@ -350726,6 +350869,10 @@ "/fetch/api/abort/general.any.html", {} ], + [ + "/fetch/api/abort/general.any.serviceworker.html", + {} + ], [ "/fetch/api/abort/general.any.sharedworker.html", {} @@ -350733,10 +350880,6 @@ [ "/fetch/api/abort/general.any.worker.html", {} - ], - [ - "/fetch/api/abort/general.https.any.serviceworker.html", - {} ] ], "fetch/api/abort/keepalive.html": [ @@ -352484,6 +352627,10 @@ "/hr-time/idlharness.any.html", {} ], + [ + "/hr-time/idlharness.any.serviceworker.html", + {} + ], [ "/hr-time/idlharness.any.sharedworker.html", {} @@ -352491,10 +352638,6 @@ [ "/hr-time/idlharness.any.worker.html", {} - ], - [ - "/hr-time/idlharness.https.any.serviceworker.html", - {} ] ], "hr-time/monotonic-clock.any.js": [ @@ -355753,6 +355896,30 @@ {} ] ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-default-style.html": [ + [ + "/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-default-style.html", + {} + ] + ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html": [ + [ + "/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html", + {} + ] + ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-flexbox.html": [ + [ + "/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-flexbox.html", + {} + ] + ], + 
"html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-multicol.html": [ + [ + "/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-multicol.html", + {} + ] + ], "html/rendering/non-replaced-elements/the-fieldset-element-0/legend-block-formatting-context.html": [ [ "/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-block-formatting-context.html", @@ -355765,6 +355932,12 @@ {} ] ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html": [ + [ + "/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html", + {} + ] + ], "html/rendering/non-replaced-elements/the-hr-element-0/hr.html": [ [ "/html/rendering/non-replaced-elements/the-hr-element-0/hr.html", @@ -364721,12 +364894,6 @@ {} ] ], - "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/001.html": [ - [ - "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/001.html", - {} - ] - ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/002.html": [ [ "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/002.html", @@ -364751,18 +364918,6 @@ {} ] ], - "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/008.html": [ - [ - "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/008.html", - {} - ] - ], - "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/009.https.html": [ - [ - "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/009.https.html", - {} - ] - ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/010.html": [ [ "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/010.html", @@ -364835,6 +364990,12 @@ {} ] ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/beforeunload.window.js": [ + [ + "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/beforeunload.window.html", + {} + ] + ], 
"html/webappapis/dynamic-markup-insertion/opening-the-input-stream/custom-element.window.js": [ [ "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/custom-element.window.html", @@ -364883,6 +365044,12 @@ {} ] ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js": [ + [ + "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.html", + {} + ] + ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/origin-check-in-document-open-basic.html": [ [ "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/origin-check-in-document-open-basic.html", @@ -364907,11 +365074,21 @@ {} ] ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/unload.window.js": [ + [ + "/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/unload.window.html", + {} + ] + ], "html/webappapis/microtask-queuing/queue-microtask-exceptions.any.js": [ [ "/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.html", {} ], + [ + "/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.serviceworker.html", + {} + ], [ "/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.sharedworker.html", {} @@ -364919,10 +365096,6 @@ [ "/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.worker.html", {} - ], - [ - "/html/webappapis/microtask-queuing/queue-microtask-exceptions.https.any.serviceworker.html", - {} ] ], "html/webappapis/microtask-queuing/queue-microtask.any.js": [ @@ -364930,6 +365103,10 @@ "/html/webappapis/microtask-queuing/queue-microtask.any.html", {} ], + [ + "/html/webappapis/microtask-queuing/queue-microtask.any.serviceworker.html", + {} + ], [ "/html/webappapis/microtask-queuing/queue-microtask.any.sharedworker.html", {} @@ -364937,10 +365114,6 @@ [ "/html/webappapis/microtask-queuing/queue-microtask.any.worker.html", {} - ], - [ - 
"/html/webappapis/microtask-queuing/queue-microtask.https.any.serviceworker.html", - {} ] ], "html/webappapis/microtask-queuing/queue-microtask.window.js": [ @@ -368767,6 +368940,54 @@ {} ] ], + "network-error-logging/no-report-on-failed-cors-preflight.https.html": [ + [ + "/network-error-logging/no-report-on-failed-cors-preflight.https.html", + {} + ] + ], + "network-error-logging/no-report-on-subdomain-404.https.html": [ + [ + "/network-error-logging/no-report-on-subdomain-404.https.html", + {} + ] + ], + "network-error-logging/no-report-on-subdomain-success.https.html": [ + [ + "/network-error-logging/no-report-on-subdomain-success.https.html", + {} + ] + ], + "network-error-logging/reports-are-not-observable.https.html": [ + [ + "/network-error-logging/reports-are-not-observable.https.html", + {} + ] + ], + "network-error-logging/sends-report-on-404.https.html": [ + [ + "/network-error-logging/sends-report-on-404.https.html", + {} + ] + ], + "network-error-logging/sends-report-on-subdomain-dns-failure.https.html": [ + [ + "/network-error-logging/sends-report-on-subdomain-dns-failure.https.html", + {} + ] + ], + "network-error-logging/sends-report-on-success-with-subdomain-policy.https.html": [ + [ + "/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html", + {} + ] + ], + "network-error-logging/sends-report-on-success.https.html": [ + [ + "/network-error-logging/sends-report-on-success.https.html", + {} + ] + ], "notifications/constructor-basic.html": [ [ "/notifications/constructor-basic.html", @@ -388634,6 +388855,10 @@ "/secure-contexts/idlharness.any.html", {} ], + [ + "/secure-contexts/idlharness.any.serviceworker.html", + {} + ], [ "/secure-contexts/idlharness.any.sharedworker.html", {} @@ -388641,10 +388866,6 @@ [ "/secure-contexts/idlharness.any.worker.html", {} - ], - [ - "/secure-contexts/idlharness.https.any.serviceworker.html", - {} ] ], "secure-contexts/shared-worker-insecure-first.https.html": [ @@ -393003,15 +393224,27 @@ 
{} ] ], - "trusted-types/DOMParser-requiresTrustedTypes.tentative.html": [ + "trusted-types/DOMParser-parseFromString.tentative.html": [ [ - "/trusted-types/DOMParser-requiresTrustedTypes.tentative.html", + "/trusted-types/DOMParser-parseFromString.tentative.html", {} ] ], - "trusted-types/DOMParser.tentative.html": [ + "trusted-types/Document-write.tentative.html": [ [ - "/trusted-types/DOMParser.tentative.html", + "/trusted-types/Document-write.tentative.html", + {} + ] + ], + "trusted-types/Element-insertAdjacentHTML.tentative.html": [ + [ + "/trusted-types/Element-insertAdjacentHTML.tentative.html", + {} + ] + ], + "trusted-types/Element-outerHTML.tentative.html": [ + [ + "/trusted-types/Element-outerHTML.tentative.html", {} ] ], @@ -393021,15 +393254,27 @@ {} ] ], - "trusted-types/TrustedHTML.tentative.html": [ + "trusted-types/Location-assign.tentative.html": [ [ - "/trusted-types/TrustedHTML.tentative.html", + "/trusted-types/Location-assign.tentative.html", {} ] ], - "trusted-types/TrustedScriptURL.tentative.html": [ + "trusted-types/Location-href.tentative.html": [ [ - "/trusted-types/TrustedScriptURL.tentative.html", + "/trusted-types/Location-href.tentative.html", + {} + ] + ], + "trusted-types/Location-replace.tentative.html": [ + [ + "/trusted-types/Location-replace.tentative.html", + {} + ] + ], + "trusted-types/Range-createContextualFragment.tentative.html": [ + [ + "/trusted-types/Range-createContextualFragment.tentative.html", {} ] ], @@ -393039,9 +393284,9 @@ {} ] ], - "trusted-types/TrustedURL.tentative.html": [ + "trusted-types/Window-open.tentative.html": [ [ - "/trusted-types/TrustedURL.tentative.html", + "/trusted-types/Window-open.tentative.html", {} ] ], @@ -393051,123 +393296,63 @@ {} ] ], + "trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html": [ + [ + "/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html", + {} + ] + ], + 
"trusted-types/block-string-assignment-to-Document-write.tentative.html": [ + [ + "/trusted-types/block-string-assignment-to-Document-write.tentative.html", + {} + ] + ], + "trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html": [ + [ + "/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html", + {} + ] + ], + "trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html": [ + [ + "/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html", + {} + ] + ], "trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html": [ [ "/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html", {} ] ], - "trusted-types/block-string-assignment-to-createContextualFragment.tentative.html": [ + "trusted-types/block-string-assignment-to-Location-assign.tentative.html": [ [ - "/trusted-types/block-string-assignment-to-createContextualFragment.tentative.html", + "/trusted-types/block-string-assignment-to-Location-assign.tentative.html", {} ] ], - "trusted-types/block-string-assignment-to-innerHTML.tentative.html": [ + "trusted-types/block-string-assignment-to-Location-href.tentative.html": [ [ - "/trusted-types/block-string-assignment-to-innerHTML.tentative.html", + "/trusted-types/block-string-assignment-to-Location-href.tentative.html", {} ] ], - "trusted-types/block-string-assignment-to-insertAdjacentHTML.tentative.html": [ + "trusted-types/block-string-assignment-to-Location-replace.tentative.html": [ [ - "/trusted-types/block-string-assignment-to-insertAdjacentHTML.tentative.html", + "/trusted-types/block-string-assignment-to-Location-replace.tentative.html", {} ] ], - "trusted-types/block-string-assignment-to-location-assign.tentative.html": [ + "trusted-types/block-string-assignment-to-Range-createContextualFragment.tentative.html": [ [ - "/trusted-types/block-string-assignment-to-location-assign.tentative.html", + 
"/trusted-types/block-string-assignment-to-Range-createContextualFragment.tentative.html", {} ] ], - "trusted-types/block-string-assignment-to-location-href.tentative.html": [ + "trusted-types/block-string-assignment-to-Window-open.tentative.html": [ [ - "/trusted-types/block-string-assignment-to-location-href.tentative.html", - {} - ] - ], - "trusted-types/block-string-assignment-to-location-replace.tentative.html": [ - [ - "/trusted-types/block-string-assignment-to-location-replace.tentative.html", - {} - ] - ], - "trusted-types/block-string-assignment-to-outerHTML.tentative.html": [ - [ - "/trusted-types/block-string-assignment-to-outerHTML.tentative.html", - {} - ] - ], - "trusted-types/block-string-assignment-to-window-open.tentative.html": [ - [ - "/trusted-types/block-string-assignment-to-window-open.tentative.html", - {} - ] - ], - "trusted-types/createContextualFragment.tentative.html": [ - [ - "/trusted-types/createContextualFragment.tentative.html", - {} - ] - ], - "trusted-types/document-write.tentative.html": [ - [ - "/trusted-types/document-write.tentative.html", - {} - ] - ], - "trusted-types/innerHTML.tentative.html": [ - [ - "/trusted-types/innerHTML.tentative.html", - {} - ] - ], - "trusted-types/insertAdjacentHTML.tentative.html": [ - [ - "/trusted-types/insertAdjacentHTML.tentative.html", - {} - ] - ], - "trusted-types/location-assign.tentative.html": [ - [ - "/trusted-types/location-assign.tentative.html", - {} - ] - ], - "trusted-types/location-href.tentative.html": [ - [ - "/trusted-types/location-href.tentative.html", - {} - ] - ], - "trusted-types/location-replace.tentative.html": [ - [ - "/trusted-types/location-replace.tentative.html", - {} - ] - ], - "trusted-types/outerHTML.tentative.html": [ - [ - "/trusted-types/outerHTML.tentative.html", - {} - ] - ], - "trusted-types/srcDoc-requiresTrustedTypes.tentative.html": [ - [ - "/trusted-types/srcDoc-requiresTrustedTypes.tentative.html", - {} - ] - ], - "trusted-types/srcDoc.tentative.html": 
[ - [ - "/trusted-types/srcDoc.tentative.html", - {} - ] - ], - "trusted-types/window-open.tentative.html": [ - [ - "/trusted-types/window-open.tentative.html", + "/trusted-types/block-string-assignment-to-Window-open.tentative.html", {} ] ], @@ -397826,6 +398011,10 @@ "/websockets/basic-auth.any.html", {} ], + [ + "/websockets/basic-auth.any.serviceworker.html", + {} + ], [ "/websockets/basic-auth.any.sharedworker.html", {} @@ -397833,10 +398022,6 @@ [ "/websockets/basic-auth.any.worker.html", {} - ], - [ - "/websockets/basic-auth.https.any.serviceworker.html", - {} ] ], "websockets/binary/001.html": [ @@ -432596,8 +432781,40 @@ "1bbe9e5ac609aa33914ad79d4af7cb2fdf45b9c7", "support" ], - "async-local-storage/storage-smoke-test.https.tentative.html": [ - "b4d66dabc7a177742666377718b30f84a10de744", + "async-local-storage/api-surface.tentative.https.html": [ + "eea51abd539787f531cbf32e28737c135c63c8d7", + "testharness" + ], + "async-local-storage/helpers/als-tests.js": [ + "fd6d6844f72e39d16e070ec461b99ab62a0b4547", + "support" + ], + "async-local-storage/helpers/class-assert.js": [ + "31b25cab9f2d88d8df59a0b4ecb35eef3765e380", + "support" + ], + "async-local-storage/helpers/equality-asserters.js": [ + "ad4623c179d75c8d4ce8b1fa8503f943bf6a7c77", + "support" + ], + "async-local-storage/key-types.tentative.https.html": [ + "771ee2f9749a00ec4e33019512a9bf8d145a3ce6", + "testharness" + ], + "async-local-storage/non-secure-context-dynamic-import.tentative.html": [ + "9270f6c82fa2018d1d6c3a199cbe5f6ca2403b56", + "testharness" + ], + "async-local-storage/non-secure-context-import-statement.tentative.html": [ + "879729696dbb6a767530d77fbd94af0b42afe6b4", + "testharness" + ], + "async-local-storage/non-secure-context-script-element.tentative.html": [ + "feeddafc8daa02556eb0c5fe068dbde1d45642da", + "testharness" + ], + "async-local-storage/storage-smoke-test.tentative.https.html": [ + "f978480ff2b80ba5f892c2a2c429e882d655574d", "testharness" ], "audio-output/META.yml": [ @@ 
-432629,7 +432846,7 @@ "testharness" ], "background-fetch/fetch.https.window.js": [ - "843506947f7176761f0d47b9a7fa83ab187322b1", + "33c8124ffa4569c905e9243703cb245ef3f81ca3", "testharness" ], "background-fetch/get-ids.https.window.js": [ @@ -514665,30 +514882,30 @@ "support" ], "css/compositing/parsing/background-blend-mode-invalid.html": [ - "1983adad24de2404efb205a5c79dae3464f509d6", + "f939195f3e5cced1a5a71271b69aae00f60a451c", "testharness" ], "css/compositing/parsing/background-blend-mode-valid.html": [ - "eebe646bf8a269bae54d03948be2ac2c36052800", + "4f1cfb8dcafdeeed5f78e3768463238d4e7d449c", "testharness" ], "css/compositing/parsing/isolation-invalid.html": [ - "bb3a5db31c3dd0c5b44a85fd58dac4bbb33284a4", + "8278431ad8a9883d8279576e79e6cd5daa37e30a", "testharness" ], "css/compositing/parsing/isolation-valid.html": [ - "6fa4cc9bc76bab577203a3474916e3f37d18d348", + "430533858ebdc2f952876fc3658e5fe1bb7d6b30", "testharness" ], "css/compositing/parsing/mix-blend-mode-invalid.html": [ - "a7c1232c8a1999935ad40c2cd20ed7ef882ba0bc", + "c62ef1369dfedaba2305b6f12b8ea5a4c2aff0eb", "testharness" ], "css/compositing/parsing/mix-blend-mode-valid.html": [ - "2033f404941ea650623bf8dd56f848e911f7aa79", + "324961499742adfb8c4dc84b5e939f22377ad624", "testharness" ], - "css/compositing/support/parsing-testcommon.js": [ + "css/compositing/parsing/support/parsing-testcommon.js": [ "b075882f89aae49b419220b234534241cde5fd42", "support" ], @@ -518032,10 +518249,6 @@ "9f2135417278f6d0528eb3f66d255508a62571a9", "reftest" ], - "css/css-backgrounds/box-shadow-syntax-001.html": [ - "4b8f1869f57a16e883f9bb21ae9bbb2ff381fc1c", - "testharness" - ], "css/css-backgrounds/box-shadow/box-shadow-blur-definition-001.xht": [ "ab6116c17d5b4c44e3c378ddef04b6f8ed73514d", "visual" @@ -518141,173 +518354,177 @@ "visual" ], "css/css-backgrounds/parsing/background-attachment-invalid.html": [ - "0af7394aa42829d66051a0920e8ba62176c86159", + "68b18dcc2d06de654ca34042f67e5182a9ea1ff5", "testharness" ], 
"css/css-backgrounds/parsing/background-attachment-valid.html": [ - "9a0f240991087f89fcdf04d35d5bcafe697555d7", + "600f310b4628c9f1f83c89edc42bd96f084b5aff", "testharness" ], "css/css-backgrounds/parsing/background-clip-invalid.html": [ - "50b887e1c61c40a1b8c0d9e7a1a33898b30b433d", + "c2492a2a94a937dd0e526dd06065c4564605e0d9", "testharness" ], "css/css-backgrounds/parsing/background-clip-valid.html": [ - "2778315260afd65cd7a4af7f000ac4c2fe88e4e7", + "5350a17cbec0a698574b018feb6ef0c6a6164153", "testharness" ], "css/css-backgrounds/parsing/background-color-invalid.html": [ - "ff90dffb31114a5d610ed3627d45ae6bb28047af", + "d7c688d6b6a68aae309d5dd4fc63709adb76d901", "testharness" ], "css/css-backgrounds/parsing/background-color-valid.html": [ - "b84e13fb957ccf06a2bcbae472d01b8430d2223c", + "d7e864c29b9304907b46cb7e7f973f965726ff8f", "testharness" ], "css/css-backgrounds/parsing/background-image-invalid.html": [ - "12103e01dfa2e25afe03c17fcc48b2fd3ed1a080", + "43d7f67b3e9124bfcb53e4efd508f644c9f7874d", "testharness" ], "css/css-backgrounds/parsing/background-image-valid.html": [ - "601efb6898a91d7ed322ea3d1f70aff2ca2e17d4", + "ffbf33b6d6497d77f02eb5a4e14204ea4d2f642c", "testharness" ], "css/css-backgrounds/parsing/background-invalid.html": [ - "19f5d965b4832386f940caf261863d9fd66ed757", + "c3deef9bc90abc28e5f4c2fb72c0eea922c94399", "testharness" ], "css/css-backgrounds/parsing/background-origin-invalid.html": [ - "1ef2d9faa897e980b944ef9ba80b79dc3b7246cf", + "9eead06baebb98ab560b0012d9e8dbb9b1d74be5", "testharness" ], "css/css-backgrounds/parsing/background-origin-valid.html": [ - "bbd59d39790a5208543000a77f0898720372a1e6", + "1c19b96ec430c210ca74ed0686a43ebfa5f142f0", "testharness" ], "css/css-backgrounds/parsing/background-position-invalid.html": [ - "9a3e9cb0a619f9b6aba37f16a55c49da041018fd", + "583606e14d28ea1e2782e12e382d72be51a2467d", "testharness" ], "css/css-backgrounds/parsing/background-position-valid.html": [ - "371bc7a351281257b8db13df5750afc437517b7c", + 
"dbb97406a5a0492fee2f7fbe16e790736b717e20", "testharness" ], "css/css-backgrounds/parsing/background-repeat-invalid.html": [ - "55229a70cefface8ef53c02978f30fe484fb075a", + "3ae01662d08caa6695c607e882a9eb57c3257eac", "testharness" ], "css/css-backgrounds/parsing/background-repeat-valid.html": [ - "3a00b19c4c50a28c6db972de4bcce6b94e0abd92", + "b69eb5ff03001d0fa68cb52b0883122f11b349f9", "testharness" ], "css/css-backgrounds/parsing/background-size-invalid.html": [ - "7497f43df97b789fd3a9974df8043986e81ff145", + "cfb9a1c077cd282b63a160da6b0377b3e6587771", "testharness" ], "css/css-backgrounds/parsing/background-size-valid.html": [ - "71d60698dca7915e3a580ac3c6a9ad966caad389", + "224421177467c40821e34c537c166e724f0ec7a7", "testharness" ], "css/css-backgrounds/parsing/background-valid.html": [ - "39c4672fb35b103b4a96c92bdeab4ced7c550e86", + "96529e5604a60595ab01e5d86e6679397a534d9b", "testharness" ], "css/css-backgrounds/parsing/border-color-invalid.html": [ - "67248dd612219633456e2e091aa0fbd7cc72023c", + "3916c4f2ecbdc92f282628923f87f485ee5b838f", "testharness" ], "css/css-backgrounds/parsing/border-color-valid.html": [ - "c69f9622811f8d06758dadbdb313e4ab0be260a7", + "63532a99a4b0310c1cf5a4d260bc9876c0dcb6c7", "testharness" ], "css/css-backgrounds/parsing/border-image-invalid.html": [ - "87194a5477d3fa285bdc3e340659e43285107c76", + "08c61dfb5549b4fe0f702a222ec0e6810bab9145", "testharness" ], "css/css-backgrounds/parsing/border-image-outset-invalid.html": [ - "5e76198141c8baef70367e6d8713629e35fe6440", + "4faaa3ca0d977e2abaf57cd4fa394ddc1c7d6029", "testharness" ], "css/css-backgrounds/parsing/border-image-outset-valid.html": [ - "5ead2608d2577fe37e0a6b04513e4619ea659515", + "781a6e0a8abe8da4fb32ed4ff812e5f7c1daebe7", "testharness" ], "css/css-backgrounds/parsing/border-image-repeat-invalid.html": [ - "0a70aab2412982045a41905b45dc0d44f6cf64f6", + "962ab22fbf003d46b1f0747e2546410402f8c007", "testharness" ], "css/css-backgrounds/parsing/border-image-repeat-valid.html": [ - 
"e77ae48fd192d4b35374d47376083f80fed48619", + "381b059523555db30c3eaef03748e92812b0071c", "testharness" ], "css/css-backgrounds/parsing/border-image-slice-invalid.html": [ - "b2f55f14fbe64cec31a40943b2559ef1cc164159", + "4ccf441f75aa7c14fec8277644faf2db38d30eaf", "testharness" ], "css/css-backgrounds/parsing/border-image-slice-valid.html": [ - "66d747f7cd99f0b14a0ccd8fa9c9190aebcb9753", + "297319442383e83356902e512299cfeea4a1b22c", "testharness" ], "css/css-backgrounds/parsing/border-image-source-invalid.html": [ - "78d042ded419e744ff6b4e28e33501f3439f5e14", + "28edd7855df021804834a78cfd522e457268917e", "testharness" ], "css/css-backgrounds/parsing/border-image-source-valid.html": [ - "c032dcda878b9606ccbca1971b9bd369f82c859d", + "c080d395078e41871a050a9893a05bd788ad477a", "testharness" ], "css/css-backgrounds/parsing/border-image-valid.html": [ - "880dc87700b66e742f3b07a5683c31aa6510b4cb", + "8bf9cd51fcd3eda774c309c875f9fb72fce3bfc4", "testharness" ], "css/css-backgrounds/parsing/border-image-width-invalid.html": [ - "c7fba1c2ce32a94c7fe777ffdece604403721128", + "445972353775d44df07c5d15f1e56c5c1f26ea7f", "testharness" ], "css/css-backgrounds/parsing/border-image-width-valid.html": [ - "5917c7f6b9d16dbd557a3ac50ce3b5e186265cba", + "e1316c337087947407677e563c3c665b5afc9747", "testharness" ], "css/css-backgrounds/parsing/border-invalid.html": [ - "baaa66c37da811c5efc5cae2d7e2018561443e04", + "a3d28dfbac05749156734f8877119d408535e506", "testharness" ], "css/css-backgrounds/parsing/border-radius-invalid.html": [ - "28460aa598d019bcfd4057e5ee61e1699ea6943d", + "9d767a961ea67c2e01290b2eb6cc1436894e0579", "testharness" ], "css/css-backgrounds/parsing/border-radius-valid.html": [ - "96cdf2019da1f3fae94fe37566178f1aa9841942", + "40a9d9e8e4083675eaf8c0445ac2f79416adf117", "testharness" ], "css/css-backgrounds/parsing/border-style-invalid.html": [ - "ecc9d6b43a6d3c047a994009baae82b7b8dbc628", + "42bd61ad1e91905f765a75ab8213451113f9ba5c", "testharness" ], 
"css/css-backgrounds/parsing/border-style-valid.html": [ - "dc8311b1243abab6d23401dd606b51902a094239", + "10fa39bbf6cdd67ba7828db90e266753b09351c4", "testharness" ], "css/css-backgrounds/parsing/border-valid.html": [ - "e94938a3a487b05ff5c3eb56b5697e850afdea03", + "5ca8bbdc43ec9a38c1495e308495287338913ef2", "testharness" ], "css/css-backgrounds/parsing/border-width-invalid.html": [ - "85a3414d8132c3c5573201250146f857a898ca81", + "2edd05ea42b8b4b00a121d50b219da7d446297db", "testharness" ], "css/css-backgrounds/parsing/border-width-valid.html": [ - "00dc1c0fcff86382486e88e0383ae54d6597e93e", + "5a9b174c91531af56b5e42bda2b9df813629464e", "testharness" ], "css/css-backgrounds/parsing/box-shadow-invalid.html": [ - "1bcfc694d4c31056aee1af81bfc049f91dae5d92", + "a38ae2fce74292e537cdaed849c939df6921c060", "testharness" ], "css/css-backgrounds/parsing/box-shadow-valid.html": [ - "29bb263bb0dda17f58c11322e353f9975077755d", + "be72b78cbbe42898a8693706d57852fd71cdb4d0", "testharness" ], + "css/css-backgrounds/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-backgrounds/reference/60x60-green-background.html": [ "d19ed4ea2678a45bbe53838d6eebf61ab641bcdd", "support" @@ -518708,10 +518925,6 @@ "2ca46e53f3c3db20d4cd14d7284f9ba1c658bed9", "support" ], - "css/css-backgrounds/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-backgrounds/support/pattern-grg-rgr-grg.png": [ "6fcfeb4883edea810f880fabb861e09df7871695", "support" @@ -518897,105 +519110,105 @@ "support" ], "css/css-box/parsing/clear-invalid.html": [ - "a91e61f97e216b409a3e337b0546c40c8a4df9ce", + "636704330e3c36608d925d370a5e8f056031fbb7", "testharness" ], "css/css-box/parsing/clear-valid.html": [ - "db0b5aa094de27aedf2f6d7713125cce01eb2a82", + "a849f6c06de9e519c91e83b0fc4e1798cbb88ee9", "testharness" ], "css/css-box/parsing/float-invalid.html": [ - "0ff53be36ac8864603c70479f4e7e50810ad7606", + 
"8c3d28f578a62c18a9ae276a52cef2a52360170d", "testharness" ], "css/css-box/parsing/float-valid.html": [ - "aecbd99d2201a0774d6ca407168606eb4f81c5a5", + "9ff6ef43a22fd8a3c55fa2a5fdcac2b114c8bd42", "testharness" ], "css/css-box/parsing/height-invalid.html": [ - "acc595f063a23e9eee11262494dacc93ba4bc97d", + "f15bd27de808fbec41df51fe3c884437873ce22f", "testharness" ], "css/css-box/parsing/height-valid.html": [ - "38f76ea24abb0bfad60743ba7597020b274a0d7f", + "a89ec6b5502928fddd8eb62002489d61fbdc1b81", "testharness" ], "css/css-box/parsing/margin-invalid.html": [ - "9c21749c8cc831140d264b318a1a712df100df0b", + "917fc927e9dd8d21c3900ede47b8e2a789651527", "testharness" ], "css/css-box/parsing/margin-valid.html": [ - "fc14af256281e5f152fc4c3d5432bef8d7c4881f", + "1715a9c924e702768bae5f7b55b942bf6824d7e1", "testharness" ], "css/css-box/parsing/max-height-invalid.html": [ - "9b8e72dd86957e7e3055f1570de7212125959ea6", + "61c85d129102fd96e57970e165164944ad8e2102", "testharness" ], "css/css-box/parsing/max-height-valid.html": [ - "35ad18ab2666e3cb057bfc66f4bf41f0ac79625e", + "ca58cec3a9cdff2952f9770ae4df8f939c22a8a4", "testharness" ], "css/css-box/parsing/max-width-invalid.html": [ - "487d15e6ec614a84a96db8531e5fbd8eab137e27", + "57bcbbf168c88324ac2919f063701ffcb7a37a85", "testharness" ], "css/css-box/parsing/max-width-valid.html": [ - "95b2ae9f55432b2a03be9cb2810fb2a4eb1e5b77", + "e098b804c08bd86f228a0f3d3be4c02c111f17ed", "testharness" ], "css/css-box/parsing/min-height-invalid.html": [ - "a35ac3f8d05342a29a051aa80d3dbf87ed56c4cd", + "26a1e10a34c5e8a5f1c2df7a89b08b25341dc132", "testharness" ], "css/css-box/parsing/min-height-valid.html": [ - "1a2b838dcbc7732dc6120a33c5823f3b845566af", + "576a5c07989ef187008545000148747032f8d2fb", "testharness" ], "css/css-box/parsing/min-width-invalid.html": [ - "2a290901ed8ac6deb4aed56510a94ba62bc7831e", + "d71777fea6c2de47fc0f2243b4af1c21b15c6509", "testharness" ], "css/css-box/parsing/min-width-valid.html": [ - 
"ebe9ee70e85fe35b7c9a1cd7407057eb5b53b285", + "406452dd583f40532ce6f904785264d9a16ed366", "testharness" ], "css/css-box/parsing/overflow-invalid.html": [ - "da09e64e69ea07963d7f1c9fd1a5cc09c61b50ab", + "bf3ee3d72ba35abe941a6e298a43939107db0a10", "testharness" ], "css/css-box/parsing/overflow-valid.html": [ - "898d7607de99527b669e098348f1dd72aea44878", + "6bef585856c3d038e63f9d24a06b75d3b5f2a7db", "testharness" ], "css/css-box/parsing/padding-invalid.html": [ - "7a83d75accad2fa4cf0527fd3d5b82b10a0deacf", + "7e172efed30f45166747ccedd45d22bbf904ba6b", "testharness" ], "css/css-box/parsing/padding-valid.html": [ - "33522ba6dbb98bf63454dcc289958ade1473c8c0", + "e5a932e1417400947c07ab5150297da174c2c354", "testharness" ], + "css/css-box/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-box/parsing/visibility-invalid.html": [ - "07fe6ebe18acf36f4a7302cd08c0f1c36cd0b90f", + "892ca54815f67f029bb3c7c2aefb48dacd8a0851", "testharness" ], "css/css-box/parsing/visibility-valid.html": [ - "de1cb9b0c11e9cbaa7beedd77c1dfc39fb6d0c6a", + "2c1143dfa5c320a5270e47adc36b0a3ced979976", "testharness" ], "css/css-box/parsing/width-invalid.html": [ - "defd7d27912a0f1e4def24f4607ae6a598669b1d", + "bcbb3d663ae3190ca33ea7cef3b8176b3e50ea9b", "testharness" ], "css/css-box/parsing/width-valid.html": [ - "f4be6b2b132232a3d415515c887636bbbb12465f", + "653b11e993cf99c4196582440cb45bd144c4f38b", "testharness" ], - "css/css-box/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-break/META.yml": [ "bfd40518b4a6477632fa319f3e53fe22517d326d", "support" @@ -519049,13 +519262,17 @@ "reftest" ], "css/css-cascade/parsing/all-invalid.html": [ - "526f9fdf575736609b57a8a56f251223c8310c01", + "8b7c044a0341ad732640b7a827c9f24a9c1d4f5f", "testharness" ], "css/css-cascade/parsing/all-valid.html": [ - "42baa25d016b5b719f32dc759dc0962ddf600624", + "668a6c2424125c8d84de85ec6dee694e7cc4799f", 
"testharness" ], + "css/css-cascade/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-cascade/reference/ref-filled-green-100px-square.xht": [ "05a13794482a94f6c10bd9d4c98704e63ef60331", "support" @@ -519064,10 +519281,6 @@ "a346267c7bdb08b6b67ca68c6e821ea71ab1eefa", "reftest" ], - "css/css-cascade/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-cascade/unset-val-001.html": [ "c25b980a3b5734df4782ee5b6feb32d845f19e6d", "reftest" @@ -519317,21 +519530,25 @@ "reftest" ], "css/css-color/parsing/color-invalid.html": [ - "ec59e0fb8be770bc013c19c3746a434a83fa103d", + "fe44aad6fc6a3d9805f2f29ce84970468224e6d1", "testharness" ], "css/css-color/parsing/color-valid.html": [ - "ee5948efec207fd2d473097f6223f9c344628b9f", + "fb240182c31a2d3056aa68a52cdc04cdbbbe421a", "testharness" ], "css/css-color/parsing/opacity-invalid.html": [ - "7e64036eb7a1de16feb785c2490a4c13f3469689", + "d9b9cc86abb0de9c55336b3872a7a61d25f228b8", "testharness" ], "css/css-color/parsing/opacity-valid.html": [ - "bf22b092ca09b1a48eddd47b5c19b92801d2c1e8", + "8ae302e09f34c5593f58f352adab88fccc906b97", "testharness" ], + "css/css-color/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-color/rebeccapurple-ref.html": [ "8c15364f38e53cc1651f2f6c115c7b2fca3f4ba9", "support" @@ -519404,10 +519621,6 @@ "65eab55794c39e47d6afb5b71e5fea4b9b671b37", "reftest" ], - "css/css-color/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-color/t31-color-currentColor-b-ref.html": [ "3013c7050c3c6f057e295923d43c87da6c09751f", "support" @@ -536017,53 +536230,57 @@ "support" ], "css/css-images/parsing/gradient-position-invalid.html": [ - "f224263952666372d59d82a72d31cf8a6dd3450f", + "626b9f130724b028eaebc96c9b49a241042f78e6", "testharness" ], 
"css/css-images/parsing/gradient-position-valid.html": [ - "04ca328da01babeaf249ba3dff4e312900af39ac", + "382fc61636061bfc9ad0d3e357dac83c27c297f1", "testharness" ], "css/css-images/parsing/image-orientation-invalid.html": [ - "a55e335e64f1caa83f97a2eaf5df979147d86586", + "f2a53fe5f262852626e7aaac8f14ea5451ac9360", "testharness" ], "css/css-images/parsing/image-orientation-valid.html": [ - "c1e2015b302d8614fadca491cf4da47d596094e5", + "ea2ec1e699ab9587e496f44e235c1f73a57b0e25", "testharness" ], "css/css-images/parsing/image-rendering-invalid.html": [ - "1ce1a2327a6febadd1aaabb686174a70e90ab287", + "9c54c5d7f1598bfd35dd010298d78dc0c480309b", "testharness" ], "css/css-images/parsing/image-rendering-valid.html": [ - "1918a247313e9d39be7fdf5b4112271908fcd8cf", + "b3b73edfde919d7666fcd669d9c8b1db31c1624d", "testharness" ], "css/css-images/parsing/image-resolution-invalid.html": [ - "d0998cc2b7f78f69649b8a2c1d665464fe979de3", + "e45ed73e23cd814872afabdd43db4b081fe5e29d", "testharness" ], "css/css-images/parsing/image-resolution-valid.html": [ - "6ee676f429f842f4701c70a1296428ce0610a786", + "7345227fcafce45f5ca12c91523622349c07964a", "testharness" ], "css/css-images/parsing/object-fit-invalid.html": [ - "26841d4be7eccaeb658776d8f0d67dddb9751406", + "574573c8015f8fbc120779a9361169ded1ed21f1", "testharness" ], "css/css-images/parsing/object-fit-valid.html": [ - "50ea7254b0bbd2f12f8ce73152ca15d263cb8bf5", + "8b531abad20e69c905650f34ec4cd26417381e01", "testharness" ], "css/css-images/parsing/object-position-invalid.html": [ - "d722f1a1af94818e047713d30d71f19def9eaeae", + "346b79a841aa6016b78d427f18f402b771368640", "testharness" ], "css/css-images/parsing/object-position-valid.html": [ - "7121cc42021a1122a2b96d893469252fb5e4ce1b", + "b7be35ccd74f01a0b966426bc5876afd35a1248d", "testharness" ], + "css/css-images/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-images/support/1x1-green.gif": [ 
"e023d92c7ad04264d06196d47a5edd828a7f71db", "support" @@ -536140,10 +536357,6 @@ "833e6e36cdf316be9e4f54dc68732712afe11ba2", "support" ], - "css/css-images/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-images/support/pattern-grg-rgr-grg.png": [ "6fcfeb4883edea810f880fabb861e09df7871695", "support" @@ -537733,30 +537946,30 @@ "testharness" ], "css/css-masking/parsing/clip-invalid.html": [ - "919396375a2947be03001e21af72c4f09fc4ddab", + "18ae8b552a5904097a4b9f0f639b3d0ca123242b", "testharness" ], "css/css-masking/parsing/clip-path-invalid.html": [ - "881cbc08a64b6170a6ae7ce00ca3306cb51dada6", + "3d33de251f6c046a6cd15a6cb84416563e2dc293", "testharness" ], "css/css-masking/parsing/clip-path-valid.html": [ - "fef14c97d5c529e2e54b19b263dc13340f44a17d", + "1d55ed785533df06ca250a341022ffcfaaa2027c", "testharness" ], "css/css-masking/parsing/clip-rule-invalid.html": [ - "50694703ce46efa41305abfcad060970df64334c", + "3088d07af8e30788b3454c1923d50a2071ef768c", "testharness" ], "css/css-masking/parsing/clip-rule-valid.html": [ - "ca805e1fcecc3b6c4c4f42d3461b36f18c89c3e4", + "2fb19073a3e64e941d975f6a60305f97bbc8ab00", "testharness" ], "css/css-masking/parsing/clip-valid.html": [ - "95c31f7de06cf22f3d72f29f280278338d6e7536", + "a21e90178b708a52f397fd890db25477708fb16c", "testharness" ], - "css/css-masking/support/parsing-testcommon.js": [ + "css/css-masking/parsing/support/parsing-testcommon.js": [ "b075882f89aae49b419220b234534241cde5fd42", "support" ], @@ -542865,37 +543078,41 @@ "testharness" ], "css/css-shapes/parsing/shape-image-threshold-invalid.html": [ - "dcabbe6cce1dc627f28ffe55bef09dce0dfc7525", + "eb60a2e44289dba4075cc6ae61d6e0a94a520da9", "testharness" ], "css/css-shapes/parsing/shape-image-threshold-valid.html": [ - "0a71e75ba0a2abba82efce5880c17d2f44c9f80b", + "4fe1efceb03e593b8010252a9201d7c2d3a64313", "testharness" ], "css/css-shapes/parsing/shape-margin-invalid.html": [ - 
"ebb1938d422422142ef3caa9699d7a419dce0b0e", + "cdaea05039527134af6de034674f8d95dc0e4e64", "testharness" ], "css/css-shapes/parsing/shape-margin-valid.html": [ - "f36a8eb3249210538e1be6d1d30fa7a6d062cd2e", + "28f094b6abb05bc751f7508ec29b7bc4fe701eae", "testharness" ], "css/css-shapes/parsing/shape-outside-invalid-position.html": [ - "90023aeaf25f3f318b57e691f8ff31462948ebbb", + "3bca706b153dd62267ea767742a50784367261aa", "testharness" ], "css/css-shapes/parsing/shape-outside-invalid.html": [ - "a2a8923818e7500ee895c27c2ee3deae7c38daff", + "57ce32e7fe6922f9378501f355183addeaa3101a", "testharness" ], "css/css-shapes/parsing/shape-outside-valid-position.html": [ - "225b0156ad77af4ea24a509faf26cbcdc9ea854e", + "ec0a16d568d5ac0b1ce6dac907c7eecd907f5ab8", "testharness" ], "css/css-shapes/parsing/shape-outside-valid.html": [ - "28f45353dcb316350c11bb304db93184a349944d", + "1a70d10f831fb6a451149769a54cdd1839b1d8d3", "testharness" ], + "css/css-shapes/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-shapes/shape-outside-invalid-001.html": [ "c6c7400f7d177bde46ad1a35a6bd77f6ff27d71b", "testharness" @@ -543849,7 +544066,7 @@ "testharness" ], "css/css-shapes/shape-outside/values/support/parsing-utils.js": [ - "68b0c1b1f7c4641cc8e28eb0dca52163b71eb1f7", + "06007f50939e251851be58a896208d03bb58d0dd", "support" ], "css/css-shapes/spec-examples/reference/shape-outside-001-ref.html": [ @@ -544016,10 +544233,6 @@ "85dd7324815b8f8ef1a1d0496224c1a0661db9d8", "support" ], - "css/css-shapes/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-shapes/support/pattern-grg-rgr-grg.png": [ "9b88fbd81149891234185f54f8b4a0431759f181", "support" @@ -550565,59 +550778,63 @@ "reftest" ], "css/css-transforms/parsing/perspective-origin-parsing-invalid.html": [ - "75350cc323633cdb4c8e63d2d35b336470764b92", + "24976fc2609e28bde89b4f1b6f0c21f64377a8ee", "testharness" ], 
"css/css-transforms/parsing/perspective-origin-parsing-valid.html": [ - "b53a34a21d7b7eed791e7bce49c3260446c68e37", + "d8ac4ccb50a1539a5c23d829c0493b669e8cb827", "testharness" ], "css/css-transforms/parsing/rotate-parsing-invalid.html": [ - "63a74e24699631ce4e77947075d775640b29c722", + "a3f625f6ab6823528ba3aa16ce53cb648908c8fc", "testharness" ], "css/css-transforms/parsing/rotate-parsing-valid.html": [ - "789ea00ac4bbdad73c6f7a17d2b5c75ba2cdd058", + "33e44e3361ddecf19ae86fb873cd59c375d7227d", "testharness" ], "css/css-transforms/parsing/scale-parsing-invalid.html": [ - "050171ca18c570b1c2f8c332a87e88ca29144e4d", + "ddc2cade2bcedcd59af8e3f40a9a510f7c521639", "testharness" ], "css/css-transforms/parsing/scale-parsing-valid.html": [ - "0d8783afc4d2b84749d4ae5eb98957a4e6fc7142", + "2b99d249aaf6a3dfa253aa9d922698903d20ef98", "testharness" ], + "css/css-transforms/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-transforms/parsing/transform-box-invalid.html": [ - "23f7ddc463e707499891581d944af28424420e50", + "415dc0720146b1041739e8bb346038212965e168", "testharness" ], "css/css-transforms/parsing/transform-box-valid.html": [ - "a5d8af9537e39581d94ef6da3fc006cb2a2a2f52", + "c2e7a5bc90479ebee991c7b0cb2cf0054c17f1e2", "testharness" ], "css/css-transforms/parsing/transform-invalid.html": [ - "02c2b9570a7b0e5c927b2e68e7e91de4e2faba25", + "bff9d7b723a58b610a7e71d6849cc2b36cda077c", "testharness" ], "css/css-transforms/parsing/transform-origin-invalid.html": [ - "0adcc1f2803a0dd8c127c5e3ea3ff0535718c465", + "0a8ef52f0ec2bdaec7d9098c4f0603c34d4358b3", "testharness" ], "css/css-transforms/parsing/transform-origin-valid.html": [ - "52aa6ff9322615d1d1d35c824b544aaf7d0c16bf", + "c9f1d73c29ec6fc737afd461a47be788d2b9931c", "testharness" ], "css/css-transforms/parsing/transform-valid.html": [ - "f9efaa1f71051d497b616194b1cd98d6661503be", + "62ad8e920f3a3ef237c375a4b08343359fda0750", "testharness" ], 
"css/css-transforms/parsing/translate-parsing-invalid.html": [ - "e602d91bb1324fb86c1ef57fafa6a2a2085139e7", + "8aa6de5e2810287c4cac9f7e7f3d7586d43b526d", "testharness" ], "css/css-transforms/parsing/translate-parsing-valid.html": [ - "ab4f27f1019408c681e3be36b855a16d8acc8eaf", + "86b4deb489b29183617a47a845677a02e49781c4", "testharness" ], "css/css-transforms/patternTransform/reference/svg-patternTransform-combination-ref.html": [ @@ -551880,10 +552097,6 @@ "9945ef47114c2841a746c99a2fb1e93e050aac8b", "support" ], - "css/css-transforms/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-transforms/support/pattern-grg-rgr-grg.png": [ "9b88fbd81149891234185f54f8b4a0431759f181", "support" @@ -557917,91 +558130,95 @@ "reftest" ], "css/css-ui/parsing/box-sizing-invalid.html": [ - "0b6ee8578abd97a69a5129997f16c21d108720e6", + "4ced38240f29163814f1d57a9923af8962d06c06", "testharness" ], "css/css-ui/parsing/box-sizing-valid.html": [ - "68010ffe34e1db1047b689a6224109905ba9fe55", + "fb60ac23bb8d2a509d1bdede4069cb09a52ae415", "testharness" ], "css/css-ui/parsing/caret-color-invalid.html": [ - "a980b0e25b5a8863c3363fa912ada8e89d9803fd", + "b4c9cb71e038b81ed2dfa582cb579395ae060aa0", "testharness" ], "css/css-ui/parsing/caret-color-valid.html": [ - "31e1b6c5ec5b909c25eb44c43080a533438af84a", + "41cc70654dade5dccfab56b0c1b18c8dba50344a", "testharness" ], "css/css-ui/parsing/cursor-invalid.html": [ - "1866b45c0aef1a1e42d476726bed9ae6aff6d547", + "dda8031d3090699885bbe896ad77158046b87bcd", "testharness" ], "css/css-ui/parsing/cursor-valid.html": [ - "b16a6fe2e8726b64dd7fc7d823fdc109c4815a51", + "e33b82e2ddc54455f7da26728e457b1821dec2ea", "testharness" ], "css/css-ui/parsing/outline-color-invalid.html": [ - "b554c1ac61196a5ef8a19c68700591db68f4ee28", + "195f55dc72bd3a943f457f700677484e94fc5ba1", "testharness" ], "css/css-ui/parsing/outline-color-valid-mandatory.html": [ - "92f1b047d7247869a7b1a759fd4bcd1b5e5d969d", + 
"fbe730bdae37d0148d767d8745589e363f98938d", "testharness" ], "css/css-ui/parsing/outline-color-valid-optional.html": [ - "9b82aeb5cd94828ad4d6a79d5969c9818565ce8d", + "bf12b87f2193bcd11e4a382ffb94073300b47eaf", "testharness" ], "css/css-ui/parsing/outline-invalid.html": [ - "f9aa61b89ec6e1495805a97e625a629ea0a6066c", + "c56a579452fb78daf9f48bd45cd996a1f2697b25", "testharness" ], "css/css-ui/parsing/outline-offset-invalid.html": [ - "1106e86d4bfe2b5b6c12add4d7aa009f39d0d7b7", + "1ee9477a1118f7f262d7e7b1c29f2978c8b80092", "testharness" ], "css/css-ui/parsing/outline-offset-valid.html": [ - "c0b8891d97bba361417fc10dcf2e85d09b32287a", + "c5e023b8e9607dd38bd0bd33cd52244e25a9b0fb", "testharness" ], "css/css-ui/parsing/outline-style-invalid.html": [ - "b93a98407aa3079a552d5fe8576826052fa7322b", + "2340c62d90299a636abbaf9973ed08e55c9d9d66", "testharness" ], "css/css-ui/parsing/outline-style-valid.html": [ - "93d14a46764bd6f42c3f8fa44105d3c176787e4b", + "5adcfda7ec75dadb3b87475cd6e0009c8aaa94ff", "testharness" ], "css/css-ui/parsing/outline-valid-mandatory.html": [ - "f8322a459347cf139780897fe8ed168c6ba0a9fc", + "a296c989bce262b620a8acc028cfa79d513a8acb", "testharness" ], "css/css-ui/parsing/outline-valid-optional.html": [ - "44cf823ba4c74e6a96cf253d8d87744c6a07c7e8", + "e179406cafb53cefc35fdc69b46b0530233dafc2", "testharness" ], "css/css-ui/parsing/outline-width-invalid.html": [ - "40e4961a0025d5a94ba2cba0c15e22df4743549e", + "07012fef225859ae8f4194f19795c24363685108", "testharness" ], "css/css-ui/parsing/outline-width-valid.html": [ - "db6c77c8645ba890d4bc059d014c9b3e86a2fb55", + "050371246ce4006b922e1ab2d69255d1cf454d5d", "testharness" ], "css/css-ui/parsing/resize-invalid.html": [ - "b166c01395c10aad1dcad6f2697fa226a84903dc", + "a56a1dbc6e24e851e9055513c7ebe86e1c847760", "testharness" ], "css/css-ui/parsing/resize-valid.html": [ - "3acc9b09e6f18740d013837f47e3fbf9c74a1dc3", + "025b0e447bb66d8daf828dccf009fcc039fa0b53", "testharness" ], + 
"css/css-ui/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-ui/parsing/text-overflow-invalid.html": [ - "22affb7cafa0f6c60f04f7805c6e54be83eb7318", + "f3945a162ec4c67fdf36753a270b3c2e350726a4", "testharness" ], "css/css-ui/parsing/text-overflow-valid.html": [ - "bc2a4fd87402d614d3b9b96cab998cb7269d1028", + "3f0aaa60938ff98e8d0f10d201d6e05d79d2771d", "testharness" ], "css/css-ui/reference/box-sizing-001-ref.html": [ @@ -559116,10 +559333,6 @@ "56346a8295ba14f002eee4e35cd864e1829ae840", "support" ], - "css/css-ui/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-ui/support/r1-1.svg": [ "84f1b9532b5f55ee48c6502ec00470b7b2d93431", "support" @@ -563373,43 +563586,47 @@ "manual" ], "css/css-writing-modes/parsing/direction-invalid.html": [ - "0927ca388b7cc7a0e37149f1c8a23b15d0d44444", + "7181dbe675d1183d7271cd9d7aebf699ec837872", "testharness" ], "css/css-writing-modes/parsing/direction-valid.html": [ - "ca4d7bb9df9e48606b92f0bb7a515488ad73a18c", + "11585cf9e43952a56f96a80bb3566b3d6a5fb8fd", "testharness" ], + "css/css-writing-modes/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/css-writing-modes/parsing/text-combine-upright-invalid.html": [ - "08cbb967bb81f19bfdf7d2f8e71daaceae364cb1", + "b8a0c14b56b7a43fce53626dcbad17997fc779bc", "testharness" ], "css/css-writing-modes/parsing/text-combine-upright-valid.html": [ - "bd56fb5c4935cf9db038a64b652e3ce81a0d4b01", + "2b15840cd7cfaefa80b61b3f175afe4774f364e0", "testharness" ], "css/css-writing-modes/parsing/text-orientation-invalid.html": [ - "118764d6c5f346600cd416ff08a2971e7009b05c", + "cc9c051049bc07ff977d08fcd796b8f31c13f5af", "testharness" ], "css/css-writing-modes/parsing/text-orientation-valid.html": [ - "2f2b6625505a1e2e613297a0fac3d185bf75c94b", + "06049c28a0aac6d2d8f3e538d3ad63aa8d68c4e3", "testharness" ], 
"css/css-writing-modes/parsing/unicode-bidi-invalid.html": [ - "de7cb6d20f71086b573a809762735ad35796f5b0", + "1e5f2a1f62a8ea8ffc6b9994bb7a5f8887f7624b", "testharness" ], "css/css-writing-modes/parsing/unicode-bidi-valid.html": [ - "97a3d59c9acf77a263627bf2199a77fa517a3026", + "087c026adcec3172f96c171b83a2c7efbb91be60", "testharness" ], "css/css-writing-modes/parsing/writing-mode-invalid.html": [ - "6a512a71834a36961bcf3022ed76904cfc6be124", + "50e42f73c6d14bbcc19737c17547a178c9cfb870", "testharness" ], "css/css-writing-modes/parsing/writing-mode-valid.html": [ - "4fd856ae9dde5128b0a8ea8740a7123697ea41d3", + "6f525109fe1bdbc1118f33f9c38ec39a8cc8b6fe", "testharness" ], "css/css-writing-modes/percent-margin-vlr-003.xht": [ @@ -565272,10 +565489,6 @@ "04dc0fe2e9e838e592f8da044555f1a379ab641f", "support" ], - "css/css-writing-modes/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/css-writing-modes/support/pass-cdts-abs-pos-non-replaced.png": [ "15fd6065ef670fcab7e0b662285c19bdd881da1d", "support" @@ -567933,29 +568146,33 @@ "testharness" ], "css/filter-effects/parsing/color-interpolation-filters-parsing-invalid.html": [ - "d07f195d62631a3896e892e40033d84b30e20a97", + "0cec437c627675b296b2ee0659ea18d886765952", "testharness" ], "css/filter-effects/parsing/color-interpolation-filters-parsing-valid.html": [ - "68313d8db7f444a611075c0b1188a131fde54cd1", + "5f7a6e653595a2b6d597dc77f81c08e3d076be09", "testharness" ], "css/filter-effects/parsing/filter-parsing-invalid.html": [ - "f89f45628ce8a2e5c755204b1a73ff32751588d2", + "06cb30c8c3c1f5a64ab8a5b93e81926ce6210714", "testharness" ], "css/filter-effects/parsing/filter-parsing-valid.html": [ - "3de19c023cf8c75ee87899393819b77b03060d3b", + "266fee237bad690a5eef67bbb486eb737566c4aa", "testharness" ], "css/filter-effects/parsing/lighting-color-parsing-invalid.html": [ - "0711cdd4cf33e4644f73ca352fa0fd9682dd19d7", + "3d8207bd55c598e9a303d615f0c7c457bac1dfa2", "testharness" ], 
"css/filter-effects/parsing/lighting-color-parsing-valid.html": [ - "495ddc5f8f6e8ff8171fefb6763843e23f60c94e", + "89b189a7f9a4a2ac4ae41c84a50d1f73e1dfc1b3", "testharness" ], + "css/filter-effects/parsing/support/parsing-testcommon.js": [ + "b075882f89aae49b419220b234534241cde5fd42", + "support" + ], "css/filter-effects/reference/filters-opacity-001-ref.html": [ "17d891ee707c36b7d404d9836060d7bb9275e841", "support" @@ -568040,10 +568257,6 @@ "9945ef47114c2841a746c99a2fb1e93e050aac8b", "support" ], - "css/filter-effects/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/filter-effects/support/pattern-grg-rgr-grg.png": [ "9b88fbd81149891234185f54f8b4a0431759f181", "support" @@ -568509,54 +568722,54 @@ "testharness" ], "css/motion/parsing/offset-anchor-parsing-invalid.html": [ - "45917c1b818c7760c42dcdd79b25787037b79520", + "817c2684bbcb95f097dee0541d1e314aec2b15af", "testharness" ], "css/motion/parsing/offset-anchor-parsing-valid.html": [ - "30101061b5ce62569bd3701494434e4f6d1088de", + "d354763b4e705c5bf18493055c9acb68606c1298", "testharness" ], "css/motion/parsing/offset-distance-parsing-invalid.html": [ - "1cbb27fe536c2ba202fd3b7ed65bde1309460f95", + "7eec1270ab8884afc2a37dfe6ee31e4f96aaf52a", "testharness" ], "css/motion/parsing/offset-distance-parsing-valid.html": [ - "56569c8e0222068860ddda93faadb2580a818a78", + "b699ecea5a1a0f3c59b0ea23c542caa1b25800d9", "testharness" ], "css/motion/parsing/offset-parsing-invalid.html": [ - "343d22e46b4714dde6f484b37ae8d3fd8772460a", + "23475f8d513211338c2239cbc0f26c6a1cf08c79", "testharness" ], "css/motion/parsing/offset-parsing-valid.html": [ - "b645199f3a13015941648df08d8583b9a7fc7fed", + "4cdb2b193891ffe12e15aff072906cae808201f0", "testharness" ], "css/motion/parsing/offset-path-parsing-invalid.html": [ - "7fbd06a508a322ac0969eb11c4299de50fd254e7", + "67c1099ba73b26812b86f0c1bf9dca9baa2ff225", "testharness" ], "css/motion/parsing/offset-path-parsing-valid.html": [ - 
"e7797686e4ac524ac9dc9f8525dbd5a24adeec29", + "d57b465ada6743e76e3e99e78a89e628815dc9ee", "testharness" ], "css/motion/parsing/offset-position-parsing-invalid.html": [ - "42370d44a38c2618d7f556d6be4b7a206e76b7e7", + "6565fa5cdd7e16a442338be282472dca8f1d54be", "testharness" ], "css/motion/parsing/offset-position-parsing-valid.html": [ - "3cf235cc855fc7e1325610ce4170974b746f1182", + "565abffbdeeba7b553d4dea8468c23282f32ccd4", "testharness" ], "css/motion/parsing/offset-rotate-parsing-invalid.html": [ - "591189acb050b8f5ff48175f461de837b036ae2f", + "a649fb4aabb28eb9218b2d3fe68b9effe8bc3b93", "testharness" ], "css/motion/parsing/offset-rotate-parsing-valid.html": [ - "f481ec8971d3a680c7c0ceb48502adb9ffbf83b4", + "38e9ff1ba45ed2cd54b8d55670086f34236b742a", "testharness" ], - "css/motion/support/parsing-testcommon.js": [ + "css/motion/parsing/support/parsing-testcommon.js": [ "b075882f89aae49b419220b234534241cde5fd42", "support" ], @@ -570608,10 +570821,6 @@ "9945ef47114c2841a746c99a2fb1e93e050aac8b", "support" ], - "css/support/parsing-testcommon.js": [ - "b075882f89aae49b419220b234534241cde5fd42", - "support" - ], "css/support/pattern-grg-rgr-grg.png": [ "9b88fbd81149891234185f54f8b4a0431759f181", "support" @@ -572981,27 +573190,43 @@ "reftest" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-001-ref.xhtml": [ - "33057bf5f0f8e0182850a0ad272cdddaac66a6a3", + "517b7f465ddc9059053b84219b57247afc22b4b7", "support" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-001a.xhtml": [ - "bb0a8cda64b19b85c612a0609d35c400eefaa840", + "bb592b74a8b761230b1a4c56398d961c4619b066", "reftest" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-001b.xhtml": [ - "a1e192102ea9cbbdc5685085d47dfdd8b81d5023", + "3de740e9a3f8e59955d66de1771369eaf73b988c", + "reftest" + ], + 
"css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-002-ref.xhtml": [ + "f0d286494e428fcc6afbe642da0ea6fd0ae1ccc8", + "support" + ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-horiz-002.xhtml": [ + "902b26ba23e44659f5a111617439889c09875e32", "reftest" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-001-ref.xhtml": [ - "1bac2bcda1dfc8f919211c26841f55883c249180", + "f09e2a448da8a5466bffd31d505cd90cbc34fcc2", "support" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-001a.xhtml": [ - "c4236ef0406c48c5aec75d17d207cfb069591793", + "56bdd5fe4020ea641aa0c778fc49b500e4de2b6d", "reftest" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-001b.xhtml": [ - "54a8ed31f9637eb9781a5a1fafacec498f402cf9", + "002f20fb3335c7e0cb890dd267f36adeabab4f1b", + "reftest" + ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-002-ref.xhtml": [ + "b514d840489dcc027be492ffd5081fe0a124b8e4", + "support" + ], + "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-vert-002.xhtml": [ + "b4a8ccf3dd1f5890e5fad1cee0a49ba96f12d49f", "reftest" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/flexbox-align-content-wmvert-001-ref.xhtml": [ @@ -574425,7 +574650,7 @@ "reftest" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/reftest.list": [ - "b51226706eae57ce44afdbbc24d5fb74e4225d34", + "ee725d99ed9c669b6010132c9ca91e3f78899886", "support" ], "css/vendor-imports/mozilla/mozilla-central-reftests/flexbox/support/Ahem.ttf": [ @@ -580184,6 +580409,10 @@ "12cfdfa13ade604dec791174ffcd2e3732c7a185", "support" ], + "docs/_appendix/reverting.md": [ + "1f549b3a341f2a78a16be835ae43e2d8da5b7137", + "support" + ], "docs/_appendix/test-templates.md": [ "39b599569876967d6de158c6497ecff59dd7bbef", "support" @@ -590017,7 
+590246,7 @@ "testharness" ], "html/browsers/the-window-object/apis-for-creating-and-navigating-browsing-contexts-by-name/creating_browsing_context_test_01.html": [ - "75c8729cf716981ca79ebf15f01c431e5e156582", + "062f61949dc820634999e58045383065b179b49f", "testharness" ], "html/browsers/the-window-object/apis-for-creating-and-navigating-browsing-contexts-by-name/non_automated/001-1.html": [ @@ -591637,7 +591866,7 @@ "testharness" ], "html/dom/elements/the-innertext-idl-attribute/getter-tests.js": [ - "af860c98e93af93bf3e34fa76db3086a257d5e5a", + "fcd11b414b4a325bfb77468f0fea5ffb5a12f460", "support" ], "html/dom/elements/the-innertext-idl-attribute/getter.html": [ @@ -596125,7 +596354,7 @@ "testharness" ], "html/input/the-disabled-attribute/number-disabled.html": [ - "2472e56c4ecfce73d92b653951275099500128b6", + "11cb82fdda60d69f8b1dd7709ea99210238123f8", "testharness" ], "html/input/the-placeholder-attribute/multiline-cr.html": [ @@ -596596,21 +596825,41 @@ "59c5ca70d41cc969aed7ac6a531c1ca9a5f82f0f", "reftest" ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-default-style.html": [ + "f07c84f406cefc7054e8a13c2796d3d82673bdf7", + "testharness" + ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html": [ + "953328982032dae2d508619b90188534179a26a8", + "testharness" + ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-flexbox.html": [ + "d2b1d30d8cc828496fa76cc887e01149f2a44809", + "testharness" + ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-multicol.html": [ + "bdb2c2fd94686b502ed5a663b1026180283af22f", + "testharness" + ], "html/rendering/non-replaced-elements/the-fieldset-element-0/legend-block-formatting-context.html": [ "4e9539179739a3690aab276f2ba98c25bd4dfe9b", "testharness" ], + "html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative-ref.html": [ + "fd6c11a00566759fbf1e749d49ad396cf1a7ee08", + "support" + ], + 
"html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html": [ + "efd55ab22db75f8195a8d15161995311abf5b735", + "reftest" + ], "html/rendering/non-replaced-elements/the-fieldset-element-0/legend.html": [ "1cda91f32baf119b8dd827275a3ba8b10c484084", "testharness" ], - "html/rendering/non-replaced-elements/the-fieldset-element-0/min-width-not-important.html": [ - "cabbaf20e6563decd428c6fddae09e93b4dcc172", - "reftest" - ], - "html/rendering/non-replaced-elements/the-fieldset-element-0/ref.html": [ - "d17268a786bc99a32ef9a1420ca9fcd93a84a724", - "support" + "html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html": [ + "92c33029701b41678f4f514d28c6260338bd8e77", + "testharness" ], "html/rendering/non-replaced-elements/the-hr-element-0/align-ref.html": [ "9e4283e208e17e4f2cfed05882bb1aff276028ad", @@ -603413,7 +603662,7 @@ "testharness" ], "html/semantics/scripting-1/the-script-element/script-charset-02.html": [ - "77a015bb71f058ad23f4cc39e237cdbf1633d673", + "63cbe838e017fa0133d2fe66bcb2ec4002d7d43d", "testharness" ], "html/semantics/scripting-1/the-script-element/script-charset-03.html": [ @@ -605276,10 +605525,6 @@ "0cc32be6a2368591de06759c92223941f380f9c2", "support" ], - "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/001.html": [ - "f82d80be7b6b50608699b73a6f8d1b592e0a55c6", - "testharness" - ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/002.html": [ "5584bf9afbff034f5ea68d769afa648e31fe1aaf", "testharness" @@ -605296,14 +605541,6 @@ "1dcb92615d085b28d3c9d2a22d744be849158d18", "testharness" ], - "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/008.html": [ - "36c0a3f3ea11b9f96522cbba22f1006ba42900bb", - "testharness" - ], - "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/009.https.html": [ - "0d104c9569b37355c24d60fdcad0ca9ed792dcd0", - "testharness" - ], 
"html/webappapis/dynamic-markup-insertion/opening-the-input-stream/010-1.html": [ "317e13691d4a94e5861bbb85d7367591d7d1c624", "support" @@ -605349,11 +605586,11 @@ "testharness" ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015-1.html": [ - "dffbac0c04116c67492bca5fee606e2c958afbae", + "c325bd08015fc66b0fe7a95a13da6fe461a9afa7", "support" ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015.html": [ - "5ef06176ba0a1f253da0afe28fc0ea68e745ec30", + "cce9e65d4c30a13e11c334b89ff225cac45fbd39", "testharness" ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/016-1.html": [ @@ -605384,6 +605621,10 @@ "b3ea1fdf75455616c0e47772b0403bf434f76d36", "testharness" ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/beforeunload.window.js": [ + "1e2f891c1766bb1bcf37855476d952b09e4faab2", + "testharness" + ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/custom-element.window.js": [ "be646d15b803d3fbe583a38d4524ad36e1a9b5d1", "testharness" @@ -605397,11 +605638,11 @@ "testharness" ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/document.open-03-frame.html": [ - "b5252dceab6b63bed93f4c0f6fd23d61097fe7a4", + "a4b370cea41fb2d379b0350639aaeec4f169191d", "support" ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/document.open-03.html": [ - "fc325d54db239d729a4b762367576494c9e93490", + "e446d7021992cfcac2752f5a63e5821f3d51b022", "testharness" ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/encoding.window.js": [ @@ -605420,6 +605661,10 @@ "4efbb863c6372a3ee04d11f38d7ee56a44a2ac7d", "testharness" ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js": [ + "d4a9296fca66d3017c267a8d8951bef2dcbee238", + "testharness" + ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/origin-check-in-document-open-basic.html": [ "118be71af19c88d5fed0a1efe010bbd6868eae9c", 
"testharness" @@ -605448,6 +605693,10 @@ "843c3a2c7988be0b9595bc69887fc75a5a7b304c", "support" ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/global-variables-frame.html": [ + "0fe189914c3727f8071c4eaaa6cc740aeb7aab93", + "support" + ], "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/set-document-domain.html": [ "a92a7ae39f8351f97cd865dca5ebe8d4260aa229", "support" @@ -605464,6 +605713,10 @@ "9174008da33b7faeba91169efe3ace6e1dc87704", "testharness" ], + "html/webappapis/dynamic-markup-insertion/opening-the-input-stream/unload.window.js": [ + "e275a4987a0859b160a0f91de6c8896b90bdab31", + "testharness" + ], "html/webappapis/microtask-queuing/queue-microtask-exceptions.any.js": [ "01f32ac9ba14962fa99d4b263a8ca0f5a0daa161", "testharness" @@ -610492,6 +610745,86 @@ "7d08e12280a881508c862ebaaeaa40a099cf8e35", "testharness" ], + "network-error-logging/META.yml": [ + "bc063177d1d14febb1c4bdb86e70cfdce8ca0b5b", + "support" + ], + "network-error-logging/README.md": [ + "7cf2c6fdceed95b3911deb69542a6820acda479d", + "support" + ], + "network-error-logging/no-report-on-failed-cors-preflight.https.html": [ + "3a35651b4ef549a0510a83df923fd6b9d642b97c", + "testharness" + ], + "network-error-logging/no-report-on-subdomain-404.https.html": [ + "462f99e842317cf36c4d7a76ad13af00b1692d15", + "testharness" + ], + "network-error-logging/no-report-on-subdomain-success.https.html": [ + "5fd6d4fb41231c5ca5f345b890927c5d1b9411ab", + "testharness" + ], + "network-error-logging/reports-are-not-observable.https.html": [ + "35ab4f3c23507617c4f26981339741d9b3c385be", + "testharness" + ], + "network-error-logging/sends-report-on-404.https.html": [ + "38bdc014501e90f5a99bae1ac0d433191f557afb", + "testharness" + ], + "network-error-logging/sends-report-on-subdomain-dns-failure.https.html": [ + "8913857af8acb01760589b6a7546a110a359f192", + "testharness" + ], + 
"network-error-logging/sends-report-on-success-with-subdomain-policy.https.html": [ + "fce12cd3e96cf327222faea4bdaeba5835f4f4ce", + "testharness" + ], + "network-error-logging/sends-report-on-success.https.html": [ + "68fddaa0c70b8dd0fd22194b351ba1157f836bdc", + "testharness" + ], + "network-error-logging/support/clear-policy-pass.png": [ + "2fa1e0ac0663a65deae6602621521cc2844b93de", + "support" + ], + "network-error-logging/support/clear-policy-pass.png.sub.headers": [ + "1085b8a987c56fd8b0412f4032f6957e58447ace", + "support" + ], + "network-error-logging/support/lock.py": [ + "8c88250bde00b4a62cc99131fdaa09e97f716369", + "support" + ], + "network-error-logging/support/nel.sub.js": [ + "c6b4783bd94c00579047627b6c2b137478ae1c2e", + "support" + ], + "network-error-logging/support/no-policy-pass.png": [ + "2fa1e0ac0663a65deae6602621521cc2844b93de", + "support" + ], + "network-error-logging/support/pass.png": [ + "2fa1e0ac0663a65deae6602621521cc2844b93de", + "support" + ], + "network-error-logging/support/pass.png.sub.headers": [ + "70796e913ace97d4d1a21ac0b1c19f6fbb6d01fc", + "support" + ], + "network-error-logging/support/report.py": [ + "7c05b51b9eb011f4d32bd5e774f6a0d3ead2cd9c", + "support" + ], + "network-error-logging/support/subdomains-pass.png": [ + "2fa1e0ac0663a65deae6602621521cc2844b93de", + "support" + ], + "network-error-logging/support/subdomains-pass.png.sub.headers": [ + "50124b8cfcdfd6efe91a0613ec34d3c8ca0a10dc", + "support" + ], "notifications/META.yml": [ "2cb7972705c7b9ef00375dfa4258e92edb15fb21", "support" @@ -630152,12 +630485,16 @@ "050ac0b542455ceb53ed36038af5b9b0810977cf", "support" ], + "service-workers/cache-storage/resources/vary.py": [ + "59e39bc2ae730a4cd3e1376dd003c9ffada4ed4a", + "support" + ], "service-workers/cache-storage/script-tests/cache-abort.js": [ "ec4130fded29e0070828092c2546dc46456d8fdc", "support" ], "service-workers/cache-storage/script-tests/cache-add.js": [ - "c03faeb0e83723eab64e648e600ba0612873d404", + 
"a482c42eaeb2a902a36b1d6aecd306e8ed4b4ebf", "support" ], "service-workers/cache-storage/script-tests/cache-delete.js": [ @@ -630793,7 +631130,7 @@ "testharness" ], "service-workers/service-worker/interfaces-window.https.html": [ - "d3e85f2c711a627e6517a71a0c801ef344b83489", + "85d7f6467ee98905b127a075d4e2a2d331395e79", "testharness" ], "service-workers/service-worker/invalid-blobtype.https.html": [ @@ -630809,7 +631146,7 @@ "testharness" ], "service-workers/service-worker/local-url-inherit-controller.https.html": [ - "df25051b25748211622b26fc25f145f1c7212c8d", + "6702abcadbb18958af4cb581d6286685f90b60c4", "testharness" ], "service-workers/service-worker/mime-sniffing.https.html": [ @@ -631953,7 +632290,7 @@ "support" ], "service-workers/service-worker/resources/registration-tests-script-url.js": [ - "8d777a83099d54b30bee6b9f78de6fc35d8c3f29", + "0dc5af16d30b7b8463762e6f88c0255de851701f", "support" ], "service-workers/service-worker/resources/registration-tests-script.js": [ @@ -632333,7 +632670,7 @@ "testharness" ], "service-workers/service-worker/update-top-level.https.html": [ - "1f0bdff65597832c7e3ba27c37681b760decf456", + "e382028b44a9d19b26b3c15a3bba17fa6a0d9bcb", "testharness" ], "service-workers/service-worker/update.https.html": [ @@ -638477,7 +638814,7 @@ "support" ], "tools/manifest/manifest.py": [ - "02250e8300fb0136b882df5d51eabe7dd6f87778", + "42a8e1ceb23693a3e139cf6129bc82327b765182", "support" ], "tools/manifest/sourcefile.py": [ @@ -638497,7 +638834,7 @@ "support" ], "tools/manifest/tests/test_manifest.py": [ - "e0a2c828a770f7c6897c18ccb8116057de04c8ef", + "017513ae01ae92d7f4807d77358849c142a182a3", "support" ], "tools/manifest/tests/test_sourcefile.py": [ @@ -638993,7 +639330,7 @@ "support" ], "tools/serve/test_serve.py": [ - "e939c3a0ccee4ac4f5babbcab3b9d30dbfa80be8", + "1c089b506738ee73e2f1e29cf1729a1abc1e0c0b", "support" ], "tools/third_party/atomicwrites/.gitignore": [ @@ -642929,7 +643266,7 @@ "support" ], "tools/webdriver/webdriver/error.py": 
[ - "b2337ff3b38f57828c72d76e49ef8893d30b578c", + "e148e8fe800700c0c0b96abb48444063c4af6572", "support" ], "tools/webdriver/webdriver/protocol.py": [ @@ -643413,7 +643750,7 @@ "support" ], "tools/wptrunner/wptrunner/testloader.py": [ - "018dc103983dfef222ed7cc6a4f8fe5288add256", + "2313a80c745bfac9946119926411234c506c6654", "support" ], "tools/wptrunner/wptrunner/testrunner.py": [ @@ -643461,7 +643798,7 @@ "support" ], "tools/wptrunner/wptrunner/tests/test_wpttest.py": [ - "827244cda3c5cc94d2bdb8becad1fe5c5253828b", + "5280e46a6b243326e2303bb554ada4a726bd7203", "support" ], "tools/wptrunner/wptrunner/update/__init__.py": [ @@ -643545,7 +643882,7 @@ "support" ], "tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py": [ - "87ddfeed891742519c59b33aa0806f635c16a5cc", + "98b54dc11ea8a033b8ee626b5f8bb924759959cc", "support" ], "tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py": [ @@ -643920,126 +644257,102 @@ "15b2db735fd0d7a01d9e9bd3a1f3719f790d62e5", "testharness" ], - "trusted-types/DOMParser-requiresTrustedTypes.tentative.html": [ - "7e21feabd24c653cbe0b713d01e20aeade534b6e", + "trusted-types/DOMParser-parseFromString.tentative.html": [ + "2fe9b31b787e1fb458a3ed8996b2d79f7e14aa35", "testharness" ], - "trusted-types/DOMParser.tentative.html": [ - "53d2b44febd6510b4216bb21cb22a83ed6663e5b", + "trusted-types/Document-write.tentative.html": [ + "3a63e923543b999b05d1fab926ad33d7d2719dfa", + "testharness" + ], + "trusted-types/Element-insertAdjacentHTML.tentative.html": [ + "599ade44ec117ecb429659a9f969a2767bd95cbb", + "testharness" + ], + "trusted-types/Element-outerHTML.tentative.html": [ + "a0bb6c1a5e3fef47e4351353befbfc8eb105652f", "testharness" ], "trusted-types/HTMLElement-generic.tentative.html": [ - "486b008986bc4e3328c8b709674359538d408800", + "cea32a5a2df1d9b255f5aaf85ac5a694fdb3a618", + "testharness" + ], + "trusted-types/Location-assign.tentative.html": [ + "13cca5679488d0b3e12631d5f70408565ea1b065", + "testharness" + ], + 
"trusted-types/Location-href.tentative.html": [ + "d759d28593e67f25d8bc28d36cf0ff4912460dc0", + "testharness" + ], + "trusted-types/Location-replace.tentative.html": [ + "7d84905d19878d57634a8497b81ef86d8114b72e", "testharness" ], "trusted-types/META.yml": [ "d0743949b6a122d8bd0adf7b1ed0181f0c51429d", "support" ], - "trusted-types/TrustedHTML.tentative.html": [ - "a8d4e78b06d124b05aa640aad563b3d9e9003046", - "testharness" - ], - "trusted-types/TrustedScriptURL.tentative.html": [ - "92bc87f1e1da3eec8a0d597687e04857ad079359", + "trusted-types/Range-createContextualFragment.tentative.html": [ + "3d45b33486d3971c0c58180fa4034dbfae18f135", "testharness" ], "trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html": [ - "a37b5a7197f264bb8f75e1582debff5a53cd8d5e", + "76e6d130b05dfba00911ad42eb7a162cd29b222e", "testharness" ], - "trusted-types/TrustedURL.tentative.html": [ - "5048326fad81af68915b9f83b56da375388fcbef", + "trusted-types/Window-open.tentative.html": [ + "c005fbba143f66a9540deebba7988fdea9661558", "testharness" ], "trusted-types/Window-trustedTypes.tentative.html": [ "ef4487749dd0c12a00bd3ab42c1353467a6eeb8f", "testharness" ], + "trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html": [ + "cc575dc0085bce3aa1370fb528e28003ad3c1c2b", + "testharness" + ], + "trusted-types/block-string-assignment-to-Document-write.tentative.html": [ + "28813d72e0e1833e25658e2210abb9b0a30b2137", + "testharness" + ], + "trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html": [ + "ad94b44e8fb7621ba4693ad65377872281f3e9a6", + "testharness" + ], + "trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html": [ + "47f1165b1a69366848dd5dd21a2ad2199b9c2e81", + "testharness" + ], "trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html": [ - "79bbb24f541b43123ef1d5f814fb3108275b974c", + "eae52626190746ad0a8b436f74981009e400232b", "testharness" ], - 
"trusted-types/block-string-assignment-to-createContextualFragment.tentative.html": [ - "1d67a51ff6ab1df879a59af5ee7c6ad9a0609be0", + "trusted-types/block-string-assignment-to-Location-assign.tentative.html": [ + "8079335bc5861fa723691a0f884cf249e6f63e24", "testharness" ], - "trusted-types/block-string-assignment-to-innerHTML.tentative.html": [ - "67faf6ea7d30ebb76bb857faf0faf04743917dfb", + "trusted-types/block-string-assignment-to-Location-href.tentative.html": [ + "4e393f92506e00276a4440e1023ac23e7a6138e8", "testharness" ], - "trusted-types/block-string-assignment-to-insertAdjacentHTML.tentative.html": [ - "70bb803442f14cbdcb356be6f6cdceca83522008", + "trusted-types/block-string-assignment-to-Location-replace.tentative.html": [ + "872f14e144830ed87b51e352f93c32ce85438bfe", "testharness" ], - "trusted-types/block-string-assignment-to-location-assign.tentative.html": [ - "76725da7c72e2f137b15c065bf74a43fc95c1933", + "trusted-types/block-string-assignment-to-Range-createContextualFragment.tentative.html": [ + "2afa2572c350071b791ee280bce0a1e5135dc2aa", "testharness" ], - "trusted-types/block-string-assignment-to-location-href.tentative.html": [ - "07cc4d5fe29f79ae0239035dba1430f326945d70", - "testharness" - ], - "trusted-types/block-string-assignment-to-location-replace.tentative.html": [ - "9736a84b3ada709ac7ea758ccca10b766130b9aa", - "testharness" - ], - "trusted-types/block-string-assignment-to-outerHTML.tentative.html": [ - "8cf6c4b065a87f83015e5519ce86ecb6e1115e12", - "testharness" - ], - "trusted-types/block-string-assignment-to-window-open.tentative.html": [ - "2c3a8ce056685a5306472ee2e1a87bcbfc4c11e9", - "testharness" - ], - "trusted-types/createContextualFragment.tentative.html": [ - "5e50acc80650e4cc66e045735b301df3cdcaa307", - "testharness" - ], - "trusted-types/document-write.tentative.html": [ - "12794199722f6e2f078efa9ce9f91146c8ae9219", - "testharness" - ], - "trusted-types/innerHTML.tentative.html": [ - "f9f32d42a766346f5fd4e5fd18d8e20f7acba810", - 
"testharness" - ], - "trusted-types/insertAdjacentHTML.tentative.html": [ - "a95dd6c0bacc9844d25e64453b66aab759c76ad7", - "testharness" - ], - "trusted-types/location-assign.tentative.html": [ - "07cb4a801966006065adca4a5635d798c5c8ef94", - "testharness" - ], - "trusted-types/location-href.tentative.html": [ - "2527fbf4847b767d0252c86bddce0ab8127fd15b", - "testharness" - ], - "trusted-types/location-replace.tentative.html": [ - "097c24d593aa1eb34d452dd1d2812f2cc72ae1ad", - "testharness" - ], - "trusted-types/outerHTML.tentative.html": [ - "1deb46bf5e3d102550575d5e79eaf05cdf9739f6", - "testharness" - ], - "trusted-types/srcDoc-requiresTrustedTypes.tentative.html": [ - "b957488bee42d17b3160144b1cdaccfdfb8f1689", - "testharness" - ], - "trusted-types/srcDoc.tentative.html": [ - "b23703e22329a088b9459bdbbda367b5debf184f", + "trusted-types/block-string-assignment-to-Window-open.tentative.html": [ + "f5712295d30d7b1d680ad6753dd401d21c0409f9", "testharness" ], "trusted-types/support/helper.sub.js": [ - "91112d8f239069a9f88fdcffc46f5b2d49e49321", + "b5435917bec607c97eaa5d75ee7fa2752999cb0a", "support" ], - "trusted-types/window-open.tentative.html": [ - "66ffbd78399c70cca883ac55b6f380587bffc9ab", - "testharness" - ], "uievents/META.yml": [ "2f1ec58efec10e0dd6374aac05cb926c8cffa3f1", "support" @@ -645945,7 +646258,7 @@ "testharness" ], "wake-lock/wakelock-api.https.html": [ - "7ec4fc2827a77421cc16838e2458443265c1dccf", + "45a906dfa1f23de34befdc68e9da17d179fb2474", "testharness" ], "wake-lock/wakelock-applicability-manual.https.html": [ @@ -646009,7 +646322,7 @@ "testharness" ], "wake-lock/wakelock-type.https.html": [ - "6f6413d1a916bd966ab3e82b49065672eaaa9372", + "2f9e1242614d57a79b2aada32cdd3d2cb3ae012d", "testharness" ], "wake-lock/wakelockrequest-is-independent.https.html": [ @@ -646397,7 +646710,7 @@ "testharness" ], "web-animations/timing-model/animations/playing-an-animation.html": [ - "0d47ef6ec2aba7b56c771fc067a347ceeafcce1f", + 
"1477261e6bc6227598b4e80475a66aa95fe5d15c", "testharness" ], "web-animations/timing-model/animations/reversing-an-animation.html": [ @@ -648373,7 +648686,7 @@ "support" ], "webdriver/tests/support/asserts.py": [ - "44c76a96b0997100a0201a5ffafd20e9967758ec", + "2d305a0f3bec08d6b773541e556258416e737f7f", "support" ], "webdriver/tests/support/fixtures.py": [ @@ -655149,7 +655462,7 @@ "testharness" ], "xhr/META.yml": [ - "7ad6ad95c36c2646e1b2d1a79854d6194bd121f3", + "4b2ecf39bf3a07188d587f0a7ac3d22cad55f923", "support" ], "xhr/README.md": [ @@ -656698,5 +657011,5 @@ ] }, "url_base": "/", - "version": 4 + "version": 5 } diff --git a/testing/web-platform/meta/async-local-storage/api-surface.tentative.https.html.ini b/testing/web-platform/meta/async-local-storage/api-surface.tentative.https.html.ini new file mode 100644 index 000000000000..6fc771828557 --- /dev/null +++ b/testing/web-platform/meta/async-local-storage/api-surface.tentative.https.html.ini @@ -0,0 +1,2 @@ +[api-surface.tentative.https.html] + expected: TIMEOUT diff --git a/testing/web-platform/meta/async-local-storage/key-types.tentative.https.html.ini b/testing/web-platform/meta/async-local-storage/key-types.tentative.https.html.ini new file mode 100644 index 000000000000..a878dd98e501 --- /dev/null +++ b/testing/web-platform/meta/async-local-storage/key-types.tentative.https.html.ini @@ -0,0 +1,2 @@ +[key-types.tentative.https.html] + expected: TIMEOUT diff --git a/testing/web-platform/meta/async-local-storage/non-secure-context-dynamic-import.tentative.html.ini b/testing/web-platform/meta/async-local-storage/non-secure-context-dynamic-import.tentative.html.ini new file mode 100644 index 000000000000..b6d895f4c011 --- /dev/null +++ b/testing/web-platform/meta/async-local-storage/non-secure-context-dynamic-import.tentative.html.ini @@ -0,0 +1,4 @@ +[non-secure-context-dynamic-import.tentative.html] + [Async local storage: should not work in non-secure contexts when included via import()] + expected: FAIL + 
diff --git a/testing/web-platform/meta/async-local-storage/non-secure-context-import-statement.tentative.html.ini b/testing/web-platform/meta/async-local-storage/non-secure-context-import-statement.tentative.html.ini new file mode 100644 index 000000000000..9edd135f65b7 --- /dev/null +++ b/testing/web-platform/meta/async-local-storage/non-secure-context-import-statement.tentative.html.ini @@ -0,0 +1,5 @@ +[non-secure-context-import-statement.tentative.html] + expected: TIMEOUT + [Async local storage: should not work in non-secure contexts when included via an import statement] + expected: TIMEOUT + diff --git a/testing/web-platform/meta/async-local-storage/non-secure-context-script-element.tentative.html.ini b/testing/web-platform/meta/async-local-storage/non-secure-context-script-element.tentative.html.ini new file mode 100644 index 000000000000..3c0abd3f2660 --- /dev/null +++ b/testing/web-platform/meta/async-local-storage/non-secure-context-script-element.tentative.html.ini @@ -0,0 +1,5 @@ +[non-secure-context-script-element.tentative.html] + expected: TIMEOUT + [Async local storage: should not work in non-secure contexts when included via a script element] + expected: TIMEOUT + diff --git a/testing/web-platform/meta/async-local-storage/storage-smoke-test.tentative.https.html.ini b/testing/web-platform/meta/async-local-storage/storage-smoke-test.tentative.https.html.ini new file mode 100644 index 000000000000..d2169720936f --- /dev/null +++ b/testing/web-platform/meta/async-local-storage/storage-smoke-test.tentative.https.html.ini @@ -0,0 +1,2 @@ +[storage-smoke-test.tentative.https.html] + expected: TIMEOUT diff --git a/testing/web-platform/meta/background-fetch/fetch.https.window.js.ini b/testing/web-platform/meta/background-fetch/fetch.https.window.js.ini index 1586c60a2050..82d0c68aa6e5 100644 --- a/testing/web-platform/meta/background-fetch/fetch.https.window.js.ini +++ b/testing/web-platform/meta/background-fetch/fetch.https.window.js.ini @@ -11,3 +11,6 @@ 
[IDs must be unique among active Background Fetch registrations] expected: FAIL + [Background Fetch that exceeds the quota throws a QuotaExceededError] + expected: FAIL + diff --git a/testing/web-platform/meta/css/css-text/line-breaking/line-breaking-012.html.ini b/testing/web-platform/meta/css/css-text/line-breaking/line-breaking-012.html.ini deleted file mode 100644 index 6fd167de2a7b..000000000000 --- a/testing/web-platform/meta/css/css-text/line-breaking/line-breaking-012.html.ini +++ /dev/null @@ -1,2 +0,0 @@ -[line-breaking-012.html] - expected: FAIL diff --git a/testing/web-platform/meta/fetch/api/abort/general.any.js.ini b/testing/web-platform/meta/fetch/api/abort/general.any.js.ini index 9fb75d7f46a4..8a8823a9ad44 100644 --- a/testing/web-platform/meta/fetch/api/abort/general.any.js.ini +++ b/testing/web-platform/meta/fetch/api/abort/general.any.js.ini @@ -24,3 +24,11 @@ [general.https.any.serviceworker.html] expected: TIMEOUT + +[general.any.serviceworker.html] + [Readable stream synchronously cancels with AbortError if aborted before reading] + expected: FAIL + + [Stream will not error if body is empty. It's closed with an empty queue before it errors.] 
+ expected: FAIL + diff --git a/testing/web-platform/meta/hr-time/idlharness.any.js.ini b/testing/web-platform/meta/hr-time/idlharness.any.js.ini index 2b3f19640d4f..a328654c9308 100644 --- a/testing/web-platform/meta/hr-time/idlharness.any.js.ini +++ b/testing/web-platform/meta/hr-time/idlharness.any.js.ini @@ -30,3 +30,17 @@ [idlharness.https.any.serviceworker.html] expected: TIMEOUT + +[idlharness.any.serviceworker.html] + [Performance interface: operation toJSON()] + expected: FAIL + + [Performance interface: performance must inherit property "toJSON()" with the proper type] + expected: FAIL + + [Test default toJSON operation of Performance] + expected: FAIL + + [WorkerGlobalScope interface: attribute performance] + expected: FAIL + diff --git a/testing/web-platform/meta/html/browsers/windows/auxiliary-browsing-contexts/opener-setter.html.ini b/testing/web-platform/meta/html/browsers/windows/auxiliary-browsing-contexts/opener-setter.html.ini new file mode 100644 index 000000000000..f688f420d108 --- /dev/null +++ b/testing/web-platform/meta/html/browsers/windows/auxiliary-browsing-contexts/opener-setter.html.ini @@ -0,0 +1,7 @@ +[opener-setter.html] + expected: + if debug and webrender and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86_64") and (bits == 64): TIMEOUT + [Auxiliary browsing context created via `window.open` and setting `window.opener` to `null` should report `window.opener` `null`] + expected: + if debug and webrender and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86_64") and (bits == 64): TIMEOUT + diff --git a/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-default-style.html.ini b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-default-style.html.ini new file mode 100644 index 000000000000..e76c8d1b88db --- /dev/null +++ 
b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-default-style.html.ini @@ -0,0 +1,4 @@ +[fieldset-default-style.html] + [fieldset default style] + expected: FAIL + diff --git a/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html.ini b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html.ini new file mode 100644 index 000000000000..34ba276a2927 --- /dev/null +++ b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html.ini @@ -0,0 +1,34 @@ +[fieldset-display.html] + [fieldset with display: inline] + expected: FAIL + + [fieldset with display: table-footer-group] + expected: FAIL + + [fieldset with display: table-column] + expected: FAIL + + [fieldset with display: table-row] + expected: FAIL + + [fieldset with display: table-cell] + expected: FAIL + + [fieldset with display: flow] + expected: FAIL + + [fieldset with display: table-header-group] + expected: FAIL + + [fieldset with display: run-in] + expected: FAIL + + [fieldset with display: table-column-group] + expected: FAIL + + [fieldset with display: table-row-group] + expected: FAIL + + [fieldset with display: ruby-text-container] + expected: FAIL + diff --git a/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html.ini b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html.ini new file mode 100644 index 000000000000..f1efd104b956 --- /dev/null +++ b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html.ini @@ -0,0 +1,2 @@ +[legend-position-relative.html] + expected: FAIL diff --git a/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html.ini 
b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html.ini new file mode 100644 index 000000000000..bfa672fa2431 --- /dev/null +++ b/testing/web-platform/meta/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html.ini @@ -0,0 +1,7 @@ +[min-inline-size.html] + [vertical-lr] + expected: FAIL + + [vertical-rl] + expected: FAIL + diff --git a/testing/web-platform/meta/html/semantics/scripting-1/the-script-element/script-charset-02.html.ini b/testing/web-platform/meta/html/semantics/scripting-1/the-script-element/script-charset-02.html.ini deleted file mode 100644 index b1b336ee24ab..000000000000 --- a/testing/web-platform/meta/html/semantics/scripting-1/the-script-element/script-charset-02.html.ini +++ /dev/null @@ -1,7 +0,0 @@ -[script-charset-02.html] - [Script @type: unknown parameters] - expected: FAIL - - [Script @type: unknown parameters 1] - expected: FAIL - diff --git a/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015.html.ini b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015.html.ini new file mode 100644 index 000000000000..1036a206e92b --- /dev/null +++ b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015.html.ini @@ -0,0 +1,4 @@ +[015.html] + [global scope unchanged] + expected: FAIL + diff --git a/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/beforeunload.window.js.ini b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/beforeunload.window.js.ini new file mode 100644 index 000000000000..fb5a5ee105f8 --- /dev/null +++ b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/beforeunload.window.js.ini @@ -0,0 +1,4 @@ +[beforeunload.window.html] + [document.open() should not fire a beforeunload event] + expected: FAIL + 
diff --git a/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/document.open-03.html.ini b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/document.open-03.html.ini new file mode 100644 index 000000000000..a89b42dacca2 --- /dev/null +++ b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/document.open-03.html.ini @@ -0,0 +1,4 @@ +[document.open-03.html] + [document.open and no singleton replacement] + expected: FAIL + diff --git a/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js.ini b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js.ini new file mode 100644 index 000000000000..43c9a136e70e --- /dev/null +++ b/testing/web-platform/meta/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js.ini @@ -0,0 +1,46 @@ +[no-new-global.window.html] + [Location maintains its prototype and properties through open()] + expected: FAIL + + [Navigator maintains its prototype and properties through open()] + expected: FAIL + + [Document maintains its prototype and properties through open()] + expected: FAIL + + [Obtaining a variable from a global whose document had open() invoked] + expected: FAIL + + [WindowProxy maintains its prototype and properties through open()] + expected: FAIL + + [Location maintains object identity through open()] + expected: FAIL + + [Navigator maintains object identity through open()] + expected: FAIL + + [sessionStorage maintains its prototype and properties through open()] + expected: FAIL + + [BarProp maintains its prototype and properties through open()] + expected: FAIL + + [localStorage maintains object identity through open()] + expected: FAIL + + [BarProp maintains object identity through open()] + expected: FAIL + + [History maintains object identity 
through open()] + expected: FAIL + + [sessionStorage maintains object identity through open()] + expected: FAIL + + [localStorage maintains its prototype and properties through open()] + expected: FAIL + + [History maintains its prototype and properties through open()] + expected: FAIL + diff --git a/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.js.ini b/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.js.ini index 30c30363c0b6..4214bcc35d57 100644 --- a/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.js.ini +++ b/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask-exceptions.any.js.ini @@ -15,3 +15,8 @@ [It rethrows exceptions] expected: FAIL + +[queue-microtask-exceptions.any.serviceworker.html] + [It rethrows exceptions] + expected: FAIL + diff --git a/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask.any.js.ini b/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask.any.js.ini index 9650507b8bd6..a1045436a5b2 100644 --- a/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask.any.js.ini +++ b/testing/web-platform/meta/html/webappapis/microtask-queuing/queue-microtask.any.js.ini @@ -51,3 +51,20 @@ [It interleaves with promises as expected] expected: FAIL + +[queue-microtask.any.serviceworker.html] + [It exists and is a function] + expected: FAIL + + [It does not pass any arguments] + expected: FAIL + + [It calls the callback asynchronously] + expected: FAIL + + [It throws when given non-functions] + expected: FAIL + + [It interleaves with promises as expected] + expected: FAIL + diff --git a/testing/web-platform/meta/mozilla-sync b/testing/web-platform/meta/mozilla-sync index 7be67c869750..e32e9ee8ef92 100644 --- a/testing/web-platform/meta/mozilla-sync +++ b/testing/web-platform/meta/mozilla-sync @@ -1,2 +1,2 @@ -local: 
456bcc8a4472d2b1813f30f15156a1a10bdef05f -upstream: fa06664dc85df3024762f5e082e05aa293f90d88 +local: d6ba0fc0655cd41b77171eb13e77ec877790abb6 +upstream: 35ef190b37f437ebf44422f2e3aa5d426f98dd82 diff --git a/testing/web-platform/meta/network-error-logging/reports-are-not-observable.https.html.ini b/testing/web-platform/meta/network-error-logging/reports-are-not-observable.https.html.ini new file mode 100644 index 000000000000..34a8bbe61b6f --- /dev/null +++ b/testing/web-platform/meta/network-error-logging/reports-are-not-observable.https.html.ini @@ -0,0 +1,7 @@ +[reports-are-not-observable.https.html] + [Test that NEL reports are not observable from JavaScript] + expected: FAIL + + [\n Test that NEL reports are not observable from JavaScript\n ] + expected: FAIL + diff --git a/testing/web-platform/meta/network-error-logging/sends-report-on-404.https.html.ini b/testing/web-platform/meta/network-error-logging/sends-report-on-404.https.html.ini new file mode 100644 index 000000000000..fbe1c2ab21c9 --- /dev/null +++ b/testing/web-platform/meta/network-error-logging/sends-report-on-404.https.html.ini @@ -0,0 +1,7 @@ +[sends-report-on-404.https.html] + [Test that NEL reports are sent for HTTP errors] + expected: FAIL + + [\n Test that NEL reports are sent for HTTP errors\n ] + expected: FAIL + diff --git a/testing/web-platform/meta/network-error-logging/sends-report-on-subdomain-dns-failure.https.html.ini b/testing/web-platform/meta/network-error-logging/sends-report-on-subdomain-dns-failure.https.html.ini new file mode 100644 index 000000000000..1d0af0080f04 --- /dev/null +++ b/testing/web-platform/meta/network-error-logging/sends-report-on-subdomain-dns-failure.https.html.ini @@ -0,0 +1,4 @@ +[sends-report-on-subdomain-dns-failure.https.html] + [\n Test that include_subdomains policies report DNS failures for subdomains\n ] + expected: FAIL + diff --git a/testing/web-platform/meta/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html.ini 
b/testing/web-platform/meta/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html.ini new file mode 100644 index 000000000000..a2483a21d119 --- /dev/null +++ b/testing/web-platform/meta/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html.ini @@ -0,0 +1,7 @@ +[sends-report-on-success-with-subdomain-policy.https.html] + [Test that NEL reports are sent for successful requests] + expected: FAIL + + [\n Test that NEL reports are sent for successful requests\n ] + expected: FAIL + diff --git a/testing/web-platform/meta/network-error-logging/sends-report-on-success.https.html.ini b/testing/web-platform/meta/network-error-logging/sends-report-on-success.https.html.ini new file mode 100644 index 000000000000..ad2fcc54debb --- /dev/null +++ b/testing/web-platform/meta/network-error-logging/sends-report-on-success.https.html.ini @@ -0,0 +1,7 @@ +[sends-report-on-success.https.html] + [Test that NEL reports are sent for successful requests] + expected: FAIL + + [\n Test that NEL reports are sent for successful requests\n ] + expected: FAIL + diff --git a/testing/web-platform/meta/service-workers/service-worker/__dir__.ini b/testing/web-platform/meta/service-workers/service-worker/__dir__.ini index b92994a6b017..d3d3a0aeb2ed 100644 --- a/testing/web-platform/meta/service-workers/service-worker/__dir__.ini +++ b/testing/web-platform/meta/service-workers/service-worker/__dir__.ini @@ -1,2 +1,2 @@ prefs: [dom.serviceWorkers.enabled:true] -lsan-allowed: [Alloc, Create, CreateInner, MakeUnique, Malloc, NewEmptyScopeData, NewPage, OrInsert, PLDHashTable::Add, Realloc, SharedMutex, __rdl_alloc, __rdl_realloc, js_new, js_pod_calloc, js_pod_malloc, js_pod_realloc, mozilla::BasePrincipal::CreateCodebasePrincipal, mozilla::ThrottledEventQueue::Create, mozilla::WeakPtr, mozilla::dom::ChromeUtils::GenerateQI, mozilla::dom::Performance::CreateForMainThread, mozilla::dom::PerformanceStorageWorker::Create, 
mozilla::dom::WorkerPrivate::WorkerPrivate, mozilla::net::HttpBaseChannel::HttpBaseChannel, mozilla::net::HttpChannelChild::HttpChannelChild, mozilla::net::nsHttpHandler::NewProxiedChannel2, nsNodeSupportsWeakRefTearoff::GetWeakReference, nsPrefetchService::Preload] +lsan-allowed: [Alloc, Create, CreateInner, MakeUnique, Malloc, NewChannelFromURIWithProxyFlagsInternal, NewEmptyScopeData, NewPage, OrInsert, PLDHashTable::Add, Realloc, SharedMutex, __rdl_alloc, __rdl_realloc, js_new, js_pod_calloc, js_pod_malloc, js_pod_realloc, mozilla::BasePrincipal::CreateCodebasePrincipal, mozilla::ThrottledEventQueue::Create, mozilla::WeakPtr, mozilla::dom::ChromeUtils::GenerateQI, mozilla::dom::Performance::CreateForMainThread, mozilla::dom::PerformanceStorageWorker::Create, mozilla::dom::WorkerPrivate::WorkerPrivate, mozilla::net::HttpBaseChannel::HttpBaseChannel, mozilla::net::HttpChannelChild::HttpChannelChild, mozilla::net::nsHttpHandler::NewProxiedChannel2, nsNodeSupportsWeakRefTearoff::GetWeakReference, nsPrefetchService::Preload, nsSegmentedBuffer::AppendNewSegment] diff --git a/testing/web-platform/meta/trusted-types/DOMParser-parseFromString.tentative.html.ini b/testing/web-platform/meta/trusted-types/DOMParser-parseFromString.tentative.html.ini new file mode 100644 index 000000000000..708e74b7c171 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/DOMParser-parseFromString.tentative.html.ini @@ -0,0 +1,4 @@ +[DOMParser-parseFromString.tentative.html] + [document.innerText assigned via policy (successful HTML transformation).] 
+ expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Document-write.tentative.html.ini b/testing/web-platform/meta/trusted-types/Document-write.tentative.html.ini new file mode 100644 index 000000000000..fcff1ec25784 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Document-write.tentative.html.ini @@ -0,0 +1,4 @@ +[Document-write.tentative.html] + [document.write with html assigned via policy (successful URL transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Element-innerHTML.tentative.html.ini b/testing/web-platform/meta/trusted-types/Element-innerHTML.tentative.html.ini new file mode 100644 index 000000000000..9d3780b7987f --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Element-innerHTML.tentative.html.ini @@ -0,0 +1,4 @@ +[Element-innerHTML.tentative.html] + [innerHTML assigned via policy (successful HTML transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Element-insertAdjacentHTML.tentative.html.ini b/testing/web-platform/meta/trusted-types/Element-insertAdjacentHTML.tentative.html.ini new file mode 100644 index 000000000000..0c79fdfbc1cf --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Element-insertAdjacentHTML.tentative.html.ini @@ -0,0 +1,4 @@ +[Element-insertAdjacentHTML.tentative.html] + [insertAdjacentHTML with html assigned via policy (successful HTML transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Element-outerHTML.tentative.html.ini b/testing/web-platform/meta/trusted-types/Element-outerHTML.tentative.html.ini new file mode 100644 index 000000000000..4d003c7e08c5 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Element-outerHTML.tentative.html.ini @@ -0,0 +1,4 @@ +[Element-outerHTML.tentative.html] + [outerHTML with html assigned via policy (successful HTML transformation).] 
+ expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/HTMLElement-generic.tentative.html.ini b/testing/web-platform/meta/trusted-types/HTMLElement-generic.tentative.html.ini index 74c8dbb9d4ef..35315dc07409 100644 --- a/testing/web-platform/meta/trusted-types/HTMLElement-generic.tentative.html.ini +++ b/testing/web-platform/meta/trusted-types/HTMLElement-generic.tentative.html.ini @@ -2,3 +2,54 @@ [HTMLElement-generic] expected: FAIL + [object.codeBase assigned via policy (successful URL transformation)] + expected: FAIL + + [iframe.src assigned via policy (successful URL transformation)] + expected: FAIL + + [area.href assigned via policy (successful URL transformation)] + expected: FAIL + + [embed.src assigned via policy (successful ScriptURL transformation)] + expected: FAIL + + [object.data assigned via policy (successful URL transformation)] + expected: FAIL + + [source.src assigned via policy (successful URL transformation)] + expected: FAIL + + [link.href assigned via policy (successful URL transformation)] + expected: FAIL + + [input.src assigned via policy (successful URL transformation)] + expected: FAIL + + [frame.src assigned via policy (successful URL transformation)] + expected: FAIL + + [script.src assigned via policy (successful ScriptURL transformation)] + expected: FAIL + + [a.href assigned via policy (successful URL transformation)] + expected: FAIL + + [track.src assigned via policy (successful URL transformation)] + expected: FAIL + + [img.src assigned via policy (successful URL transformation)] + expected: FAIL + + [video.src assigned via policy (successful URL transformation)] + expected: FAIL + + [base.href assigned via policy (successful URL transformation)] + expected: FAIL + + [div.innerHTML assigned via policy (successful HTML transformation)] + expected: FAIL + + [iframe.srcdoc assigned via policy (successful HTML transformation)] + expected: FAIL + diff --git 
a/testing/web-platform/meta/trusted-types/HTMLIFrameElement-srcdoc.tentative.html.ini b/testing/web-platform/meta/trusted-types/HTMLIFrameElement-srcdoc.tentative.html.ini new file mode 100644 index 000000000000..05c1c222b305 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/HTMLIFrameElement-srcdoc.tentative.html.ini @@ -0,0 +1,4 @@ +[HTMLIFrameElement-srcdoc.tentative.html] + [iframe.srcdoc assigned via policy (successful HTML transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Location-assign.tentative.html.ini b/testing/web-platform/meta/trusted-types/Location-assign.tentative.html.ini new file mode 100644 index 000000000000..d56a292178f6 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Location-assign.tentative.html.ini @@ -0,0 +1,4 @@ +[Location-assign.tentative.html] + [location.assign via policy (successful URL transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Location-href.tentative.html.ini b/testing/web-platform/meta/trusted-types/Location-href.tentative.html.ini new file mode 100644 index 000000000000..549425916df9 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Location-href.tentative.html.ini @@ -0,0 +1,4 @@ +[Location-href.tentative.html] + [location.href assigned via policy (successful URL transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Location-replace.tentative.html.ini b/testing/web-platform/meta/trusted-types/Location-replace.tentative.html.ini new file mode 100644 index 000000000000..6fc7ba7d4210 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Location-replace.tentative.html.ini @@ -0,0 +1,4 @@ +[Location-replace.tentative.html] + [location.replace via policy (successful URL transformation).] 
+ expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Range-createContextualFragment.tentative.html.ini b/testing/web-platform/meta/trusted-types/Range-createContextualFragment.tentative.html.ini new file mode 100644 index 000000000000..67baf8f513b4 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Range-createContextualFragment.tentative.html.ini @@ -0,0 +1,4 @@ +[Range-createContextualFragment.tentative.html] + [range.createContextualFragment assigned via policy (successful HTML transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/TrustedScript.tentative.html.ini b/testing/web-platform/meta/trusted-types/TrustedScript.tentative.html.ini new file mode 100644 index 000000000000..4b15fad2bb0e --- /dev/null +++ b/testing/web-platform/meta/trusted-types/TrustedScript.tentative.html.ini @@ -0,0 +1,4 @@ +[TrustedScript.tentative.html] + [TrustedScript] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html.ini b/testing/web-platform/meta/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html.ini index 4d4e4bc55eac..52b8d7a81c5d 100644 --- a/testing/web-platform/meta/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html.ini +++ b/testing/web-platform/meta/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html.ini @@ -5,3 +5,99 @@ [TrustedTypePolicyFactory-createPolicy] expected: FAIL + [html = callback that throws] + expected: FAIL + + [script_url - calling undefined callback] + expected: FAIL + + [html - calling undefined callback] + expected: FAIL + + [script_url = identity function] + expected: FAIL + + [url = this without bind] + expected: FAIL + + [html = this bound to an object] + expected: FAIL + + [url = identity function, global string changed] + expected: FAIL + + [url = this bound to an object] + expected: FAIL + + [script_url = identity function, global string changed] + expected: FAIL + + 
[script_url = this bound to an object] + expected: FAIL + + [html = identity function, global string changed] + expected: FAIL + + [url = callback that throws] + expected: FAIL + + [url = identity function] + expected: FAIL + + [html = identity function] + expected: FAIL + + [script_url = this without bind] + expected: FAIL + + [url - calling undefined callback] + expected: FAIL + + [script_url = callback that throws] + expected: FAIL + + [html = this without bind] + expected: FAIL + + [html = string + global string] + expected: FAIL + + [script_url = string + global string] + expected: FAIL + + [script_url = null] + expected: FAIL + + [html = null] + expected: FAIL + + [url = string + global string] + expected: FAIL + + [url = null] + expected: FAIL + + [script = identity function] + expected: FAIL + + [script = identity function, global string changed] + expected: FAIL + + [script = this without bind] + expected: FAIL + + [script = this bound to an object] + expected: FAIL + + [script = callback that throws] + expected: FAIL + + [script - calling undefined callback] + expected: FAIL + + [script = null] + expected: FAIL + + [script = string + global string] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/Window-open.tentative.html.ini b/testing/web-platform/meta/trusted-types/Window-open.tentative.html.ini new file mode 100644 index 000000000000..443b1704368f --- /dev/null +++ b/testing/web-platform/meta/trusted-types/Window-open.tentative.html.ini @@ -0,0 +1,7 @@ +[Window-open.tentative.html] + [document.open via policy (successful URL transformation).] + expected: FAIL + + [window.open via policy (successful URL transformation).] 
+ expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html.ini new file mode 100644 index 000000000000..8664f6e0ee77 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html.ini @@ -0,0 +1,10 @@ +[block-string-assignment-to-DOMParser-parseFromString.tentative.html] + [`document.innerText = string` throws.] + expected: FAIL + + ['document.innerText = null' throws] + expected: FAIL + + [document.innerText assigned via policy (successful HTML transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Document-write.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Document-write.tentative.html.ini new file mode 100644 index 000000000000..46e7d13793c0 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Document-write.tentative.html.ini @@ -0,0 +1,10 @@ +[block-string-assignment-to-Document-write.tentative.html] + [`document.write(string)` throws] + expected: FAIL + + [document.write with html assigned via policy (successful URL transformation).] 
+ expected: FAIL + + [`document.write(null)` throws] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-innerHTML.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-innerHTML.tentative.html.ini new file mode 100644 index 000000000000..4c83e8d8a521 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-innerHTML.tentative.html.ini @@ -0,0 +1,4 @@ +[block-string-assignment-to-Element-innerHTML.tentative.html] + [block-string-assignment-to-Element-innerHTML] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html.ini new file mode 100644 index 000000000000..467daa842fba --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html.ini @@ -0,0 +1,10 @@ +[block-string-assignment-to-Element-insertAdjacentHTML.tentative.html] + [insertAdjacentHTML with html assigned via policy (successful HTML transformation).] + expected: FAIL + + [`insertAdjacentHTML(string)` throws.] + expected: FAIL + + [`insertAdjacentHTML(null)` throws.] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html.ini new file mode 100644 index 000000000000..7476afb94b10 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html.ini @@ -0,0 +1,10 @@ +[block-string-assignment-to-Element-outerHTML.tentative.html] + [`outerHTML = string` throws.] + expected: FAIL + + [`outerHTML = null` throws.] 
+ expected: FAIL + + [outerHTML with html assigned via policy (successful HTML transformation).] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html.ini index 8c35378bbd99..dde8843246d3 100644 --- a/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html.ini +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html.ini @@ -2,3 +2,54 @@ [block-string-assignment-to-HTMLElement-generic] expected: FAIL + [frame.src accepts only TrustedURL] + expected: FAIL + + [object.codeBase accepts only TrustedURL] + expected: FAIL + + [area.href accepts only TrustedURL] + expected: FAIL + + [track.src accepts only TrustedURL] + expected: FAIL + + [iframe.src accepts only TrustedURL] + expected: FAIL + + [base.href accepts only TrustedURL] + expected: FAIL + + [img.src accepts only TrustedURL] + expected: FAIL + + [source.src accepts only TrustedURL] + expected: FAIL + + [script.src accepts only TrustedScriptURL] + expected: FAIL + + [video.src accepts only TrustedURL] + expected: FAIL + + [embed.src accepts only TrustedScriptURL] + expected: FAIL + + [link.href accepts only TrustedURL] + expected: FAIL + + [a.href accepts only TrustedURL] + expected: FAIL + + [object.data accepts only TrustedURL] + expected: FAIL + + [input.src accepts only TrustedURL] + expected: FAIL + + [iframe.srcdoc accepts only TrustedHTML] + expected: FAIL + + [div.innerHTML accepts only TrustedHTML] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLIFrameElement-srcdoc.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLIFrameElement-srcdoc.tentative.html.ini new file mode 100644 index 000000000000..5c6f013b2c89 --- /dev/null +++ 
b/testing/web-platform/meta/trusted-types/block-string-assignment-to-HTMLIFrameElement-srcdoc.tentative.html.ini @@ -0,0 +1,10 @@ +[block-string-assignment-to-HTMLIFrameElement-srcdoc.tentative.html] + [iframe.srcdoc assigned via policy (successful HTML transformation).] + expected: FAIL + + [`iframe.srcdoc = null` throws.] + expected: FAIL + + [`iframe.srcdoc = string` throws.] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-assign.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-assign.tentative.html.ini new file mode 100644 index 000000000000..9d48964d399c --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-assign.tentative.html.ini @@ -0,0 +1,2 @@ +[block-string-assignment-to-Location-assign.tentative.html] + expected: TIMEOUT diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-href.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-href.tentative.html.ini new file mode 100644 index 000000000000..0b21d2ba331f --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-href.tentative.html.ini @@ -0,0 +1,2 @@ +[block-string-assignment-to-Location-href.tentative.html] + expected: TIMEOUT diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-replace.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-replace.tentative.html.ini new file mode 100644 index 000000000000..ef38d1dbca1e --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Location-replace.tentative.html.ini @@ -0,0 +1,2 @@ +[block-string-assignment-to-Location-replace.tentative.html] + expected: TIMEOUT diff --git 
a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Range-createContextualFragment.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Range-createContextualFragment.tentative.html.ini new file mode 100644 index 000000000000..af82b78e5a85 --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Range-createContextualFragment.tentative.html.ini @@ -0,0 +1,10 @@ +[block-string-assignment-to-Range-createContextualFragment.tentative.html] + [range.createContextualFragment assigned via policy (successful HTML transformation).] + expected: FAIL + + [`range.createContextualFragment(null)` throws.] + expected: FAIL + + [`range.createContextualFragment(string)` throws.] + expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-Window-open.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Window-open.tentative.html.ini new file mode 100644 index 000000000000..3825dc4860ef --- /dev/null +++ b/testing/web-platform/meta/trusted-types/block-string-assignment-to-Window-open.tentative.html.ini @@ -0,0 +1,19 @@ +[block-string-assignment-to-Window-open.tentative.html] + [window.open via policy (successful URL transformation).] + expected: FAIL + + [`window.open(string)` throws.] + expected: FAIL + + [document.open via policy (successful URL transformation).] + expected: FAIL + + [`document.open(null)` throws.] + expected: FAIL + + [`window.open(null)` throws.] + expected: FAIL + + [`document.open(string)` throws.] 
+ expected: FAIL + diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-assign.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-assign.tentative.html.ini deleted file mode 100644 index 859151c893ab..000000000000 --- a/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-assign.tentative.html.ini +++ /dev/null @@ -1,4 +0,0 @@ -[block-string-assignment-to-location-assign.tentative.html] - [block-string-assignment-to-location-assign] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-href.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-href.tentative.html.ini deleted file mode 100644 index 6ea9459988d6..000000000000 --- a/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-href.tentative.html.ini +++ /dev/null @@ -1,19 +0,0 @@ -[block-string-assignment-to-location-href.tentative.html] - [`location.href = string` throws] - expected: FAIL - - [Basic processing: safe URL, safe construction.] - expected: FAIL - - [Basic processing: safe URL, unsafe construction.] - expected: FAIL - - [Basic processing: javascript URL, safe construction.] - expected: FAIL - - [Basic processing: javascript URL, unsafe construction.] 
- expected: FAIL - - [block-string-assignment-to-location-href] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-replace.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-replace.tentative.html.ini deleted file mode 100644 index 3a5f85ed203d..000000000000 --- a/testing/web-platform/meta/trusted-types/block-string-assignment-to-location-replace.tentative.html.ini +++ /dev/null @@ -1,4 +0,0 @@ -[block-string-assignment-to-location-replace.tentative.html] - [block-string-assignment-to-location-replace] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/block-string-assignment-to-window-open.tentative.html.ini b/testing/web-platform/meta/trusted-types/block-string-assignment-to-window-open.tentative.html.ini deleted file mode 100644 index 190838515e2e..000000000000 --- a/testing/web-platform/meta/trusted-types/block-string-assignment-to-window-open.tentative.html.ini +++ /dev/null @@ -1,28 +0,0 @@ -[block-string-assignment-to-window-open.tentative.html] - [window.open: safe URL, safe construction.] - expected: FAIL - - [window.open: safe URL, unsafe construction.] - expected: FAIL - - [document.open: safe URL, safe construction.] - expected: FAIL - - [document.open: safe URL, unsafe construction.] - expected: FAIL - - [`window.open(string)` throws.] - expected: FAIL - - [`document.open(string)` throws.] - expected: FAIL - - [`window.open(null)` throws.] - expected: FAIL - - [`document.open(null)` throws.] 
- expected: FAIL - - [block-string-assignment-to-window-open] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/document-write.tentative.html.ini b/testing/web-platform/meta/trusted-types/document-write.tentative.html.ini deleted file mode 100644 index b735e5f88310..000000000000 --- a/testing/web-platform/meta/trusted-types/document-write.tentative.html.ini +++ /dev/null @@ -1,7 +0,0 @@ -[document-write.tentative.html] - [document.write(TrustedHTML).] - expected: FAIL - - [document-write] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/location-assign.tentative.html.ini b/testing/web-platform/meta/trusted-types/location-assign.tentative.html.ini deleted file mode 100644 index ef99c8b6acdd..000000000000 --- a/testing/web-platform/meta/trusted-types/location-assign.tentative.html.ini +++ /dev/null @@ -1,10 +0,0 @@ -[location-assign.tentative.html] - [Basic processing: safe URL, safe construction.] - expected: FAIL - - [Basic processing: safe URL, unsafe construction.] - expected: FAIL - - [location-assign] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/location-href.tentative.html.ini b/testing/web-platform/meta/trusted-types/location-href.tentative.html.ini deleted file mode 100644 index 1f563a0ab942..000000000000 --- a/testing/web-platform/meta/trusted-types/location-href.tentative.html.ini +++ /dev/null @@ -1,16 +0,0 @@ -[location-href.tentative.html] - [Basic processing: safe URL, safe construction.] - expected: FAIL - - [Basic processing: safe URL, unsafe construction.] - expected: FAIL - - [Basic processing: javascript URL, safe construction.] - expected: FAIL - - [Basic processing: javascript URL, unsafe construction.] 
- expected: FAIL - - [location-href] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/location-replace.tentative.html.ini b/testing/web-platform/meta/trusted-types/location-replace.tentative.html.ini deleted file mode 100644 index 29c39f4c916d..000000000000 --- a/testing/web-platform/meta/trusted-types/location-replace.tentative.html.ini +++ /dev/null @@ -1,10 +0,0 @@ -[location-replace.tentative.html] - [Basic processing: safe URL, safe construction.] - expected: FAIL - - [Basic processing: safe URL, unsafe construction.] - expected: FAIL - - [location-replace] - expected: FAIL - diff --git a/testing/web-platform/meta/trusted-types/window-open.tentative.html.ini b/testing/web-platform/meta/trusted-types/window-open.tentative.html.ini deleted file mode 100644 index f8badcbfc007..000000000000 --- a/testing/web-platform/meta/trusted-types/window-open.tentative.html.ini +++ /dev/null @@ -1,16 +0,0 @@ -[window-open.tentative.html] - [window.open: safe URL, safe construction.] - expected: FAIL - - [window.open: safe URL, unsafe construction.] - expected: FAIL - - [document.open: safe URL, safe construction.] - expected: FAIL - - [document.open: safe URL, unsafe construction.] 
- expected: FAIL - - [window-open] - expected: FAIL - diff --git a/testing/web-platform/meta/wake-lock/wakelock-type.https.html.ini b/testing/web-platform/meta/wake-lock/wakelock-type.https.html.ini index 99e8a000432b..05ee77666b68 100644 --- a/testing/web-platform/meta/wake-lock/wakelock-type.https.html.ini +++ b/testing/web-platform/meta/wake-lock/wakelock-type.https.html.ini @@ -17,3 +17,9 @@ ['NotSupportedError' is thrown when set an unsupported wake lock type] expected: FAIL + ['TypeError' is thrown when set an unsupported wake lock type] + expected: FAIL + + ['TypeError' is thrown when set an empty wake lock type] + expected: FAIL + diff --git a/testing/web-platform/meta/webdriver/tests/minimize_window/user_prompts.py.ini b/testing/web-platform/meta/webdriver/tests/minimize_window/user_prompts.py.ini deleted file mode 100644 index 52eb5b23cca5..000000000000 --- a/testing/web-platform/meta/webdriver/tests/minimize_window/user_prompts.py.ini +++ /dev/null @@ -1,3 +0,0 @@ -[user_prompts.py] - disabled: - if not debug and not webrender and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86") and (bits == 32): wpt-sync Bug 1449780 diff --git a/testing/web-platform/meta/websockets/basic-auth.any.js.ini b/testing/web-platform/meta/websockets/basic-auth.any.js.ini index f08ba7c485e5..e1b2a2fe9978 100644 --- a/testing/web-platform/meta/websockets/basic-auth.any.js.ini +++ b/testing/web-platform/meta/websockets/basic-auth.any.js.ini @@ -6,3 +6,8 @@ [basic-auth.any.worker.html] [basic-auth.any.sharedworker.html] + +[basic-auth.any.serviceworker.html] + [HTTP basic authentication should work with WebSockets] + expected: FAIL + diff --git a/testing/web-platform/mozilla/meta/MANIFEST.json b/testing/web-platform/mozilla/meta/MANIFEST.json index a01cd2520d04..71ce897ec08f 100644 --- a/testing/web-platform/mozilla/meta/MANIFEST.json +++ b/testing/web-platform/mozilla/meta/MANIFEST.json @@ -1766,5 +1766,5 @@ ] }, "url_base": "/_mozilla/", - 
"version": 4 + "version": 5 } diff --git a/testing/web-platform/tests/async-local-storage/api-surface.tentative.https.html b/testing/web-platform/tests/async-local-storage/api-surface.tentative.https.html new file mode 100644 index 000000000000..eea51abd5397 --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/api-surface.tentative.https.html @@ -0,0 +1,65 @@ + + +Async local storage API surface + + + + + diff --git a/testing/web-platform/tests/async-local-storage/helpers/als-tests.js b/testing/web-platform/tests/async-local-storage/helpers/als-tests.js new file mode 100644 index 000000000000..fd6d6844f72e --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/helpers/als-tests.js @@ -0,0 +1,72 @@ +import { StorageArea, storage as defaultArea } from "std:async-local-storage"; +import { assertArrayCustomEquals } from "./equality-asserters.js"; + +export function testWithArea(testFn, description) { + promise_test(t => { + const area = new StorageArea(description); + t.add_cleanup(t => area.clear()); + + return testFn(area, t); + }, description); +} + +export function testWithDefaultArea(testFn, description) { + promise_test(t => { + t.add_cleanup(t => defaultArea.clear()); + + return testFn(defaultArea, t); + }, description); +} + +// These two functions take a key/value and use them to test +// set()/get()/delete()/has()/keys()/values()/entries(). The keyEqualityAsserter should be a +// function from ./equality-asserters.js. 
+ +export function testVariousMethodsWithDefaultArea(label, key, value, keyEqualityAsserter) { + testWithDefaultArea(testVariousMethodsInner(key, value, keyEqualityAsserter), label); +} + +export function testVariousMethods(label, key, value, keyEqualityAsserter) { + testWithArea(testVariousMethodsInner(key, value, keyEqualityAsserter), label); +} + +function testVariousMethodsInner(key, value, keyEqualityAsserter) { + return async area => { + await assertPromiseEquals(area.set(key, value), undefined, "set()", "undefined"); + + await assertPromiseEquals(area.get(key), value, "get()", "the set value"); + await assertPromiseEquals(area.has(key), true, "has()", "true"); + + const keysPromise = area.keys(); + assertIsPromise(keysPromise, "keys()"); + assertArrayCustomEquals(await keysPromise, [key], keyEqualityAsserter, "keys() must have the key"); + + const valuesPromise = area.values(); + assertIsPromise(valuesPromise); + assert_array_equals(await valuesPromise, [value], "values() must have the value"); + + const entriesPromise = area.entries(); + assertIsPromise(entriesPromise, "entries()"); + const entries = await entriesPromise; + assert_true(Array.isArray(entries), "entries() must give an array"); + assert_equals(entries.length, 1, "entries() must have only one value"); + assert_true(Array.isArray(entries[0]), "entries() 0th element must be an array"); + assert_equals(entries[0].length, 2, "entries() 0th element must have 2 elements"); + keyEqualityAsserter(entries[0][0], key, "entries() 0th element's 0th element must be the key"); + assert_equals(entries[0][1], value, "entries() 0th element's 1st element must be the value"); + + await assertPromiseEquals(area.delete(key), undefined, "delete()", "undefined"); + + await assertPromiseEquals(area.get(key), undefined, "get()", "undefined after deleting"); + await assertPromiseEquals(area.has(key), false, "has()", "false after deleting"); + }; +} + +async function assertPromiseEquals(promise, expected, label, 
expectedLabel) { + assertIsPromise(promise, label); + assert_equals(await promise, expected, label + " must fulfill with " + expectedLabel); +} + +function assertIsPromise(promise, label) { + assert_equals(promise.constructor, Promise, label + " must return a promise"); +} diff --git a/testing/web-platform/tests/async-local-storage/helpers/class-assert.js b/testing/web-platform/tests/async-local-storage/helpers/class-assert.js new file mode 100644 index 000000000000..31b25cab9f2d --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/helpers/class-assert.js @@ -0,0 +1,107 @@ +export function isConstructor(o) { + assert_equals(typeof o, "function", "Must be a function according to typeof"); + assert_true(isConstructorTest(o), "Must be a constructor according to the meta-object protocol"); + assert_throws(new TypeError(), () => o(), "Attempting to call (not construct) must throw"); +} + +export function functionLength(o, expected, label) { + const lengthExpected = { writable: false, enumerable: false, configurable: true }; + const { value } = propertyDescriptor(o, "length", lengthExpected); + + assert_equals(value, expected, `${formatLabel(label)}length value`); +} + +export function functionName(o, expected, label) { + const lengthExpected = { writable: false, enumerable: false, configurable: true }; + const { value } = propertyDescriptor(o, "name", lengthExpected); + + assert_equals(value, expected, `${formatLabel(label)}name value`); +} + +export function hasClassPrototype(o) { + const prototypeExpected = { writable: false, enumerable: false, configurable: false }; + const { value } = propertyDescriptor(o, "prototype", prototypeExpected); + assert_equals(typeof value, "object", "prototype must be an object"); + assert_not_equals(value, null, "prototype must not be null"); +} + +export function hasPrototypeConstructorLink(klass) { + const constructorExpected = { writable: true, enumerable: false, configurable: true }; + const { value } = 
propertyDescriptor(klass.prototype, "constructor", constructorExpected); + assert_equals(value, klass, "constructor property must match"); +} + +export function propertyKeys(o, expectedNames, expectedSymbols, label) { + label = formatLabel(label); + assert_array_equals(Object.getOwnPropertyNames(o), expectedNames, `${label}property names`); + assert_array_equals(Object.getOwnPropertySymbols(o), expectedSymbols, + `${label}property symbols`); +} + +export function methods(o, expectedMethods) { + for (const [name, length] of Object.entries(expectedMethods)) { + method(o, name, length); + } +} + +export function accessors(o, expectedAccessors) { + for (const [name, accessorTypes] of Object.entries(expectedAccessors)) { + accessor(o, name, accessorTypes); + } +} + +function method(o, prop, length) { + const methodExpected = { writable: true, enumerable: false, configurable: true }; + const { value } = propertyDescriptor(o, prop, methodExpected); + + assert_equals(typeof value, "function", `${prop} method must be a function according to typeof`); + assert_false(isConstructorTest(value), + `${prop} method must not be a constructor according to the meta-object protocol`); + functionLength(value, length, prop); + functionName(value, prop, prop); + propertyKeys(value, ["length", "name"], [], prop); +} + +function accessor(o, prop, expectedAccessorTypes) { + const accessorExpected = { enumerable: false, configurable: true }; + const propDesc = propertyDescriptor(o, prop, accessorExpected); + + for (const possibleType of ["get", "set"]) { + const accessorFunc = propDesc[possibleType]; + if (expectedAccessorTypes.includes(possibleType)) { + const label = `${prop}'s ${possibleType}ter`; + + assert_equals(typeof accessorFunc, "function", + `${label} must be a function according to typeof`); + assert_false(isConstructorTest(accessorFunc), + `${label} must not be a constructor according to the meta-object protocol`); + + functionLength(accessorFunc, possibleType === "get" ? 
0 : 1, label); + functionName(accessorFunc, `${possibleType} ${prop}`, label); + propertyKeys(accessorFunc, ["length", "name"], [], label); + } else { + assert_equals(accessorFunc, undefined, `${prop} must not have a ${possibleType}ter`); + } + } +} + +function propertyDescriptor(obj, prop, mustMatch) { + const propDesc = Object.getOwnPropertyDescriptor(obj, prop); + for (const key in Object.keys(mustMatch)) { + assert_equals(propDesc[key], mustMatch[key], `${prop} ${key}`); + } + return propDesc; +} + +function isConstructorTest(o) { + try { + new (new Proxy(o, {construct: () => ({})})); + return true; + } catch (e) { + return false; + } +} + +function formatLabel(label) { + return label !== undefined ? ` ${label}` : ""; +} diff --git a/testing/web-platform/tests/async-local-storage/helpers/equality-asserters.js b/testing/web-platform/tests/async-local-storage/helpers/equality-asserters.js new file mode 100644 index 000000000000..ad4623c179d7 --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/helpers/equality-asserters.js @@ -0,0 +1,37 @@ +export function assertEqualDates(actual, expected, label) { + assert_equals(expected.constructor, Date, + "assertEqualDates usage check: expected must be a Date"); + + const labelPart = label === undefined ? "" : `${label}: `; + assert_equals(actual.constructor, Date, `${labelPart}must be a Date`); + assert_equals(actual.valueOf(), expected.valueOf(), `${labelPart}timestamps must match`); +} + +export function assertEqualArrayBuffers(actual, expected, label) { + assert_equals(expected.constructor, ArrayBuffer, + "assertEqualArrayBuffers usage check: expected must be an ArrayBuffer"); + + const labelPart = label === undefined ? 
"" : `${label}: `; + assert_equals(actual.constructor, ArrayBuffer, `${labelPart}must be an ArrayBuffer`); + assert_array_equals(new Uint8Array(actual), new Uint8Array(expected), `${labelPart}must match`); +} + +export function assertArrayBufferEqualsABView(actual, expected, label) { + assert_true(ArrayBuffer.isView(expected), + "assertArrayBufferEqualsABView usage check: expected must be an ArrayBuffer view"); + + assertEqualArrayBuffers(actual, expected.buffer, label); +} + +export function assertArrayCustomEquals(actual, expected, equalityAsserter, label) { + assert_true(Array.isArray(expected), + "assertArrayCustomEquals usage check: expected must be an Array"); + + const labelPart = label === undefined ? "" : `${label}: `; + assert_true(Array.isArray(actual), `${labelPart}must be an array`); + assert_equals(actual.length, expected.length, `${labelPart}length must be as expected`); + + for (let i = 0; i < actual.length; ++i) { + equalityAsserter(actual[i], expected[i], `${labelPart}index ${i}`); + } +} diff --git a/testing/web-platform/tests/async-local-storage/key-types.tentative.https.html b/testing/web-platform/tests/async-local-storage/key-types.tentative.https.html new file mode 100644 index 000000000000..771ee2f9749a --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/key-types.tentative.https.html @@ -0,0 +1,66 @@ + + +Async local storage: tests against various key types + + + + + diff --git a/testing/web-platform/tests/async-local-storage/non-secure-context-dynamic-import.tentative.html b/testing/web-platform/tests/async-local-storage/non-secure-context-dynamic-import.tentative.html new file mode 100644 index 000000000000..9270f6c82fa2 --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/non-secure-context-dynamic-import.tentative.html @@ -0,0 +1,18 @@ + + +Async local storage: should not work in non-secure contexts when included via import() + + + + + diff --git 
a/testing/web-platform/tests/async-local-storage/non-secure-context-import-statement.tentative.html b/testing/web-platform/tests/async-local-storage/non-secure-context-import-statement.tentative.html new file mode 100644 index 000000000000..879729696dbb --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/non-secure-context-import-statement.tentative.html @@ -0,0 +1,27 @@ + + +Async local storage: should not work in non-secure contexts when included via an import statement + + + + + + + diff --git a/testing/web-platform/tests/async-local-storage/non-secure-context-script-element.tentative.html b/testing/web-platform/tests/async-local-storage/non-secure-context-script-element.tentative.html new file mode 100644 index 000000000000..feeddafc8daa --- /dev/null +++ b/testing/web-platform/tests/async-local-storage/non-secure-context-script-element.tentative.html @@ -0,0 +1,25 @@ + + +Async local storage: should not work in non-secure contexts when included via a script element + + + + + + + diff --git a/testing/web-platform/tests/async-local-storage/storage-smoke-test.https.tentative.html b/testing/web-platform/tests/async-local-storage/storage-smoke-test.tentative.https.html similarity index 52% rename from testing/web-platform/tests/async-local-storage/storage-smoke-test.https.tentative.html rename to testing/web-platform/tests/async-local-storage/storage-smoke-test.tentative.https.html index b4d66dabc7a1..f978480ff2b8 100644 --- a/testing/web-platform/tests/async-local-storage/storage-smoke-test.https.tentative.html +++ b/testing/web-platform/tests/async-local-storage/storage-smoke-test.tentative.https.html @@ -6,6 +6,7 @@ diff --git a/testing/web-platform/tests/background-fetch/fetch.https.window.js b/testing/web-platform/tests/background-fetch/fetch.https.window.js index 843506947f71..33c8124ffa45 100644 --- a/testing/web-platform/tests/background-fetch/fetch.https.window.js +++ b/testing/web-platform/tests/background-fetch/fetch.https.window.js @@ 
-84,3 +84,15 @@ backgroundFetchTest(async (test, backgroundFetch) => { assert_equals(results[0].text, 'Background Fetch'); }, 'Using Background Fetch to successfully fetch a single resource'); + +backgroundFetchTest(async (test, backgroundFetch) => { + const registrationId = uniqueId(); + + // Very large download total that will definitely exceed the quota. + const options = {downloadTotal: Number.MAX_SAFE_INTEGER}; + await promise_rejects( + test, "QUOTA_EXCEEDED_ERR", + backgroundFetch.fetch(registrationId, 'resources/feature-name.txt', options), + 'This fetch should have thrown a quota exceeded error'); + +}, 'Background Fetch that exceeds the quota throws a QuotaExceededError'); diff --git a/testing/web-platform/tests/css/compositing/parsing/background-blend-mode-invalid.html b/testing/web-platform/tests/css/compositing/parsing/background-blend-mode-invalid.html index 1983adad24de..f939195f3e5c 100644 --- a/testing/web-platform/tests/css/compositing/parsing/background-blend-mode-invalid.html +++ b/testing/web-platform/tests/css/compositing/parsing/background-blend-mode-invalid.html @@ -8,7 +8,7 @@ - + - + - + - + - + - + - - - - diff --git a/testing/web-platform/tests/css/css-backgrounds/parsing/background-attachment-invalid.html b/testing/web-platform/tests/css/css-backgrounds/parsing/background-attachment-invalid.html index 0af7394aa428..68b18dcc2d06 100644 --- a/testing/web-platform/tests/css/css-backgrounds/parsing/background-attachment-invalid.html +++ b/testing/web-platform/tests/css/css-backgrounds/parsing/background-attachment-invalid.html @@ -8,7 +8,7 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + diff --git a/testing/web-platform/tests/css/css-backgrounds/parsing/box-shadow-valid.html b/testing/web-platform/tests/css/css-backgrounds/parsing/box-shadow-valid.html index 29bb263bb0dd..be72b78cbbe4 100644 --- 
a/testing/web-platform/tests/css/css-backgrounds/parsing/box-shadow-valid.html +++ b/testing/web-platform/tests/css/css-backgrounds/parsing/box-shadow-valid.html @@ -3,12 +3,14 @@ CSS Backgrounds and Borders Module Level 3: parsing box-shadow with valid values + + - + diff --git a/testing/web-platform/tests/css/css-backgrounds/support/parsing-testcommon.js b/testing/web-platform/tests/css/css-backgrounds/parsing/support/parsing-testcommon.js similarity index 100% rename from testing/web-platform/tests/css/css-backgrounds/support/parsing-testcommon.js rename to testing/web-platform/tests/css/css-backgrounds/parsing/support/parsing-testcommon.js diff --git a/testing/web-platform/tests/css/css-box/parsing/clear-invalid.html b/testing/web-platform/tests/css/css-box/parsing/clear-invalid.html index a91e61f97e21..636704330e3c 100644 --- a/testing/web-platform/tests/css/css-box/parsing/clear-invalid.html +++ b/testing/web-platform/tests/css/css-box/parsing/clear-invalid.html @@ -8,7 +8,7 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + +
+
+ diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html new file mode 100644 index 000000000000..953328982032 --- /dev/null +++ b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-display.html @@ -0,0 +1,41 @@ + +fieldset and CSS display + + + +
x
+
x
+
x
+ diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-flexbox.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-flexbox.html new file mode 100644 index 000000000000..d2b1d30d8cc8 --- /dev/null +++ b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-flexbox.html @@ -0,0 +1,71 @@ + +fieldset and CSS Flexbox + + + +
+
1
+
2
+
3
+
4
+
5
+
6
+
7
+
8
+
9
+
+
+
+
1
+
2
+
3
+
4
+
5
+
6
+
7
+
8
+
9
+
+
+
+
1
+
2
+
3
+
4
+
5
+
6
+
7
+
8
+
9
+
+
+
1
+
2
+
3
+
4
+
5
+
6
+
7
+
8
+
9
+
+ diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-multicol.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-multicol.html new file mode 100644 index 000000000000..bdb2c2fd9468 --- /dev/null +++ b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/fieldset-multicol.html @@ -0,0 +1,29 @@ + +fieldset multicol + + + +
+

1 +

2 +

3 +

4 +

5 +

+
+

1 +

2 +

3 +

4 +

5 +

+ diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative-ref.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative-ref.html new file mode 100644 index 000000000000..fd6c11a00566 --- /dev/null +++ b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative-ref.html @@ -0,0 +1,10 @@ + +Reference for legend position: relative + +

There should be no red.

+
diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html new file mode 100644 index 000000000000..efd55ab22db7 --- /dev/null +++ b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/legend-position-relative.html @@ -0,0 +1,11 @@ + +legend position: relative + + +

There should be no red.

+
+
diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html new file mode 100644 index 000000000000..92c33029701b --- /dev/null +++ b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/min-inline-size.html @@ -0,0 +1,39 @@ + +fieldset min-inline-size + + + +
+
+
+ diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/min-width-not-important.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/min-width-not-important.html deleted file mode 100644 index cabbaf20e656..000000000000 --- a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/min-width-not-important.html +++ /dev/null @@ -1,54 +0,0 @@ - - - - - Rendering requirements test (suggested default rendering): fieldset min-width is overridable - - - - - - - - - - -
-
-
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
-
-
-

Test passes if there is a filled green square and no red.

-
- - diff --git a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/ref.html b/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/ref.html deleted file mode 100644 index d17268a786bc..000000000000 --- a/testing/web-platform/tests/html/rendering/non-replaced-elements/the-fieldset-element-0/ref.html +++ /dev/null @@ -1,21 +0,0 @@ - - - - - Rendering requirements Reftest Reference - - - -
-

Test passes if there is a filled green square and no red.

- - diff --git a/testing/web-platform/tests/html/semantics/scripting-1/the-script-element/script-charset-02.html b/testing/web-platform/tests/html/semantics/scripting-1/the-script-element/script-charset-02.html index 77a015bb71f0..63cbe838e017 100644 --- a/testing/web-platform/tests/html/semantics/scripting-1/the-script-element/script-charset-02.html +++ b/testing/web-platform/tests/html/semantics/scripting-1/the-script-element/script-charset-02.html @@ -1,40 +1,41 @@ - - Script @type: unknown parameters + Script encoding for document encoding windows-1250 - + +
+ + + - + --> + diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/001.html b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/001.html deleted file mode 100644 index f82d80be7b6b..000000000000 --- a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/001.html +++ /dev/null @@ -1,20 +0,0 @@ - -Replacement of window object after document.open - - -
- - diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/008.html b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/008.html deleted file mode 100644 index 36c0a3f3ea11..000000000000 --- a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/008.html +++ /dev/null @@ -1,20 +0,0 @@ - -Replacement of document prototype object after document.open - - -
- - diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/009.https.html b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/009.https.html deleted file mode 100644 index 0d104c9569b3..000000000000 --- a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/009.https.html +++ /dev/null @@ -1,34 +0,0 @@ - -document.open replacing singleton - - -
- - diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015-1.html b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015-1.html index dffbac0c0411..c325bd08015f 100644 --- a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015-1.html +++ b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/015-1.html @@ -5,7 +5,7 @@ onload = function() { parent.tests[0].step(function() {parent.assert_equals(document.open(), document)}); document.write(" diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js new file mode 100644 index 000000000000..d4a9296fca66 --- /dev/null +++ b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/no-new-global.window.js @@ -0,0 +1,57 @@ +// In an earlier version of the HTML Standard, document open steps created a +// new JavaScript realm and migrated the existing objects to use the new realm. +// Test that this no longer happens. 
+ +async_test(t => { + const frame = document.body.appendChild(document.createElement("iframe")); + // Ensure a load event gets dispatched to unblock testharness + t.add_cleanup(() => frame.remove()); + frame.src = "resources/global-variables-frame.html"; + frame.onload = t.step_func_done(() => { + assert_equals(frame.contentWindow.hey, "You", "precondition"); + frame.contentDocument.open(); + assert_equals(frame.contentWindow.hey, "You", "actual check"); + }); +}, "Obtaining a variable from a global whose document had open() invoked"); + +function testIdentity(desc, frameToObject, frameToConstructor) { + async_test(t => { + const frame = document.body.appendChild(document.createElement("iframe")); + // Ensure a load event gets dispatched to unblock testharness + t.add_cleanup(() => frame.remove()); + frame.src = "/common/blank.html"; + frame.onload = t.step_func_done(() => { + const obj = frameToObject(frame); + frame.contentDocument.open(); + assert_equals(frameToObject(frame), obj); + }); + }, `${desc} maintains object identity through open()`); + + async_test(t => { + const frame = document.body.appendChild(document.createElement("iframe")); + // Ensure a load event gets dispatched to unblock testharness + t.add_cleanup(() => frame.remove()); + frame.src = "/common/blank.html"; + frame.onload = t.step_func_done(() => { + const obj = frameToObject(frame); + const origProto = Object.getPrototypeOf(obj); + const origCtor = frameToConstructor(frame); + const sym = Symbol(); + obj[sym] = "foo"; + frame.contentDocument.open(); + assert_equals(frameToObject(frame)[sym], "foo"); + assert_true(frameToObject(frame) instanceof origCtor); + assert_equals(Object.getPrototypeOf(frameToObject(frame)), origProto); + assert_equals(frameToConstructor(frame), origCtor); + }); + }, `${desc} maintains its prototype and properties through open()`); +} + +testIdentity("Document", frame => frame.contentDocument, frame => frame.contentWindow.Document); +testIdentity("WindowProxy", 
frame => frame.contentWindow, frame => frame.contentWindow.Window); +testIdentity("BarProp", frame => frame.contentWindow.locationbar, frame => frame.contentWindow.BarProp); +testIdentity("History", frame => frame.contentWindow.history, frame => frame.contentWindow.History); +testIdentity("localStorage", frame => frame.contentWindow.localStorage, frame => frame.contentWindow.Storage); +testIdentity("Location", frame => frame.contentWindow.location, frame => frame.contentWindow.Location); +testIdentity("sessionStorage", frame => frame.contentWindow.sessionStorage, frame => frame.contentWindow.Storage); +testIdentity("Navigator", frame => frame.contentWindow.navigator, frame => frame.contentWindow.Navigator); diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/global-variables-frame.html b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/global-variables-frame.html new file mode 100644 index 000000000000..0fe189914c37 --- /dev/null +++ b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/resources/global-variables-frame.html @@ -0,0 +1,4 @@ + + diff --git a/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/unload.window.js b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/unload.window.js new file mode 100644 index 000000000000..e275a4987a08 --- /dev/null +++ b/testing/web-platform/tests/html/webappapis/dynamic-markup-insertion/opening-the-input-stream/unload.window.js @@ -0,0 +1,19 @@ +// In an earlier version of the HTML Standard, document open steps had "unload +// document" as a step. Test that this no longer happens. 
+ +async_test(t => { + const frame = document.body.appendChild(document.createElement("iframe")); + t.add_cleanup(() => frame.remove()); + frame.src = "/common/blank.html"; + frame.onload = t.step_func(() => { + frame.contentWindow.onpagehide = t.unreached_func("onpagehide got called"); + frame.contentDocument.onvisibilitychange = t.unreached_func("onvisibilitychange got called"); + frame.contentWindow.onunload = t.unreached_func("onunload got called"); + frame.contentDocument.open(); + t.step_timeout(t.step_func_done(() => { + // If none of the three events have been fired by this point, we consider + // the test a success. `frame.remove()` above will allow the `load` event + // to be fired on the top-level Window, thus unblocking testharness. + }), 500); + }); +}, "document.open(): Do not fire pagehide, visibilitychange, or unload events"); diff --git a/testing/web-platform/tests/network-error-logging/META.yml b/testing/web-platform/tests/network-error-logging/META.yml new file mode 100644 index 000000000000..bc063177d1d1 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/META.yml @@ -0,0 +1,2 @@ +suggested_reviewers: + - dcreager diff --git a/testing/web-platform/tests/network-error-logging/README.md b/testing/web-platform/tests/network-error-logging/README.md new file mode 100644 index 000000000000..7cf2c6fdceed --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/README.md @@ -0,0 +1,74 @@ +# Network Error Logging + +The tests in this directory exercise the user agent's implementation of [Network +Error Logging](https://w3c.github.io/network-error-logging/) and +[Reporting](https://w3c.github.io/reporting/). + +## Collector + +Each test case generates a unique `reportID` that is used to distinguish the NEL +reports generated by that test case. + +The [support/report.py][] file is a [Python file handler][] that can be used as +a Reporting collector. 
Its default operation is to save any reports that it +receives into the [stash][]. If you pass in the optional `op` URL parameter, +with a value of `retrieve_report`, it will instead return a list of all of the +reports received for a particular `reportID`. + +[Python file handler]: https://wptserve.readthedocs.io/en/latest/handlers.html#python-file-handlers +[stash]: https://wptserve.readthedocs.io/en/latest/stash.html +[support/report.py]: support/report.py + +## Installing NEL policies + +NEL reports are only generated if the user agent has received a NEL policy for +the origin of the request. The current request counts; if its response contains +a policy, that policy is used for the current request and all future requests, +modulo the policy's `max_age` field. + +Most of the test cases will therefore make a request or two to install NEL +policies, and then make another request that should or should not be covered by +those policies. It will then assert that NEL reports were or were not created, +as required by the spec. + +The [support][] directory contains several images, each of which defines a +particular "kind" of NEL policy (e.g., `include_subdomains` set vs unset, no +policy at all, etc.). The [support/nel.sub.js][] file contains helper +JavaScript methods for requesting those images, so that the test cases +themselves are more descriptive. + +[support]: support +[support/nel.sub.js]: support/nel.sub.js + +## Avoiding spurious reports + +NEL policies apply to **all** future requests to the origin. We therefore serve +all of the test case's "infrastructure" (the test case itself, +[support/report.py][] and [support/nel.sub.js][]) on a different origin than +the requests that exercise the NEL implementation. That ensures that we don't +have to wade through NEL reports about the infrastructure when verifying the NEL +reports about the requests that we care about. 
+ +## Browser configuration + +You must configure your browser's Reporting implementation to upload reports for +a request immediately. The test cases do not currently have any timeouts; they +assume that as soon as the Fetch API promise resolves, any NEL reports for the +request have already been uploaded. + +## Test parallelism + +Because NEL policies are stored in a global cache in the user agent, we need to +run the tests in this directory serially instead of in parallel. We implement a +simple spin-lock in [support/lock.py][] to ensure that only one test is allowed +to perform any NEL-related requests at a time. + +[support/lock.py]: support/lock.py + +## CORS preflights + +Reporting uploads are subject to CORS preflights. We want to test normal +operation (when preflight requests succeed) as well as failures of the CORS +preflight logic in the user agent. To support this, our test collector is +configured to always reject the CORS preflight for a single domain (www2), and +to always grant the CORS preflight for all other test subdomains. 
diff --git a/testing/web-platform/tests/network-error-logging/no-report-on-failed-cors-preflight.https.html b/testing/web-platform/tests/network-error-logging/no-report-on-failed-cors-preflight.https.html new file mode 100644 index 000000000000..3a35651b4ef5 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/no-report-on-failed-cors-preflight.https.html @@ -0,0 +1,26 @@ + + + + + Test that NEL reports are not sent if the CORS preflight fails + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/no-report-on-subdomain-404.https.html b/testing/web-platform/tests/network-error-logging/no-report-on-subdomain-404.https.html new file mode 100644 index 000000000000..462f99e84231 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/no-report-on-subdomain-404.https.html @@ -0,0 +1,30 @@ + + + + + Test that include_subdomains policies do NOT report HTTP errors + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/no-report-on-subdomain-success.https.html b/testing/web-platform/tests/network-error-logging/no-report-on-subdomain-success.https.html new file mode 100644 index 000000000000..5fd6d4fb4123 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/no-report-on-subdomain-success.https.html @@ -0,0 +1,30 @@ + + + + + Test that include_subdomains policies do NOT report successful requests + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/reports-are-not-observable.https.html b/testing/web-platform/tests/network-error-logging/reports-are-not-observable.https.html new file mode 100644 index 000000000000..35ab4f3c2350 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/reports-are-not-observable.https.html @@ -0,0 +1,29 @@ + + + + + Test that NEL reports are not observable from JavaScript + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/sends-report-on-404.https.html 
b/testing/web-platform/tests/network-error-logging/sends-report-on-404.https.html new file mode 100644 index 000000000000..38bdc014501e --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/sends-report-on-404.https.html @@ -0,0 +1,42 @@ + + + + + Test that NEL reports are sent for HTTP errors + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/sends-report-on-subdomain-dns-failure.https.html b/testing/web-platform/tests/network-error-logging/sends-report-on-subdomain-dns-failure.https.html new file mode 100644 index 000000000000..8913857af8ac --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/sends-report-on-subdomain-dns-failure.https.html @@ -0,0 +1,46 @@ + + + + + Test that include_subdomains policies report DNS failures for subdomains + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html b/testing/web-platform/tests/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html new file mode 100644 index 000000000000..fce12cd3e96c --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/sends-report-on-success-with-subdomain-policy.https.html @@ -0,0 +1,40 @@ + + + + + Test that NEL reports are sent for successful requests + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/sends-report-on-success.https.html b/testing/web-platform/tests/network-error-logging/sends-report-on-success.https.html new file mode 100644 index 000000000000..68fddaa0c70b --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/sends-report-on-success.https.html @@ -0,0 +1,37 @@ + + + + + Test that NEL reports are sent for successful requests + + + + + + + + + diff --git a/testing/web-platform/tests/network-error-logging/support/clear-policy-pass.png b/testing/web-platform/tests/network-error-logging/support/clear-policy-pass.png new file mode 100644 index 
000000000000..2fa1e0ac0663 Binary files /dev/null and b/testing/web-platform/tests/network-error-logging/support/clear-policy-pass.png differ diff --git a/testing/web-platform/tests/network-error-logging/support/clear-policy-pass.png.sub.headers b/testing/web-platform/tests/network-error-logging/support/clear-policy-pass.png.sub.headers new file mode 100644 index 000000000000..1085b8a987c5 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/support/clear-policy-pass.png.sub.headers @@ -0,0 +1,6 @@ +Expires: Mon, 26 Jul 1997 05:00:00 GMT +Cache-Control: no-store, no-cache, must-revalidate +Cache-Control: post-check=0, pre-check=0, false +Pragma: no-cache +Report-To: { "group": "nel-group", "max_age": 0, "endpoints": [] } +NEL: {"max_age": 0} diff --git a/testing/web-platform/tests/network-error-logging/support/lock.py b/testing/web-platform/tests/network-error-logging/support/lock.py new file mode 100644 index 000000000000..8c88250bde00 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/support/lock.py @@ -0,0 +1,38 @@ +_LOCK_KEY = "network-error-logging:lock" +_TIMEOUT = 5 # seconds + +def wait_for_lock(request): + t0 = time.time() + while time.time() - t0 < _TIMEOUT: + time.sleep(0.5) + value = request.server.stash.take(key=_LOCK_KEY) + if value is None: + return True + return False + +def lock(request, report_id): + with request.server.stash.lock: + # Loop until the lock is free + if not wait_for_lock(request): + return (503, [], "Cannot obtain lock") + request.server.stash.put(key=_LOCK_KEY, value=report_id) + return "Obtained lock for %s" % report_id + +def unlock(request, report_id): + with request.server.stash.lock: + lock_holder = request.server.stash.take(key=_LOCK_KEY) + if lock_holder != request_id: + # Return the lock holder to the stash + request.server.stash.put(key=_LOCK_KEY, value=lock_holder) + return (503, [], "Cannot release lock held by %s" % lock_holder) + return "Released lock for %s" % report_id + +def 
main(request, response): + op = request.GET.first("op") + report_id = request.GET.first("reportID") + if op == "lock": + return lock(request, report_id) + elif op == "unlock": + return unlock(request, report_id) + else: + return (400, [], "Invalid op") diff --git a/testing/web-platform/tests/network-error-logging/support/nel.sub.js b/testing/web-platform/tests/network-error-logging/support/nel.sub.js new file mode 100644 index 000000000000..c6b4783bd94c --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/support/nel.sub.js @@ -0,0 +1,169 @@ +const reportID = "{{$id:uuid()}}"; + +/* + * NEL tests have to run serially, since the user agent maintains a global cache + * of Reporting and NEL policies, and we don't want the policies for multiple + * tests to interfere with each other. These functions (along with a Python + * handler in lock.py) implement a simple spin lock. + */ + +function obtainNELLock() { + return fetch("/network-error-logging/support/lock.py?op=lock&reportID=" + reportID); +} + +function releaseNELLock() { + return fetch("/network-error-logging/support/lock.py?op=unlock&reportID=" + reportID); +} + +function nel_test(callback, name, properties) { + promise_test(async t => { + await obtainNELLock(); + await clearReportingAndNELConfigurations(); + await callback(t); + await releaseNELLock(); + }, name, properties); +} + +/* + * Helper functions for constructing domain names that contain NEL policies. 
+ */ +function _monitoredDomain(subdomain) { + if (subdomain == "www") { + return "{{hosts[alt][www]}}" + } else if (subdomain == "www1") { + return "{{hosts[alt][www1]}}" + } else if (subdomain == "www2") { + return "{{hosts[alt][www2]}}" + } else if (subdomain == "nonexistent") { + return "{{hosts[alt][nonexistent]}}" + } else { + return "{{hosts[alt][]}}" + } +} + +function _getNELResourceURL(subdomain, suffix) { + return "https://" + _monitoredDomain(subdomain) + + ":{{ports[https][0]}}/network-error-logging/support/" + suffix; +} + +/* + * Fetches a resource whose headers define a basic NEL policy (i.e., with no + * include_subdomains flag). We ensure that we request the resource from a + * different origin than is used for the main test case HTML file or for report + * uploads. This minimizes the number of reports that are generated for this + * policy. + */ + +function getURLForResourceWithBasicPolicy(subdomain) { + return _getNELResourceURL(subdomain, "pass.png?id="+reportID); +} + +function fetchResourceWithBasicPolicy(subdomain) { + const url = getURLForResourceWithBasicPolicy(subdomain); + return fetch(url, {mode: "no-cors"}); +} + +/* + * Fetches a resource whose headers define an include_subdomains NEL policy. + */ + +function getURLForResourceWithIncludeSubdomainsPolicy(subdomain) { + return _getNELResourceURL(subdomain, "subdomains-pass.png?id="+reportID); +} + +function fetchResourceWithIncludeSubdomainsPolicy(subdomain) { + const url = getURLForResourceWithIncludeSubdomainsPolicy(subdomain); + return fetch(url, {mode: "no-cors"}); +} + +/* + * Fetches a resource whose headers do NOT define a NEL policy. This may or may + * not generate a NEL report, depending on whether you've already successfully + * requested a resource from the same origin that included a NEL policy. 
+ */ + +function getURLForResourceWithNoPolicy(subdomain) { + return _getNELResourceURL(subdomain, "no-policy-pass.png"); +} + +function fetchResourceWithNoPolicy(subdomain) { + const url = getURLForResourceWithNoPolicy(subdomain); + return fetch(url, {mode: "no-cors"}); +} + +/* + * Fetches a resource that doesn't exist. This may or may not generate a NEL + * report, depending on whether you've already successfully requested a resource + * from the same origin that included a NEL policy. + */ + +function getURLForMissingResource(subdomain) { + return _getNELResourceURL(subdomain, "nonexistent.png"); +} + +function fetchMissingResource(subdomain) { + const url = getURLForMissingResource(subdomain); + return fetch(url, {mode: "no-cors"}); +} + +/* + * Fetches resources that clear out any existing Reporting or NEL configurations + * for all origins that any test case might use. + */ + +function getURLForClearingConfiguration(subdomain) { + return _getNELResourceURL(subdomain, "clear-pass.png?id="+reportID); +} + +async function clearReportingAndNELConfigurations(subdomain) { + await Promise.all([ + fetch(getURLForClearingConfiguration(""), {mode: "no-cors"}), + fetch(getURLForClearingConfiguration("www"), {mode: "no-cors"}), + fetch(getURLForClearingConfiguration("www1"), {mode: "no-cors"}), + fetch(getURLForClearingConfiguration("www2"), {mode: "no-cors"}), + ]); + return; +} + +/* + * Returns whether all of the fields in obj1 also exist in obj2 with the same + * values. (Put another way, returns whether obj1 and obj2 are equal, ignoring + * any extra fields in obj2.) 
+ */ + +function _isSubsetOf(obj1, obj2) { + for (const prop in obj1) { + if (typeof obj1[prop] === 'object') { + if (typeof obj2[prop] !== 'object') { + return false; + } + if (!_isSubsetOf(obj1[prop], obj2[prop])) { + return false; + } + } else if (obj1[prop] != obj2[prop]) { + return false; + } + } + return true; +} + +/* + * Verifies that a report was uploaded that contains all of the fields in + * expected. + */ + +async function reportExists(expected) { + var timeout = + document.querySelector("meta[name=timeout][content=long]") ? 50 : 1; + var reportLocation = + "/network-error-logging/support/report.py?op=retrieve_report&timeout=" + + timeout + "&reportID=" + reportID; + const response = await fetch(reportLocation); + const json = await response.json(); + for (const report of json) { + if (_isSubsetOf(expected, report)) { + return true; + } + } + return false; +} diff --git a/testing/web-platform/tests/network-error-logging/support/no-policy-pass.png b/testing/web-platform/tests/network-error-logging/support/no-policy-pass.png new file mode 100644 index 000000000000..2fa1e0ac0663 Binary files /dev/null and b/testing/web-platform/tests/network-error-logging/support/no-policy-pass.png differ diff --git a/testing/web-platform/tests/network-error-logging/support/pass.png b/testing/web-platform/tests/network-error-logging/support/pass.png new file mode 100644 index 000000000000..2fa1e0ac0663 Binary files /dev/null and b/testing/web-platform/tests/network-error-logging/support/pass.png differ diff --git a/testing/web-platform/tests/network-error-logging/support/pass.png.sub.headers b/testing/web-platform/tests/network-error-logging/support/pass.png.sub.headers new file mode 100644 index 000000000000..70796e913ace --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/support/pass.png.sub.headers @@ -0,0 +1,6 @@ +Expires: Mon, 26 Jul 1997 05:00:00 GMT +Cache-Control: no-store, no-cache, must-revalidate +Cache-Control: post-check=0, pre-check=0, false 
+Pragma: no-cache +Report-To: { "group": "nel-group", "max_age": 10886400, "endpoints": [{ "url": "https://{{hosts[][www]}}:{{ports[https][0]}}/network-error-logging/support/report.py?op=put&reportID={{GET[id]}}" }] } +NEL: {"report_to": "nel-group", "max_age": 10886400, "success_fraction": 1.0} diff --git a/testing/web-platform/tests/network-error-logging/support/report.py b/testing/web-platform/tests/network-error-logging/support/report.py new file mode 100644 index 000000000000..7c05b51b9eb0 --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/support/report.py @@ -0,0 +1,52 @@ +import time +import json +import re + +def retrieve_from_stash(request, key, timeout, default_value): + t0 = time.time() + while time.time() - t0 < timeout: + time.sleep(0.5) + value = request.server.stash.take(key=key) + if value is not None: + return json.dumps(value) + + return default_value + +def main(request, response): + # Handle CORS preflight requests + if request.method == 'OPTIONS': + # Always reject preflights for one subdomain + if "www2" in request.headers["Origin"]: + return (400, [], "CORS preflight rejected for www2") + return [ + ("Content-Type", "text/plain"), + ("Access-Control-Allow-Origin", "*"), + ("Access-Control-Allow-Methods", "post"), + ("Access-Control-Allow-Headers", "Content-Type"), + ], "CORS allowed" + + op = request.GET.first("op"); + key = request.GET.first("reportID") + + if op == "retrieve_report": + try: + timeout = float(request.GET.first("timeout")) + except: + timeout = 0.5 + return [("Content-Type", "application/json")], retrieve_from_stash(request, key, timeout, '[]') + + # append new reports + new_reports = json.loads(request.body) + for report in new_reports: + report["metadata"] = { + "content_type": request.headers["Content-Type"], + } + with request.server.stash.lock: + reports = request.server.stash.take(key=key) + if reports is None: + reports = [] + reports.extend(new_reports) + request.server.stash.put(key=key, 
value=reports) + + # return acknowledgement report + return [("Content-Type", "text/plain")], "Recorded report" diff --git a/testing/web-platform/tests/network-error-logging/support/subdomains-pass.png b/testing/web-platform/tests/network-error-logging/support/subdomains-pass.png new file mode 100644 index 000000000000..2fa1e0ac0663 Binary files /dev/null and b/testing/web-platform/tests/network-error-logging/support/subdomains-pass.png differ diff --git a/testing/web-platform/tests/network-error-logging/support/subdomains-pass.png.sub.headers b/testing/web-platform/tests/network-error-logging/support/subdomains-pass.png.sub.headers new file mode 100644 index 000000000000..50124b8cfcdf --- /dev/null +++ b/testing/web-platform/tests/network-error-logging/support/subdomains-pass.png.sub.headers @@ -0,0 +1,6 @@ +Expires: Mon, 26 Jul 1997 05:00:00 GMT +Cache-Control: no-store, no-cache, must-revalidate +Cache-Control: post-check=0, pre-check=0, false +Pragma: no-cache +Report-To: { "group": "nel-group", "max_age": 10886400, "include_subdomains": true, "endpoints": [{ "url": "https://{{hosts[][www]}}:{{ports[https][0]}}/network-error-logging/support/report.py?op=put&reportID={{GET[id]}}" }] } +NEL: {"report_to": "nel-group", "max_age": 10886400, "include_subdomains": true, "success_fraction": 1.0} diff --git a/testing/web-platform/tests/service-workers/cache-storage/resources/vary.py b/testing/web-platform/tests/service-workers/cache-storage/resources/vary.py new file mode 100644 index 000000000000..59e39bc2ae73 --- /dev/null +++ b/testing/web-platform/tests/service-workers/cache-storage/resources/vary.py @@ -0,0 +1,25 @@ +def main(request, response): + if "clear-vary-value-override-cookie" in request.GET: + response.unset_cookie("vary-value-override") + return "vary cookie cleared" + + set_cookie_vary = request.GET.first("set-vary-value-override-cookie", + default="") + if set_cookie_vary: + response.set_cookie("vary-value-override", set_cookie_vary) + return "vary 
cookie set" + + # If there is a vary-value-override cookie set, then use its value + # for the VARY header no matter what the query string is set to. This + # override is necessary to test the case when two URLs are identical + # (including query), but differ by VARY header. + cookie_vary = request.cookies.get("vary-value-override"); + if cookie_vary: + response.headers.set("vary", cookie_vary) + else: + # If there is no cookie, then use the query string value, if present. + query_vary = request.GET.first("vary", default="") + if query_vary: + response.headers.set("vary", query_vary) + + return "vary response" diff --git a/testing/web-platform/tests/service-workers/cache-storage/script-tests/cache-add.js b/testing/web-platform/tests/service-workers/cache-storage/script-tests/cache-add.js index c03faeb0e837..a482c42eaeb2 100644 --- a/testing/web-platform/tests/service-workers/cache-storage/script-tests/cache-add.js +++ b/testing/web-platform/tests/service-workers/cache-storage/script-tests/cache-add.js @@ -267,4 +267,84 @@ cache_test(function(cache, test) { 'twice.'); }, 'Cache.addAll called with the same Request object specified twice'); +cache_test(async function(cache, test) { + const url = '../resources/vary.py?vary=x-shape'; + let requests = [ + new Request(url, { headers: { 'x-shape': 'circle' }}), + new Request(url, { headers: { 'x-shape': 'square' }}), + ]; + let result = await cache.addAll(requests); + assert_equals(result, undefined, 'Cache.addAll() should succeed'); + }, 'Cache.addAll should succeed when entries differ by vary header'); + +cache_test(async function(cache, test) { + const url = '../resources/vary.py?vary=x-shape'; + let requests = [ + new Request(url, { headers: { 'x-shape': 'circle' }}), + new Request(url, { headers: { 'x-shape': 'circle' }}), + ]; + await promise_rejects( + test, + 'InvalidStateError', + cache.addAll(requests), + 'Cache.addAll() should reject when entries are duplicate by vary header'); + }, 'Cache.addAll should reject 
when entries are duplicate by vary header'); + +// VARY header matching is asymmetric. Determining if two entries are duplicate +// depends on which entry's response is used in the comparison. The target +// response's VARY header determines what request headers are examined. This +// test verifies that Cache.addAll() duplicate checking handles this asymmetric +// behavior correctly. +cache_test(async function(cache, test) { + const base_url = '../resources/vary.py'; + + // Define a request URL that sets a VARY header in the + // query string to be echoed back by the server. + const url = base_url + '?vary=x-size'; + + // Set a cookie to override the VARY header of the response + // when the request is made with credentials. This will + // take precedence over the query string vary param. This + // is a bit confusing, but it's necessary to construct a test + // where the URL is the same, but the VARY headers differ. + // + // Note, the test could also pass this information in additional + // request headers. If the cookie approach becomes too unwieldy + // this test could be rewritten to use that technique. + await fetch(base_url + '?set-vary-value-override-cookie=x-shape'); + test.add_cleanup(_ => fetch(base_url + '?clear-vary-value-override-cookie')); + + let requests = [ + // This request will result in a Response with a "Vary: x-shape" + // header. This *will not* result in a duplicate match with the + // other entry. + new Request(url, { headers: { 'x-shape': 'circle', + 'x-size': 'big' }, + credentials: 'same-origin' }), + + // This request will result in a Response with a "Vary: x-size" + // header. This *will* result in a duplicate match with the other + // entry. 
+ new Request(url, { headers: { 'x-shape': 'square', + 'x-size': 'big' }, + credentials: 'omit' }), + ]; + await promise_rejects( + test, + 'InvalidStateError', + cache.addAll(requests), + 'Cache.addAll() should reject when one entry has a vary header ' + + 'matching an earlier entry.'); + + // Test the reverse order now. + await promise_rejects( + test, + 'InvalidStateError', + cache.addAll(requests.reverse()), + 'Cache.addAll() should reject when one entry has a vary header ' + + 'matching a later entry.'); + + }, 'Cache.addAll should reject when one entry has a vary header ' + + 'matching another entry'); + done(); diff --git a/testing/web-platform/tests/tools/manifest/manifest.py b/testing/web-platform/tests/tools/manifest/manifest.py index 02250e8300fb..42a8e1ceb236 100644 --- a/testing/web-platform/tests/tools/manifest/manifest.py +++ b/testing/web-platform/tests/tools/manifest/manifest.py @@ -9,7 +9,7 @@ from .log import get_logger from .utils import from_os_path, to_os_path -CURRENT_VERSION = 4 +CURRENT_VERSION = 5 class ManifestError(Exception): diff --git a/testing/web-platform/tests/tools/manifest/tests/test_manifest.py b/testing/web-platform/tests/tools/manifest/tests/test_manifest.py index e0a2c828a770..017513ae01ae 100644 --- a/testing/web-platform/tests/tools/manifest/tests/test_manifest.py +++ b/testing/web-platform/tests/tools/manifest/tests/test_manifest.py @@ -111,7 +111,7 @@ def test_manifest_to_json_forwardslash(): 'paths': { 'a/b': ('0000000000000000000000000000000000000000', 'testharness') }, - 'version': 4, + 'version': 5, 'url_base': '/', 'items': { 'reftest': {}, @@ -135,7 +135,7 @@ def test_manifest_to_json_backslash(): 'paths': { 'a/b': ('0000000000000000000000000000000000000000', 'testharness') }, - 'version': 4, + 'version': 5, 'url_base': '/', 'items': { 'reftest': {}, @@ -158,7 +158,7 @@ def test_manifest_from_json_backslash(): 'paths': { 'a\\b': ('0000000000000000000000000000000000000000', 'testharness') }, - 'version': 4, + 
'version': 5, 'url_base': '/', 'items': { 'reftest': {}, diff --git a/testing/web-platform/tests/tools/serve/test_serve.py b/testing/web-platform/tests/tools/serve/test_serve.py index e939c3a0ccee..1c089b506738 100644 --- a/testing/web-platform/tests/tools/serve/test_serve.py +++ b/testing/web-platform/tests/tools/serve/test_serve.py @@ -35,7 +35,7 @@ def test_make_hosts_file_windows(): browser_host="foo.bar", alternate_hosts={"alt": "foo2.bar"}, subdomains={"a", "b"}, - not_subdomains={"x, y"}) as c: + not_subdomains={"x", "y"}) as c: hosts = serve.make_hosts_file(c, "192.168.42.42") lines = hosts.split("\n") assert set(lines) == {"", diff --git a/testing/web-platform/tests/tools/webdriver/webdriver/error.py b/testing/web-platform/tests/tools/webdriver/webdriver/error.py index b2337ff3b38f..e148e8fe8007 100644 --- a/testing/web-platform/tests/tools/webdriver/webdriver/error.py +++ b/testing/web-platform/tests/tools/webdriver/webdriver/error.py @@ -113,7 +113,7 @@ class NoSuchWindowException(WebDriverException): class ScriptTimeoutException(WebDriverException): - http_status = 408 + http_status = 500 status_code = "script timeout" @@ -128,7 +128,7 @@ class StaleElementReferenceException(WebDriverException): class TimeoutException(WebDriverException): - http_status = 408 + http_status = 500 status_code = "timeout" diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py index 018dc103983d..2313a80c745b 100644 --- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py +++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py @@ -439,7 +439,10 @@ class ManifestLoader(object): if (not os.path.exists(manifest_path) or self.force_manifest_update): self.update_manifest(manifest_path, tests_path, url_base, download=self.manifest_download) - manifest_file = manifest.load(tests_path, manifest_path, types=self.types, meta_filters=self.meta_filters) + try: + 
manifest_file = manifest.load(tests_path, manifest_path, types=self.types, meta_filters=self.meta_filters) + except manifest.ManifestVersionMismatch: + manifest_file = manifest.Manifest(url_base) if manifest_file.url_base != url_base: self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base, url_base)) diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py index 827244cda3c5..5280e46a6b24 100644 --- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py +++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py @@ -25,6 +25,14 @@ test_0 = """\ tags: [a, @Reset] """ +test_1 = """\ +[1.html] + prefs: + if os == 'win': [a:b, c:d] + expected: + if os == 'win': FAIL +""" + def test_metadata_inherit(): tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10), @@ -53,11 +61,14 @@ def test_metadata_inherit(): def test_conditional(): tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10), ("test", "c", 10)) - test_metadata = manifestexpected.static.compile(BytesIO(test_0), - {}, + + test_metadata = manifestexpected.static.compile(BytesIO(test_1), + {"os": "win"}, data_cls_getter=manifestexpected.data_cls_getter, test_path="a", url_base="") + + test = tests[1][2].pop() test_obj = wpttest.from_manifest(test, [], test_metadata.get_test(test.id)) assert test_obj.prefs == {"a": "b", "c": "d"} assert test_obj.expected() == "FAIL" diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py index 87ddfeed8917..98b54dc11ea8 100644 --- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py +++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py @@ -76,10 +76,9 @@ key: ["VariableNode", "x", 
[]], ["NumberNode", "1", []] ]], - ["ListNode", None, - [["ValueNode", "value1", []], - ["ValueNode", "value2", []], - ]], + ["ListNode", None, + [["ValueNode", "value1", []], + ["ValueNode", "value2", []]]], ]]]]]] ) diff --git a/testing/web-platform/tests/trusted-types/DOMParser-parseFromString.tentative.html b/testing/web-platform/tests/trusted-types/DOMParser-parseFromString.tentative.html new file mode 100644 index 000000000000..2fe9b31b787e --- /dev/null +++ b/testing/web-platform/tests/trusted-types/DOMParser-parseFromString.tentative.html @@ -0,0 +1,22 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/DOMParser-requiresTrustedTypes.tentative.html b/testing/web-platform/tests/trusted-types/DOMParser-requiresTrustedTypes.tentative.html deleted file mode 100644 index 7e21feabd24c..000000000000 --- a/testing/web-platform/tests/trusted-types/DOMParser-requiresTrustedTypes.tentative.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - diff --git a/testing/web-platform/tests/trusted-types/DOMParser.tentative.html b/testing/web-platform/tests/trusted-types/DOMParser.tentative.html deleted file mode 100644 index 53d2b44febd6..000000000000 --- a/testing/web-platform/tests/trusted-types/DOMParser.tentative.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/Document-write.tentative.html b/testing/web-platform/tests/trusted-types/Document-write.tentative.html new file mode 100644 index 000000000000..3a63e923543b --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Document-write.tentative.html @@ -0,0 +1,15 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/Element-insertAdjacentHTML.tentative.html b/testing/web-platform/tests/trusted-types/Element-insertAdjacentHTML.tentative.html new file mode 100644 index 000000000000..599ade44ec11 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Element-insertAdjacentHTML.tentative.html @@ -0,0 +1,38 @@ + + + + + +
+ diff --git a/testing/web-platform/tests/trusted-types/Element-outerHTML.tentative.html b/testing/web-platform/tests/trusted-types/Element-outerHTML.tentative.html new file mode 100644 index 000000000000..a0bb6c1a5e3f --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Element-outerHTML.tentative.html @@ -0,0 +1,24 @@ + + + + + +
+ diff --git a/testing/web-platform/tests/trusted-types/HTMLElement-generic.tentative.html b/testing/web-platform/tests/trusted-types/HTMLElement-generic.tentative.html index 486b008986bc..cea32a5a2df1 100644 --- a/testing/web-platform/tests/trusted-types/HTMLElement-generic.tentative.html +++ b/testing/web-platform/tests/trusted-types/HTMLElement-generic.tentative.html @@ -6,7 +6,7 @@ diff --git a/testing/web-platform/tests/trusted-types/Location-assign.tentative.html b/testing/web-platform/tests/trusted-types/Location-assign.tentative.html new file mode 100644 index 000000000000..13cca5679488 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Location-assign.tentative.html @@ -0,0 +1,15 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/Location-href.tentative.html b/testing/web-platform/tests/trusted-types/Location-href.tentative.html new file mode 100644 index 000000000000..d759d28593e6 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Location-href.tentative.html @@ -0,0 +1,15 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/Location-replace.tentative.html b/testing/web-platform/tests/trusted-types/Location-replace.tentative.html new file mode 100644 index 000000000000..7d84905d1987 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Location-replace.tentative.html @@ -0,0 +1,15 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/Range-createContextualFragment.tentative.html b/testing/web-platform/tests/trusted-types/Range-createContextualFragment.tentative.html new file mode 100644 index 000000000000..3d45b33486d3 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Range-createContextualFragment.tentative.html @@ -0,0 +1,17 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/TrustedHTML.tentative.html b/testing/web-platform/tests/trusted-types/TrustedHTML.tentative.html deleted file mode 100644 index a8d4e78b06d1..000000000000 --- 
a/testing/web-platform/tests/trusted-types/TrustedHTML.tentative.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - diff --git a/testing/web-platform/tests/trusted-types/TrustedScriptURL.tentative.html b/testing/web-platform/tests/trusted-types/TrustedScriptURL.tentative.html deleted file mode 100644 index 92bc87f1e1da..000000000000 --- a/testing/web-platform/tests/trusted-types/TrustedScriptURL.tentative.html +++ /dev/null @@ -1,23 +0,0 @@ - - - - - diff --git a/testing/web-platform/tests/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html b/testing/web-platform/tests/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html index a37b5a7197f2..76e6d130b05d 100644 --- a/testing/web-platform/tests/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html +++ b/testing/web-platform/tests/trusted-types/TrustedTypePolicyFactory-createPolicy.tentative.html @@ -6,7 +6,7 @@ diff --git a/testing/web-platform/tests/trusted-types/TrustedURL.tentative.html b/testing/web-platform/tests/trusted-types/TrustedURL.tentative.html deleted file mode 100644 index 5048326fad81..000000000000 --- a/testing/web-platform/tests/trusted-types/TrustedURL.tentative.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - diff --git a/testing/web-platform/tests/trusted-types/Window-open.tentative.html b/testing/web-platform/tests/trusted-types/Window-open.tentative.html new file mode 100644 index 000000000000..c005fbba143f --- /dev/null +++ b/testing/web-platform/tests/trusted-types/Window-open.tentative.html @@ -0,0 +1,27 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html new file mode 100644 index 000000000000..cc575dc0085b --- /dev/null +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-DOMParser-parseFromString.tentative.html @@ -0,0 +1,35 @@ + + + + + + + 
+ diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-Document-write.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Document-write.tentative.html new file mode 100644 index 000000000000..28813d72e0e1 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Document-write.tentative.html @@ -0,0 +1,35 @@ + + + + + + + + + + + diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html new file mode 100644 index 000000000000..ad94b44e8fb7 --- /dev/null +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Element-insertAdjacentHTML.tentative.html @@ -0,0 +1,97 @@ + + + + + + + + + + +
+ + + diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-outerHTML.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html similarity index 55% rename from testing/web-platform/tests/trusted-types/block-string-assignment-to-outerHTML.tentative.html rename to testing/web-platform/tests/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html index 8cf6c4b065a8..47f1165b1a69 100644 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-outerHTML.tentative.html +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Element-outerHTML.tentative.html @@ -12,6 +12,22 @@ diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html index 79bbb24f541b..eae526261907 100644 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-HTMLElement-generic.tentative.html @@ -9,7 +9,7 @@ diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-assign.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Location-assign.tentative.html similarity index 55% rename from testing/web-platform/tests/trusted-types/block-string-assignment-to-location-assign.tentative.html rename to testing/web-platform/tests/trusted-types/block-string-assignment-to-Location-assign.tentative.html index 76725da7c72e..8079335bc586 100644 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-assign.tentative.html +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Location-assign.tentative.html @@ -9,18 +9,15 @@ + + + + + + + diff --git 
a/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-replace.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Location-replace.tentative.html similarity index 55% rename from testing/web-platform/tests/trusted-types/block-string-assignment-to-location-replace.tentative.html rename to testing/web-platform/tests/trusted-types/block-string-assignment-to-Location-replace.tentative.html index 9736a84b3ada..872f14e14483 100644 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-replace.tentative.html +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Location-replace.tentative.html @@ -9,18 +9,15 @@ + + + + + + diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-Window-open.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Window-open.tentative.html new file mode 100644 index 000000000000..f5712295d30d --- /dev/null +++ b/testing/web-platform/tests/trusted-types/block-string-assignment-to-Window-open.tentative.html @@ -0,0 +1,63 @@ + + + + + + + + + + + + + diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-createContextualFragment.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-createContextualFragment.tentative.html deleted file mode 100644 index 1d67a51ff6ab..000000000000 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-createContextualFragment.tentative.html +++ /dev/null @@ -1,43 +0,0 @@ - - - - - - - - diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-innerHTML.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-innerHTML.tentative.html deleted file mode 100644 index 67faf6ea7d30..000000000000 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-innerHTML.tentative.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - 
- - - - - - diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-insertAdjacentHTML.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-insertAdjacentHTML.tentative.html deleted file mode 100644 index 70bb803442f1..000000000000 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-insertAdjacentHTML.tentative.html +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - - - - - -
- - - diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-href.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-href.tentative.html deleted file mode 100644 index 07cc4d5fe29f..000000000000 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-location-href.tentative.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - - diff --git a/testing/web-platform/tests/trusted-types/block-string-assignment-to-window-open.tentative.html b/testing/web-platform/tests/trusted-types/block-string-assignment-to-window-open.tentative.html deleted file mode 100644 index 2c3a8ce05668..000000000000 --- a/testing/web-platform/tests/trusted-types/block-string-assignment-to-window-open.tentative.html +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - diff --git a/testing/web-platform/tests/trusted-types/createContextualFragment.tentative.html b/testing/web-platform/tests/trusted-types/createContextualFragment.tentative.html deleted file mode 100644 index 5e50acc80650..000000000000 --- a/testing/web-platform/tests/trusted-types/createContextualFragment.tentative.html +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/document-write.tentative.html b/testing/web-platform/tests/trusted-types/document-write.tentative.html deleted file mode 100644 index 12794199722f..000000000000 --- a/testing/web-platform/tests/trusted-types/document-write.tentative.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/innerHTML.tentative.html b/testing/web-platform/tests/trusted-types/innerHTML.tentative.html deleted file mode 100644 index f9f32d42a766..000000000000 --- a/testing/web-platform/tests/trusted-types/innerHTML.tentative.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/insertAdjacentHTML.tentative.html 
b/testing/web-platform/tests/trusted-types/insertAdjacentHTML.tentative.html deleted file mode 100644 index a95dd6c0bacc..000000000000 --- a/testing/web-platform/tests/trusted-types/insertAdjacentHTML.tentative.html +++ /dev/null @@ -1,61 +0,0 @@ - - - - - -
- diff --git a/testing/web-platform/tests/trusted-types/location-assign.tentative.html b/testing/web-platform/tests/trusted-types/location-assign.tentative.html deleted file mode 100644 index 07cb4a801966..000000000000 --- a/testing/web-platform/tests/trusted-types/location-assign.tentative.html +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/location-href.tentative.html b/testing/web-platform/tests/trusted-types/location-href.tentative.html deleted file mode 100644 index 2527fbf4847b..000000000000 --- a/testing/web-platform/tests/trusted-types/location-href.tentative.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/location-replace.tentative.html b/testing/web-platform/tests/trusted-types/location-replace.tentative.html deleted file mode 100644 index 097c24d593aa..000000000000 --- a/testing/web-platform/tests/trusted-types/location-replace.tentative.html +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/outerHTML.tentative.html b/testing/web-platform/tests/trusted-types/outerHTML.tentative.html deleted file mode 100644 index 1deb46bf5e3d..000000000000 --- a/testing/web-platform/tests/trusted-types/outerHTML.tentative.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - -
- diff --git a/testing/web-platform/tests/trusted-types/srcDoc-requiresTrustedTypes.tentative.html b/testing/web-platform/tests/trusted-types/srcDoc-requiresTrustedTypes.tentative.html deleted file mode 100644 index b957488bee42..000000000000 --- a/testing/web-platform/tests/trusted-types/srcDoc-requiresTrustedTypes.tentative.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - - diff --git a/testing/web-platform/tests/trusted-types/srcDoc.tentative.html b/testing/web-platform/tests/trusted-types/srcDoc.tentative.html deleted file mode 100644 index b23703e22329..000000000000 --- a/testing/web-platform/tests/trusted-types/srcDoc.tentative.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/trusted-types/support/helper.sub.js b/testing/web-platform/tests/trusted-types/support/helper.sub.js index 91112d8f2390..b5435917bec6 100644 --- a/testing/web-platform/tests/trusted-types/support/helper.sub.js +++ b/testing/web-platform/tests/trusted-types/support/helper.sub.js @@ -1,70 +1,88 @@ -var STRINGS = { - unescapedHTML: "This has ', \", >, <, & & characters!", - escapedHTML: "<html>This has ', ", >, <, &amp; & characters!</html>", - unescapedText: "This has ', \", >, <, & & characters!", +var INPUTS = { + HTML: "Hi, I want to be transformed!", + SCRIPT: "Hi, I want to be transformed!", + SCRIPTURL: "http://this.is.a.scripturl.test/", + URL: "http://hello.i.am.an.url/" }; -var URLS = { - safe: "http://{{host}}:{{ports[http][0]}}/", - javascript: "javascript:'scripted'", - external: "custom-handler:whatever", - sanitized: "about:invalid" +var RESULTS = { + HTML: "Quack, I want to be a duck!", + SCRIPT: "Meow, I want to be a cat!", + SCRIPTURL: "http://this.is.a.successful.test/", + URL: "http://hooray.i.am.successfully.transformed/" }; -function createFrameAndWrite(html) { - return new Promise((resolve, reject) => { - var i = document.createElement('iframe'); - i.onload = e => { - i.contentDocument.open(); - try { - 
i.contentDocument.write(html); - } catch (e) { - i.remove(); - reject(e); - } - i.contentDocument.close(); - resolve(i); - }; - document.body.appendChild(i); - }); +function createHTMLJS(html) { + return html.replace("Hi", "Quack") + .replace("transformed", "a duck"); } -function createFrameAndHref(href) { - return new Promise((resolve, reject) => { - var i = document.createElement('iframe'); - i.onload = _ => { - i.onload = null; - try { - i.onload = _ => resolve(i); - i.contentWindow.location.href = href; - } catch (ex) { - i.remove(); - reject(ex); - } - }; - document.body.appendChild(i); - }); +function createScriptJS(script) { + return script.replace("Hi", "Meow") + .replace("transformed", "a cat"); } -let trustedHTML = TrustedHTML.escape(STRINGS.unescapedHTML); -function assert_accepts_trusted_html(tag, attribute) { +function createScriptURLJS(scripturl) { + return scripturl.replace("scripturl", "successful"); +} + +function createURLJS(url) { + return url.replace("hello", "hooray") + .replace("an.url", "successfully.transformed"); +} + +function createHTML_policy(win) { + return win.trustedTypes.createPolicy('SomeName', { createHTML: createHTMLJS }); +} + +function createScript_policy(win) { + return win.trustedTypes.createPolicy('SomeName', { createScript: createScriptJS }); +} + +function createScriptURL_policy(win) { + return win.trustedTypes.createPolicy('SomeName', { createScriptURL: createScriptURLJS }); +} + +function createURL_policy(win) { + return win.trustedTypes.createPolicy('SomeName', { createURL: createURLJS }); +} + +function assert_element_accepts_trusted_html(win, t, tag, attribute, expected) { + createHTML_policy(win) + .then(t.step_func_done(p => { + let html = p.createHTML(INPUTS.HTML); + assert_element_accepts_trusted_type(tag, attribute, html, expected); + })); +} + +function assert_element_accepts_trusted_script(win, t, tag, attribute, expected) { + createScript_policy(win) + .then(t.step_func_done(p => { + let script = 
p.createScript(INPUTS.SCRIPT); + assert_element_accepts_trusted_type(tag, attribute, script, expected); + })); +} + +function assert_element_accepts_trusted_script_url(win, t, tag, attribute, expected) { + createScriptURL_policy(win) + .then(t.step_func_done(p => { + let scripturl = p.createScriptURL(INPUTS.SCRIPTURL); + assert_element_accepts_trusted_type(tag, attribute, scripturl, expected); + })); +} + +function assert_element_accepts_trusted_url(win, t, tag, attribute, expected) { + createURL_policy(win) + .then(t.step_func_done(p => { + let url = p.createURL(INPUTS.URL); + assert_element_accepts_trusted_type(tag, attribute, url, expected); + })); +} + +function assert_element_accepts_trusted_type(tag, attribute, value, expected) { let elem = document.createElement(tag); - elem[attribute] = trustedHTML; - assert_equals(elem[attribute] + "", STRINGS.unescapedHTML); -} - -let trustedURL = TrustedURL.create(URLS.safe); -function assert_accepts_trusted_url(tag, attribute) { - let elem = document.createElement(tag); - elem[attribute] = trustedURL; - assert_equals(elem[attribute] + "", URLS.safe); -} - -let trustedScriptURL = TrustedScriptURL.unsafelyCreate(URLS.safe); -function assert_accepts_trusted_script_url(tag, attribute) { - let elem = document.createElement(tag); - elem[attribute] = trustedScriptURL; - assert_equals(elem[attribute] + "", URLS.safe); + elem[attribute] = value; + assert_equals(elem[attribute] + "", expected); } function assert_throws_no_trusted_type(tag, attribute, value) { diff --git a/testing/web-platform/tests/trusted-types/window-open.tentative.html b/testing/web-platform/tests/trusted-types/window-open.tentative.html deleted file mode 100644 index 66ffbd78399c..000000000000 --- a/testing/web-platform/tests/trusted-types/window-open.tentative.html +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - diff --git a/testing/web-platform/tests/wake-lock/wakelock-api.https.html b/testing/web-platform/tests/wake-lock/wakelock-api.https.html index 
7ec4fc2827a7..45a906dfa1f2 100644 --- a/testing/web-platform/tests/wake-lock/wakelock-api.https.html +++ b/testing/web-platform/tests/wake-lock/wakelock-api.https.html @@ -12,7 +12,7 @@ promise_test(async t => { const wakeLock = await navigator.getWakeLock("screen"); const request = wakeLock.createRequest(); assert_true(wakeLock instanceof WakeLock, "wakeLock is a WakeLock"); - assert_true(wakeLock.type instanceof WakeLockType, "wakeLock.type is a WakeLockType"); + assert_equals(typeof wakeLock.type, "string", "the type of wakeLock.type is string"); assert_equals(typeof wakeLock.active, "boolean", "the type of wakeLock.active is boolean"); assert_true(request instanceof WakeLockRequest, "request is a WakeLockRequest"); }, "Test that the Wake Lock API is correct"); diff --git a/testing/web-platform/tests/wake-lock/wakelock-type.https.html b/testing/web-platform/tests/wake-lock/wakelock-type.https.html index 6f6413d1a916..2f9e1242614d 100644 --- a/testing/web-platform/tests/wake-lock/wakelock-type.https.html +++ b/testing/web-platform/tests/wake-lock/wakelock-type.https.html @@ -18,11 +18,11 @@ promise_test(async t => { }, "Test that wakeLock.type is 'system' when system wake lock is invoked"); promise_test(t => { - return promise_rejects(t, new DOMException("", "NotSupportedError"), navigator.getWakeLock()); -}, "'NotSupportedError' is thrown when set an empty wake lock type"); + return promise_rejects(t, new TypeError(), navigator.getWakeLock()); +}, "'TypeError' is thrown when set an empty wake lock type"); promise_test(t => { - return promise_rejects(t, new DOMException("", "NotSupportedError"), navigator.getWakeLock("unsupported")); -}, "'NotSupportedError' is thrown when set an unsupported wake lock type"); + return promise_rejects(t, new TypeError(), navigator.getWakeLock("unsupported")); +}, "'TypeError' is thrown when set an unsupported wake lock type"); diff --git a/testing/web-platform/tests/webdriver/tests/support/asserts.py 
b/testing/web-platform/tests/webdriver/tests/support/asserts.py index 44c76a96b099..2d305a0f3bec 100644 --- a/testing/web-platform/tests/webdriver/tests/support/asserts.py +++ b/testing/web-platform/tests/webdriver/tests/support/asserts.py @@ -20,10 +20,10 @@ errors = { "no such element": 404, "no such frame": 404, "no such window": 404, - "script timeout": 408, + "script timeout": 500, "session not created": 500, "stale element reference": 404, - "timeout": 408, + "timeout": 500, "unable to set cookie": 500, "unable to capture screen": 500, "unexpected alert open": 500, diff --git a/testing/web-platform/tests/xhr/META.yml b/testing/web-platform/tests/xhr/META.yml index 7ad6ad95c36c..4b2ecf39bf3a 100644 --- a/testing/web-platform/tests/xhr/META.yml +++ b/testing/web-platform/tests/xhr/META.yml @@ -1,6 +1,5 @@ spec: https://xhr.spec.whatwg.org/ suggested_reviewers: - - emilio - caitp - Manishearth - jungkees diff --git a/testing/webdriver/Cargo.toml b/testing/webdriver/Cargo.toml index 1865130cd694..922aee68d5da 100644 --- a/testing/webdriver/Cargo.toml +++ b/testing/webdriver/Cargo.toml @@ -10,15 +10,20 @@ readme = "README.md" license = "MPL-2.0" [dependencies] -cookie = { version = "0.10", default-features = false } base64 = "0.6" -hyper = "0.10" -lazy_static = "1.0" +cookie = { version = "0.11", default-features = false } +futures = "0.1" +http = "0.1" +hyper = "0.12" log = "0.4" regex = "1.0" serde = "1.0" serde_json = "1.0" serde_derive = "1.0" time = "0.1" +tokio = "0.1" unicode-segmentation = "1.2" url = "1" + +[dev-dependencies] +lazy_static = "1.0" diff --git a/testing/webdriver/src/error.rs b/testing/webdriver/src/error.rs index 28fe61ec6fda..4b949269e418 100644 --- a/testing/webdriver/src/error.rs +++ b/testing/webdriver/src/error.rs @@ -1,5 +1,5 @@ use base64::DecodeError; -use hyper::status::StatusCode; +use hyper::StatusCode; use serde::ser::{Serialize, Serializer}; use serde_json; use std::borrow::Cow; @@ -188,37 +188,36 @@ impl ErrorStatus { /// 
Returns the correct HTTP status code associated with the error type. pub fn http_status(&self) -> StatusCode { use self::ErrorStatus::*; - use self::StatusCode::*; match *self { - ElementClickIntercepted => BadRequest, - ElementNotInteractable => BadRequest, - ElementNotSelectable => BadRequest, - InsecureCertificate => BadRequest, - InvalidArgument => BadRequest, - InvalidCookieDomain => BadRequest, - InvalidCoordinates => BadRequest, - InvalidElementState => BadRequest, - InvalidSelector => BadRequest, - InvalidSessionId => NotFound, - JavascriptError => InternalServerError, - MoveTargetOutOfBounds => InternalServerError, - NoSuchAlert => NotFound, - NoSuchCookie => NotFound, - NoSuchElement => NotFound, - NoSuchFrame => NotFound, - NoSuchWindow => NotFound, - ScriptTimeout => RequestTimeout, - SessionNotCreated => InternalServerError, - StaleElementReference => NotFound, - Timeout => RequestTimeout, - UnableToCaptureScreen => BadRequest, - UnableToSetCookie => InternalServerError, - UnexpectedAlertOpen => InternalServerError, - UnknownCommand => NotFound, - UnknownError => InternalServerError, - UnknownMethod => MethodNotAllowed, - UnknownPath => NotFound, - UnsupportedOperation => InternalServerError, + ElementClickIntercepted => StatusCode::BAD_REQUEST, + ElementNotInteractable => StatusCode::BAD_REQUEST, + ElementNotSelectable => StatusCode::BAD_REQUEST, + InsecureCertificate => StatusCode::BAD_REQUEST, + InvalidArgument => StatusCode::BAD_REQUEST, + InvalidCookieDomain => StatusCode::BAD_REQUEST, + InvalidCoordinates => StatusCode::BAD_REQUEST, + InvalidElementState => StatusCode::BAD_REQUEST, + InvalidSelector => StatusCode::BAD_REQUEST, + InvalidSessionId => StatusCode::NOT_FOUND, + JavascriptError => StatusCode::INTERNAL_SERVER_ERROR, + MoveTargetOutOfBounds => StatusCode::INTERNAL_SERVER_ERROR, + NoSuchAlert => StatusCode::NOT_FOUND, + NoSuchCookie => StatusCode::NOT_FOUND, + NoSuchElement => StatusCode::NOT_FOUND, + NoSuchFrame => StatusCode::NOT_FOUND, 
+ NoSuchWindow => StatusCode::NOT_FOUND, + ScriptTimeout => StatusCode::INTERNAL_SERVER_ERROR, + SessionNotCreated => StatusCode::INTERNAL_SERVER_ERROR, + StaleElementReference => StatusCode::NOT_FOUND, + Timeout => StatusCode::INTERNAL_SERVER_ERROR, + UnableToCaptureScreen => StatusCode::BAD_REQUEST, + UnableToSetCookie => StatusCode::INTERNAL_SERVER_ERROR, + UnexpectedAlertOpen => StatusCode::INTERNAL_SERVER_ERROR, + UnknownCommand => StatusCode::NOT_FOUND, + UnknownError => StatusCode::INTERNAL_SERVER_ERROR, + UnknownMethod => StatusCode::METHOD_NOT_ALLOWED, + UnknownPath => StatusCode::NOT_FOUND, + UnsupportedOperation => StatusCode::INTERNAL_SERVER_ERROR, } } } diff --git a/testing/webdriver/src/httpapi.rs b/testing/webdriver/src/httpapi.rs index b305e500d690..eceaaaa0b3a8 100644 --- a/testing/webdriver/src/httpapi.rs +++ b/testing/webdriver/src/httpapi.rs @@ -1,7 +1,6 @@ use regex::{Captures, Regex}; -use hyper::method::Method; -use hyper::method::Method::{Delete, Get, Post}; +use hyper::Method; use serde_json::Value; use command::{VoidWebDriverExtensionCommand, WebDriverCommand, WebDriverExtensionCommand, @@ -10,214 +9,214 @@ use error::{ErrorStatus, WebDriverError, WebDriverResult}; fn standard_routes() -> Vec<(Method, &'static str, Route)> { return vec![ - (Post, "/session", Route::NewSession), - (Delete, "/session/{sessionId}", Route::DeleteSession), - (Post, "/session/{sessionId}/url", Route::Get), - (Get, "/session/{sessionId}/url", Route::GetCurrentUrl), - (Post, "/session/{sessionId}/back", Route::GoBack), - (Post, "/session/{sessionId}/forward", Route::GoForward), - (Post, "/session/{sessionId}/refresh", Route::Refresh), - (Get, "/session/{sessionId}/title", Route::GetTitle), - (Get, "/session/{sessionId}/source", Route::GetPageSource), - (Get, "/session/{sessionId}/window", Route::GetWindowHandle), + (Method::POST, "/session", Route::NewSession), + (Method::DELETE, "/session/{sessionId}", Route::DeleteSession), + (Method::POST, 
"/session/{sessionId}/url", Route::Get), + (Method::GET, "/session/{sessionId}/url", Route::GetCurrentUrl), + (Method::POST, "/session/{sessionId}/back", Route::GoBack), + (Method::POST, "/session/{sessionId}/forward", Route::GoForward), + (Method::POST, "/session/{sessionId}/refresh", Route::Refresh), + (Method::GET, "/session/{sessionId}/title", Route::GetTitle), + (Method::GET, "/session/{sessionId}/source", Route::GetPageSource), + (Method::GET, "/session/{sessionId}/window", Route::GetWindowHandle), ( - Get, + Method::GET, "/session/{sessionId}/window/handles", Route::GetWindowHandles, ), - (Delete, "/session/{sessionId}/window", Route::CloseWindow), + (Method::DELETE, "/session/{sessionId}/window", Route::CloseWindow), ( - Get, + Method::GET, "/session/{sessionId}/window/size", Route::GetWindowSize, ), ( - Post, + Method::POST, "/session/{sessionId}/window/size", Route::SetWindowSize, ), ( - Get, + Method::GET, "/session/{sessionId}/window/position", Route::GetWindowPosition, ), ( - Post, + Method::POST, "/session/{sessionId}/window/position", Route::SetWindowPosition, ), ( - Get, + Method::GET, "/session/{sessionId}/window/rect", Route::GetWindowRect, ), ( - Post, + Method::POST, "/session/{sessionId}/window/rect", Route::SetWindowRect, ), ( - Post, + Method::POST, "/session/{sessionId}/window/minimize", Route::MinimizeWindow, ), ( - Post, + Method::POST, "/session/{sessionId}/window/maximize", Route::MaximizeWindow, ), ( - Post, + Method::POST, "/session/{sessionId}/window/fullscreen", Route::FullscreenWindow, ), - (Post, "/session/{sessionId}/window", Route::SwitchToWindow), - (Post, "/session/{sessionId}/frame", Route::SwitchToFrame), + (Method::POST, "/session/{sessionId}/window", Route::SwitchToWindow), + (Method::POST, "/session/{sessionId}/frame", Route::SwitchToFrame), ( - Post, + Method::POST, "/session/{sessionId}/frame/parent", Route::SwitchToParentFrame, ), - (Post, "/session/{sessionId}/element", Route::FindElement), - (Post, 
"/session/{sessionId}/elements", Route::FindElements), + (Method::POST, "/session/{sessionId}/element", Route::FindElement), + (Method::POST, "/session/{sessionId}/elements", Route::FindElements), ( - Post, + Method::POST, "/session/{sessionId}/element/{elementId}/element", Route::FindElementElement, ), ( - Post, + Method::POST, "/session/{sessionId}/element/{elementId}/elements", Route::FindElementElements, ), ( - Get, + Method::GET, "/session/{sessionId}/element/active", Route::GetActiveElement, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/displayed", Route::IsDisplayed, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/selected", Route::IsSelected, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/attribute/{name}", Route::GetElementAttribute, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/property/{name}", Route::GetElementProperty, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/css/{propertyName}", Route::GetCSSValue, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/text", Route::GetElementText, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/name", Route::GetElementTagName, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/rect", Route::GetElementRect, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/enabled", Route::IsEnabled, ), ( - Post, + Method::POST, "/session/{sessionId}/execute/sync", Route::ExecuteScript, ), ( - Post, + Method::POST, "/session/{sessionId}/execute/async", Route::ExecuteAsyncScript, ), - (Get, "/session/{sessionId}/cookie", Route::GetCookies), + (Method::GET, "/session/{sessionId}/cookie", Route::GetCookies), ( - Get, + Method::GET, "/session/{sessionId}/cookie/{name}", Route::GetNamedCookie, ), - (Post, "/session/{sessionId}/cookie", Route::AddCookie), - (Delete, "/session/{sessionId}/cookie", Route::DeleteCookies), + (Method::POST, 
"/session/{sessionId}/cookie", Route::AddCookie), + (Method::DELETE, "/session/{sessionId}/cookie", Route::DeleteCookies), ( - Delete, + Method::DELETE, "/session/{sessionId}/cookie/{name}", Route::DeleteCookie, ), - (Get, "/session/{sessionId}/timeouts", Route::GetTimeouts), - (Post, "/session/{sessionId}/timeouts", Route::SetTimeouts), + (Method::GET, "/session/{sessionId}/timeouts", Route::GetTimeouts), + (Method::POST, "/session/{sessionId}/timeouts", Route::SetTimeouts), ( - Post, + Method::POST, "/session/{sessionId}/element/{elementId}/click", Route::ElementClick, ), ( - Post, + Method::POST, "/session/{sessionId}/element/{elementId}/tap", Route::ElementTap, ), ( - Post, + Method::POST, "/session/{sessionId}/element/{elementId}/clear", Route::ElementClear, ), ( - Post, + Method::POST, "/session/{sessionId}/element/{elementId}/value", Route::ElementSendKeys, ), ( - Post, + Method::POST, "/session/{sessionId}/alert/dismiss", Route::DismissAlert, ), ( - Post, + Method::POST, "/session/{sessionId}/alert/accept", Route::AcceptAlert, ), - (Get, "/session/{sessionId}/alert/text", Route::GetAlertText), + (Method::GET, "/session/{sessionId}/alert/text", Route::GetAlertText), ( - Post, + Method::POST, "/session/{sessionId}/alert/text", Route::SendAlertText, ), ( - Get, + Method::GET, "/session/{sessionId}/screenshot", Route::TakeScreenshot, ), ( - Get, + Method::GET, "/session/{sessionId}/element/{elementId}/screenshot", Route::TakeElementScreenshot, ), - (Post, "/session/{sessionId}/actions", Route::PerformActions), + (Method::POST, "/session/{sessionId}/actions", Route::PerformActions), ( - Delete, + Method::DELETE, "/session/{sessionId}/actions", Route::ReleaseActions, ), - (Get, "/status", Route::Status), + (Method::GET, "/status", Route::Status), ]; } @@ -394,7 +393,7 @@ impl WebDriverHttpApi { matcher.match_type.clone(), &captures.unwrap(), body, - method == Post, + method == Method::POST, ); } else { error = ErrorStatus::UnknownMethod; diff --git 
a/testing/webdriver/src/lib.rs b/testing/webdriver/src/lib.rs index 1f763292ca26..231c9e69b0f0 100644 --- a/testing/webdriver/src/lib.rs +++ b/testing/webdriver/src/lib.rs @@ -3,9 +3,9 @@ extern crate base64; extern crate cookie; #[macro_use] -extern crate lazy_static; -#[macro_use] extern crate log; +extern crate futures; +extern crate http; extern crate hyper; extern crate regex; extern crate serde; @@ -13,6 +13,7 @@ extern crate serde; extern crate serde_derive; extern crate serde_json; extern crate time; +extern crate tokio; extern crate unicode_segmentation; extern crate url; @@ -27,5 +28,8 @@ pub mod httpapi; pub mod response; pub mod server; +#[cfg(test)] +#[macro_use] +extern crate lazy_static; #[cfg(test)] pub mod test; diff --git a/testing/webdriver/src/server.rs b/testing/webdriver/src/server.rs index 67618d188a18..76e6ffb173b6 100644 --- a/testing/webdriver/src/server.rs +++ b/testing/webdriver/src/server.rs @@ -1,19 +1,18 @@ use serde_json; -use std::io::Read; use std::marker::PhantomData; -use std::net::SocketAddr; +use std::net::{SocketAddr, TcpListener as StdTcpListener}; use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use std::thread; -use std::time::Duration; -use hyper::header::{CacheControl, CacheDirective, ContentType}; -use hyper::method::Method; -use hyper::mime::{Attr, Mime, SubLevel, TopLevel, Value}; -use hyper::server::{Handler, Listening, Request, Response, Server}; -use hyper::status::StatusCode; -use hyper::uri::RequestUri::AbsolutePath; -use hyper::Result; +use futures::{future, Future, Stream}; +use hyper::{self, Body, Method, Request, Response, StatusCode}; +use hyper::service::Service; +use hyper::server::conn::Http; +use http; +use tokio::runtime::current_thread::Runtime; +use tokio::reactor::Handle; +use tokio::net::TcpListener; use command::{WebDriverCommand, WebDriverMessage}; use error::{ErrorStatus, WebDriverError, WebDriverResult}; @@ -93,7 +92,7 @@ impl, U: 
WebDriverExtensionRoute> Dispatcher { }; } Ok(DispatchMessage::Quit) => break, - Err(_) => panic!("Error receiving message in handler"), + Err(e) => panic!("Error receiving message in handler: {:?}", e), } } } @@ -152,96 +151,101 @@ impl, U: WebDriverExtensionRoute> Dispatcher { } } -#[derive(Debug)] +#[derive(Debug, Clone)] struct HttpHandler { - chan: Mutex>>, - api: Mutex>, + chan: Arc>>>, + api: Arc>>, } -impl HttpHandler { - fn new(api: WebDriverHttpApi, chan: Sender>) -> HttpHandler { +impl HttpHandler { + fn new(api: Arc>>, chan: Sender>) -> HttpHandler { HttpHandler { - chan: Mutex::new(chan), - api: Mutex::new(api), + chan: Arc::new(Mutex::new(chan)), + api: api, } } } -impl Handler for HttpHandler { - fn handle(&self, req: Request, res: Response) { - let mut req = req; - let mut res = res; +impl Service for HttpHandler { + type ReqBody = Body; + type ResBody = Body; - let mut body = String::new(); - if let Method::Post = req.method { - req.read_to_string(&mut body).unwrap(); - } + type Error = hyper::Error; + type Future = Box, Error=hyper::Error> + Send>; - debug!("-> {} {} {}", req.method, req.uri, body); + fn call(&mut self, req: Request) -> Self::Future { + let uri = req.uri().clone(); + let method = req.method().clone(); + let api = self.api.clone(); + let chan = self.chan.clone(); - match req.uri { - AbsolutePath(path) => { - let msg_result = { - // The fact that this locks for basically the whole request doesn't - // matter as long as we are only handling one request at a time. 
- match self.api.lock() { - Ok(ref api) => api.decode_request(req.method, &path[..], &body[..]), - Err(_) => return, - } - }; - let (status, resp_body) = match msg_result { - Ok(message) => { - let (send_res, recv_res) = channel(); - match self.chan.lock() { - Ok(ref c) => { - let res = - c.send(DispatchMessage::HandleWebDriver(message, send_res)); - match res { - Ok(x) => x, - Err(_) => { - error!("Something terrible happened"); - return; - } - } - } - Err(_) => { - error!("Something terrible happened"); - return; - } - } - match recv_res.recv() { - Ok(data) => match data { - Ok(response) => { - (StatusCode::Ok, serde_json::to_string(&response).unwrap()) - } - Err(err) => { - (err.http_status(), serde_json::to_string(&err).unwrap()) - } - }, - Err(e) => panic!("Error reading response: {:?}", e), - } - } - Err(err) => (err.http_status(), serde_json::to_string(&err).unwrap()), - }; + Box::new(req.into_body().concat2().and_then(move |body| { + let body = String::from_utf8(body.to_vec()).unwrap(); + debug!("-> {} {} {}", method, uri, body); - debug!("<- {} {}", status, resp_body); - - { - let resp_status = res.status_mut(); - *resp_status = status; + let msg_result = { + // The fact that this locks for basically the whole request doesn't + // matter as long as we are only handling one request at a time. 
+ match api.lock() { + Ok(ref api) => api.decode_request(method, &uri.path(), &body[..]), + Err(_) => panic!("Something terrible happened"), } - res.headers_mut().set(ContentType(Mime( - TopLevel::Application, - SubLevel::Json, - vec![(Attr::Charset, Value::Utf8)], - ))); - res.headers_mut() - .set(CacheControl(vec![CacheDirective::NoCache])); + }; - res.send(&resp_body.as_bytes()).unwrap(); - } - _ => {} - } + let (status, resp_body) = match msg_result { + Ok(message) => { + let (send_res, recv_res) = channel(); + match chan.lock() { + Ok(ref c) => { + let res = + c.send(DispatchMessage::HandleWebDriver(message, send_res)); + match res { + Ok(x) => x, + Err(_) => { + panic!("Something terrible happened"); + } + } + } + Err(e) => panic!("Error reading response: {:?}", e), + } + + match recv_res.recv() { + Ok(data) => match data { + Ok(response) => { + (StatusCode::OK, serde_json::to_string(&response).unwrap()) + } + Err(err) => { + (err.http_status(), serde_json::to_string(&err).unwrap()) + } + }, + Err(e) => panic!("Error reading response: {:?}", e), + } + } + Err(err) => (err.http_status(), serde_json::to_string(&err).unwrap()), + }; + + debug!("<- {} {}", status, resp_body); + + let response = Response::builder() + .status(status) + .header(http::header::CONTENT_TYPE, "application/json; charset=utf8") + .header(http::header::CACHE_CONTROL, "no-cache") + .body(resp_body.into()) + .unwrap(); + + Ok(response) + })) + } +} + +pub struct Listener { + _guard: Option>, + pub socket: SocketAddr, +} + +impl Drop for Listener { + fn drop(&mut self) { + let _ = self._guard.take().map(|j| j.join()); } } @@ -249,17 +253,35 @@ pub fn start( address: SocketAddr, handler: T, extension_routes: &[(Method, &str, U)], -) -> Result +) -> ::std::io::Result where T: 'static + WebDriverHandler, U: 'static + WebDriverExtensionRoute, { + let listener = StdTcpListener::bind(address)?; + let addr = listener.local_addr()?; let (msg_send, msg_recv) = channel(); - let api = 
WebDriverHttpApi::new(extension_routes); - let http_handler = HttpHandler::new(api, msg_send); - let mut server = Server::http(address)?; - server.keep_alive(Some(Duration::from_secs(90))); + let api = Arc::new(Mutex::new(WebDriverHttpApi::new(extension_routes))); + + let builder = thread::Builder::new().name("webdriver server".to_string()); + let handle = builder.spawn(move || { + let mut rt = Runtime::new().unwrap(); + let listener = TcpListener::from_std(listener, &Handle::default()).unwrap(); + + let http_handler = HttpHandler::new(api, msg_send.clone()); + let http = Http::new(); + let handle = rt.handle(); + + let fut = listener.incoming() + .for_each(move |socket| { + let fut = http.serve_connection(socket, http_handler.clone()).map_err(|_| ()); + handle.spawn(fut).unwrap(); + Ok(()) + }); + + rt.block_on(fut).unwrap(); + })?; let builder = thread::Builder::new().name("webdriver dispatcher".to_string()); builder.spawn(move || { @@ -267,5 +289,5 @@ where dispatcher.run(msg_recv); })?; - server.handle(http_handler) + Ok(Listener { _guard: Some(handle), socket: addr }) } diff --git a/third_party/rust/bitflags-0.7.0/.cargo-checksum.json b/third_party/rust/bitflags-0.7.0/.cargo-checksum.json deleted file mode 100644 index 0d7ad4676271..000000000000 --- a/third_party/rust/bitflags-0.7.0/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{".travis.yml":"2b615144d3f4b2e63ba6ec435cc18df7d76354aa07c2a02d6c707028cc448784","Cargo.toml":"db8c2e9ea912c5f3d2d89cf4cf936c448300e356b0fb533db8875923cb135256","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"8cfbc986af45867d9e620188af2392320fe6e0d9536753ba415c94ab522f5fb5","src/lib.rs":"618ce383bb219725363fba174fc66beb4874d9682e5da953f9e3e9cb3f786d5f","tests/external.rs":"546e549ec831876a5dc272bd0537adc9e9886c6da54656c825e7bffc079e2c74","tests/external_no_std.rs":"48929f5109aabc156442d5ae2ab07b4bce5d648488bf49dba725f6ab23bcb48a"},"package":"aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"} \ No newline at end of file diff --git a/third_party/rust/bitflags-0.7.0/.travis.yml b/third_party/rust/bitflags-0.7.0/.travis.yml deleted file mode 100644 index 60344466a177..000000000000 --- a/third_party/rust/bitflags-0.7.0/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: rust -rust: - - stable - - beta - - nightly -sudo: false -script: - - cargo build --verbose - - cargo test --verbose - - cargo doc -after_success: | - [ $TRAVIS_BRANCH = master ] && - [ $TRAVIS_PULL_REQUEST = false ] && - [ $TRAVIS_RUST_VERSION = nightly ] && - echo '' > target/doc/index.html && - pip install ghp-import --user $USER && - $HOME/.local/bin/ghp-import -n target/doc && - git push -qf https://${TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages -env: - global: - secure: d+l63TtlF6cfFVDGauYRexgx4lBww4ORqqK4Vt75nWbiCbjZYsKXbcTUdhAr193nIVGiNW50A8SekM01F3EngHwHwr6u5kFleOggm+HA0kkBVeX+k2A4WCVVfYI+gth+zk99WaF8h46MA0evhx6FYDoqeyl9oqmVifI4kaqhMwc= -notifications: - email: - on_success: never diff --git a/third_party/rust/bitflags-0.7.0/Cargo.toml b/third_party/rust/bitflags-0.7.0/Cargo.toml deleted file mode 100644 index 042497e9caf7..000000000000 --- a/third_party/rust/bitflags-0.7.0/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] - -name 
= "bitflags" -version = "0.7.0" -authors = ["The Rust Project Developers"] -license = "MIT/Apache-2.0" -readme = "README.md" -repository = "https://github.com/rust-lang/bitflags" -homepage = "https://github.com/rust-lang/bitflags" -documentation = "https://doc.rust-lang.org/bitflags" -description = """ -A macro to generate structures which behave like bitflags. -""" diff --git a/third_party/rust/bitflags-0.7.0/README.md b/third_party/rust/bitflags-0.7.0/README.md deleted file mode 100644 index 3edd8a361ef4..000000000000 --- a/third_party/rust/bitflags-0.7.0/README.md +++ /dev/null @@ -1,24 +0,0 @@ -bitflags -======== - -A Rust macro to generate structures which behave like a set of bitflags - -[![Build Status](https://travis-ci.org/rust-lang-nursery/bitflags.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/bitflags) - -[Documentation](https://doc.rust-lang.org/bitflags) - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -bitflags = "0.6" -``` - -and this to your crate root: - -```rust -#[macro_use] -extern crate bitflags; -``` diff --git a/third_party/rust/bitflags-0.7.0/src/lib.rs b/third_party/rust/bitflags-0.7.0/src/lib.rs deleted file mode 100644 index 698799dab2ea..000000000000 --- a/third_party/rust/bitflags-0.7.0/src/lib.rs +++ /dev/null @@ -1,808 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A typesafe bitmask flag generator. - -#![no_std] - -#[cfg(test)] -#[macro_use] -extern crate std; - -// Re-export libstd/libcore using an alias so that the macros can work in no_std -// crates while remaining compatible with normal crates. 
-#[allow(private_in_public)] -#[doc(hidden)] -pub use core as __core; - -/// The `bitflags!` macro generates a `struct` that holds a set of C-style -/// bitmask flags. It is useful for creating typesafe wrappers for C APIs. -/// -/// The flags should only be defined for integer types, otherwise unexpected -/// type errors may occur at compile time. -/// -/// # Example -/// -/// ```{.rust} -/// #[macro_use] -/// extern crate bitflags; -/// -/// bitflags! { -/// flags Flags: u32 { -/// const FLAG_A = 0b00000001, -/// const FLAG_B = 0b00000010, -/// const FLAG_C = 0b00000100, -/// const FLAG_ABC = FLAG_A.bits -/// | FLAG_B.bits -/// | FLAG_C.bits, -/// } -/// } -/// -/// fn main() { -/// let e1 = FLAG_A | FLAG_C; -/// let e2 = FLAG_B | FLAG_C; -/// assert_eq!((e1 | e2), FLAG_ABC); // union -/// assert_eq!((e1 & e2), FLAG_C); // intersection -/// assert_eq!((e1 - e2), FLAG_A); // set difference -/// assert_eq!(!e2, FLAG_A); // set complement -/// } -/// ``` -/// -/// The generated `struct`s can also be extended with type and trait -/// implementations: -/// -/// ```{.rust} -/// #[macro_use] -/// extern crate bitflags; -/// -/// use std::fmt; -/// -/// bitflags! { -/// flags Flags: u32 { -/// const FLAG_A = 0b00000001, -/// const FLAG_B = 0b00000010, -/// } -/// } -/// -/// impl Flags { -/// pub fn clear(&mut self) { -/// self.bits = 0; // The `bits` field can be accessed from within the -/// // same module where the `bitflags!` macro was invoked. 
-/// } -/// } -/// -/// impl fmt::Display for Flags { -/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { -/// write!(f, "hi!") -/// } -/// } -/// -/// fn main() { -/// let mut flags = FLAG_A | FLAG_B; -/// flags.clear(); -/// assert!(flags.is_empty()); -/// assert_eq!(format!("{}", flags), "hi!"); -/// assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B"); -/// assert_eq!(format!("{:?}", FLAG_B), "FLAG_B"); -/// } -/// ``` -/// -/// # Visibility -/// -/// The generated struct and its associated flag constants are not exported -/// out of the current module by default. A definition can be exported out of -/// the current module by adding `pub` before `flags`: -/// -/// ```{.rust},ignore -/// #[macro_use] -/// extern crate bitflags; -/// -/// mod example { -/// bitflags! { -/// pub flags Flags1: u32 { -/// const FLAG_A = 0b00000001, -/// } -/// } -/// bitflags! { -/// flags Flags2: u32 { -/// const FLAG_B = 0b00000010, -/// } -/// } -/// } -/// -/// fn main() { -/// let flag1 = example::FLAG_A; -/// let flag2 = example::FLAG_B; // error: const `FLAG_B` is private -/// } -/// ``` -/// -/// # Attributes -/// -/// Attributes can be attached to the generated `struct` by placing them -/// before the `flags` keyword. -/// -/// # Trait implementations -/// -/// The `Copy`, `Clone`, `PartialEq`, `Eq`, `PartialOrd`, `Ord` and `Hash` -/// traits automatically derived for the `struct` using the `derive` attribute. -/// Additional traits can be derived by providing an explicit `derive` -/// attribute on `flags`. -/// -/// The `Extend` and `FromIterator` traits are implemented for the `struct`, -/// too: `Extend` adds the union of the instances of the `struct` iterated over, -/// while `FromIterator` calculates the union. -/// -/// The `Debug` trait is also implemented by displaying the bits value of the -/// internal struct. 
-/// -/// ## Operators -/// -/// The following operator traits are implemented for the generated `struct`: -/// -/// - `BitOr` and `BitOrAssign`: union -/// - `BitAnd` and `BitAndAssign`: intersection -/// - `BitXor` and `BitXorAssign`: toggle -/// - `Sub` and `SubAssign`: set difference -/// - `Not`: set complement -/// -/// As long as the assignment operators are unstable rust feature they are only -/// available with the crate feature `assignment_ops` enabled. -/// -/// # Methods -/// -/// The following methods are defined for the generated `struct`: -/// -/// - `empty`: an empty set of flags -/// - `all`: the set of all flags -/// - `bits`: the raw value of the flags currently stored -/// - `from_bits`: convert from underlying bit representation, unless that -/// representation contains bits that do not correspond to a flag -/// - `from_bits_truncate`: convert from underlying bit representation, dropping -/// any bits that do not correspond to flags -/// - `is_empty`: `true` if no flags are currently stored -/// - `is_all`: `true` if all flags are currently set -/// - `intersects`: `true` if there are flags common to both `self` and `other` -/// - `contains`: `true` all of the flags in `other` are contained within `self` -/// - `insert`: inserts the specified flags in-place -/// - `remove`: removes the specified flags in-place -/// - `toggle`: the specified flags will be inserted if not present, and removed -/// if they are. -#[macro_export] -macro_rules! bitflags { - ($(#[$attr:meta])* pub flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ - }) => { - #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] - $(#[$attr])* - pub struct $BitFlags { - bits: $T, - } - - $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { bits: $value };)+ - - bitflags! 
{ - @_impl flags $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value),+ - } - } - }; - ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ - }) => { - #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] - $(#[$attr])* - struct $BitFlags { - bits: $T, - } - - $($(#[$Flag_attr])* const $Flag: $BitFlags = $BitFlags { bits: $value };)+ - - bitflags! { - @_impl flags $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value),+ - } - } - }; - (@_impl flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+ - }) => { - impl $crate::__core::fmt::Debug for $BitFlags { - fn fmt(&self, f: &mut $crate::__core::fmt::Formatter) -> $crate::__core::fmt::Result { - // This convoluted approach is to handle #[cfg]-based flag - // omission correctly. Some of the $Flag variants may not be - // defined in this module so we create an inner module which - // defines *all* flags to the value of 0. We then create a - // second inner module that defines all of the flags with #[cfg] - // to their real values. Afterwards the glob will import - // variants from the second inner module, shadowing all - // defined variants, leaving only the undefined ones with the - // bit value of 0. - #[allow(dead_code)] - #[allow(unused_assignments)] - mod dummy { - // We can't use the real $BitFlags struct because it may be - // private, which prevents us from using it to define - // public constants. - pub struct $BitFlags { - bits: u64, - } - mod real_flags { - use super::$BitFlags; - $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { - bits: super::super::$Flag.bits as u64 - };)+ - } - // Now we define the "undefined" versions of the flags. - // This way, all the names exist, even if some are #[cfg]ed - // out. 
- $(const $Flag: $BitFlags = $BitFlags { bits: 0 };)+ - - #[inline] - pub fn fmt(self_: u64, - f: &mut $crate::__core::fmt::Formatter) - -> $crate::__core::fmt::Result { - // Now we import the real values for the flags. - // Only ones that are #[cfg]ed out will be 0. - use self::real_flags::*; - - let mut first = true; - $( - // $Flag.bits == 0 means that $Flag doesn't exist - if $Flag.bits != 0 && self_ & $Flag.bits as u64 == $Flag.bits as u64 { - if !first { - try!(f.write_str(" | ")); - } - first = false; - try!(f.write_str(stringify!($Flag))); - } - )+ - Ok(()) - } - } - dummy::fmt(self.bits as u64, f) - } - } - - #[allow(dead_code)] - impl $BitFlags { - /// Returns an empty set of flags. - #[inline] - pub fn empty() -> $BitFlags { - $BitFlags { bits: 0 } - } - - /// Returns the set containing all flags. - #[inline] - pub fn all() -> $BitFlags { - // See above `dummy` module for why this approach is taken. - #[allow(dead_code)] - mod dummy { - pub struct $BitFlags { - bits: u64, - } - mod real_flags { - use super::$BitFlags; - $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { - bits: super::super::$Flag.bits as u64 - };)+ - } - $(const $Flag: $BitFlags = $BitFlags { bits: 0 };)+ - - #[inline] - pub fn all() -> u64 { - use self::real_flags::*; - $($Flag.bits)|+ - } - } - $BitFlags { bits: dummy::all() as $T } - } - - /// Returns the raw value of the flags currently stored. - #[inline] - pub fn bits(&self) -> $T { - self.bits - } - - /// Convert from underlying bit representation, unless that - /// representation contains bits that do not correspond to a flag. - #[inline] - pub fn from_bits(bits: $T) -> $crate::__core::option::Option<$BitFlags> { - if (bits & !$BitFlags::all().bits()) == 0 { - $crate::__core::option::Option::Some($BitFlags { bits: bits }) - } else { - $crate::__core::option::Option::None - } - } - - /// Convert from underlying bit representation, dropping any bits - /// that do not correspond to flags. 
- #[inline] - pub fn from_bits_truncate(bits: $T) -> $BitFlags { - $BitFlags { bits: bits } & $BitFlags::all() - } - - /// Returns `true` if no flags are currently stored. - #[inline] - pub fn is_empty(&self) -> bool { - *self == $BitFlags::empty() - } - - /// Returns `true` if all flags are currently set. - #[inline] - pub fn is_all(&self) -> bool { - *self == $BitFlags::all() - } - - /// Returns `true` if there are flags common to both `self` and `other`. - #[inline] - pub fn intersects(&self, other: $BitFlags) -> bool { - !(*self & other).is_empty() - } - - /// Returns `true` all of the flags in `other` are contained within `self`. - #[inline] - pub fn contains(&self, other: $BitFlags) -> bool { - (*self & other) == other - } - - /// Inserts the specified flags in-place. - #[inline] - pub fn insert(&mut self, other: $BitFlags) { - self.bits |= other.bits; - } - - /// Removes the specified flags in-place. - #[inline] - pub fn remove(&mut self, other: $BitFlags) { - self.bits &= !other.bits; - } - - /// Toggles the specified flags in-place. - #[inline] - pub fn toggle(&mut self, other: $BitFlags) { - self.bits ^= other.bits; - } - } - - impl $crate::__core::ops::BitOr for $BitFlags { - type Output = $BitFlags; - - /// Returns the union of the two sets of flags. - #[inline] - fn bitor(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits | other.bits } - } - } - - impl $crate::__core::ops::BitOrAssign for $BitFlags { - - /// Adds the set of flags. - #[inline] - fn bitor_assign(&mut self, other: $BitFlags) { - self.bits |= other.bits; - } - } - - impl $crate::__core::ops::BitXor for $BitFlags { - type Output = $BitFlags; - - /// Returns the left flags, but with all the right flags toggled. - #[inline] - fn bitxor(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits ^ other.bits } - } - } - - impl $crate::__core::ops::BitXorAssign for $BitFlags { - - /// Toggles the set of flags. 
- #[inline] - fn bitxor_assign(&mut self, other: $BitFlags) { - self.bits ^= other.bits; - } - } - - impl $crate::__core::ops::BitAnd for $BitFlags { - type Output = $BitFlags; - - /// Returns the intersection between the two sets of flags. - #[inline] - fn bitand(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits & other.bits } - } - } - - impl $crate::__core::ops::BitAndAssign for $BitFlags { - - /// Disables all flags disabled in the set. - #[inline] - fn bitand_assign(&mut self, other: $BitFlags) { - self.bits &= other.bits; - } - } - - impl $crate::__core::ops::Sub for $BitFlags { - type Output = $BitFlags; - - /// Returns the set difference of the two sets of flags. - #[inline] - fn sub(self, other: $BitFlags) -> $BitFlags { - $BitFlags { bits: self.bits & !other.bits } - } - } - - impl $crate::__core::ops::SubAssign for $BitFlags { - - /// Disables all flags enabled in the set. - #[inline] - fn sub_assign(&mut self, other: $BitFlags) { - self.bits &= !other.bits; - } - } - - impl $crate::__core::ops::Not for $BitFlags { - type Output = $BitFlags; - - /// Returns the complement of this set of flags. - #[inline] - fn not(self) -> $BitFlags { - $BitFlags { bits: !self.bits } & $BitFlags::all() - } - } - - impl $crate::__core::iter::Extend<$BitFlags> for $BitFlags { - fn extend>(&mut self, iterator: T) { - for item in iterator { - self.insert(item) - } - } - } - - impl $crate::__core::iter::FromIterator<$BitFlags> for $BitFlags { - fn from_iter>(iterator: T) -> $BitFlags { - let mut result = Self::empty(); - result.extend(iterator); - result - } - } - }; - ($(#[$attr:meta])* pub flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+, - }) => { - bitflags! { - $(#[$attr])* - pub flags $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value),+ - } - } - }; - ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty { - $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+, - }) => { - bitflags! 
{ - $(#[$attr])* - flags $BitFlags: $T { - $($(#[$Flag_attr])* const $Flag = $value),+ - } - } - }; -} - -#[cfg(test)] -#[allow(non_upper_case_globals, dead_code)] -mod tests { - use std::hash::{SipHasher, Hash, Hasher}; - - bitflags! { - #[doc = "> The first principle is that you must not fool yourself — and"] - #[doc = "> you are the easiest person to fool."] - #[doc = "> "] - #[doc = "> - Richard Feynman"] - flags Flags: u32 { - const FlagA = 0b00000001, - #[doc = " macros are way better at generating code than trans is"] - const FlagB = 0b00000010, - const FlagC = 0b00000100, - #[doc = "* cmr bed"] - #[doc = "* strcat table"] - #[doc = " wait what?"] - const FlagABC = FlagA.bits - | FlagB.bits - | FlagC.bits, - } - } - - bitflags! { - flags _CfgFlags: u32 { - #[cfg(windows)] - const _CfgA = 0b01, - #[cfg(unix)] - const _CfgB = 0b01, - #[cfg(windows)] - const _CfgC = _CfgA.bits | 0b10, - } - } - - bitflags! { - flags AnotherSetOfFlags: i8 { - const AnotherFlag = -1_i8, - } - } - - #[test] - fn test_bits(){ - assert_eq!(Flags::empty().bits(), 0b00000000); - assert_eq!(FlagA.bits(), 0b00000001); - assert_eq!(FlagABC.bits(), 0b00000111); - - assert_eq!(AnotherSetOfFlags::empty().bits(), 0b00); - assert_eq!(AnotherFlag.bits(), !0_i8); - } - - #[test] - fn test_from_bits() { - assert_eq!(Flags::from_bits(0), Some(Flags::empty())); - assert_eq!(Flags::from_bits(0b1), Some(FlagA)); - assert_eq!(Flags::from_bits(0b10), Some(FlagB)); - assert_eq!(Flags::from_bits(0b11), Some(FlagA | FlagB)); - assert_eq!(Flags::from_bits(0b1000), None); - - assert_eq!(AnotherSetOfFlags::from_bits(!0_i8), Some(AnotherFlag)); - } - - #[test] - fn test_from_bits_truncate() { - assert_eq!(Flags::from_bits_truncate(0), Flags::empty()); - assert_eq!(Flags::from_bits_truncate(0b1), FlagA); - assert_eq!(Flags::from_bits_truncate(0b10), FlagB); - assert_eq!(Flags::from_bits_truncate(0b11), (FlagA | FlagB)); - assert_eq!(Flags::from_bits_truncate(0b1000), Flags::empty()); - 
assert_eq!(Flags::from_bits_truncate(0b1001), FlagA); - - assert_eq!(AnotherSetOfFlags::from_bits_truncate(0_i8), AnotherSetOfFlags::empty()); - } - - #[test] - fn test_is_empty(){ - assert!(Flags::empty().is_empty()); - assert!(!FlagA.is_empty()); - assert!(!FlagABC.is_empty()); - - assert!(!AnotherFlag.is_empty()); - } - - #[test] - fn test_is_all() { - assert!(Flags::all().is_all()); - assert!(!FlagA.is_all()); - assert!(FlagABC.is_all()); - - assert!(AnotherFlag.is_all()); - } - - #[test] - fn test_two_empties_do_not_intersect() { - let e1 = Flags::empty(); - let e2 = Flags::empty(); - assert!(!e1.intersects(e2)); - - assert!(AnotherFlag.intersects(AnotherFlag)); - } - - #[test] - fn test_empty_does_not_intersect_with_full() { - let e1 = Flags::empty(); - let e2 = FlagABC; - assert!(!e1.intersects(e2)); - } - - #[test] - fn test_disjoint_intersects() { - let e1 = FlagA; - let e2 = FlagB; - assert!(!e1.intersects(e2)); - } - - #[test] - fn test_overlapping_intersects() { - let e1 = FlagA; - let e2 = FlagA | FlagB; - assert!(e1.intersects(e2)); - } - - #[test] - fn test_contains() { - let e1 = FlagA; - let e2 = FlagA | FlagB; - assert!(!e1.contains(e2)); - assert!(e2.contains(e1)); - assert!(FlagABC.contains(e2)); - - assert!(AnotherFlag.contains(AnotherFlag)); - } - - #[test] - fn test_insert(){ - let mut e1 = FlagA; - let e2 = FlagA | FlagB; - e1.insert(e2); - assert_eq!(e1, e2); - - let mut e3 = AnotherSetOfFlags::empty(); - e3.insert(AnotherFlag); - assert_eq!(e3, AnotherFlag); - } - - #[test] - fn test_remove(){ - let mut e1 = FlagA | FlagB; - let e2 = FlagA | FlagC; - e1.remove(e2); - assert_eq!(e1, FlagB); - - let mut e3 = AnotherFlag; - e3.remove(AnotherFlag); - assert_eq!(e3, AnotherSetOfFlags::empty()); - } - - #[test] - fn test_operators() { - let e1 = FlagA | FlagC; - let e2 = FlagB | FlagC; - assert_eq!((e1 | e2), FlagABC); // union - assert_eq!((e1 & e2), FlagC); // intersection - assert_eq!((e1 - e2), FlagA); // set difference - assert_eq!(!e2, 
FlagA); // set complement - assert_eq!(e1 ^ e2, FlagA | FlagB); // toggle - let mut e3 = e1; - e3.toggle(e2); - assert_eq!(e3, FlagA | FlagB); - - let mut m4 = AnotherSetOfFlags::empty(); - m4.toggle(AnotherSetOfFlags::empty()); - assert_eq!(m4, AnotherSetOfFlags::empty()); - } - - #[test] - fn test_assignment_operators() { - let mut m1 = Flags::empty(); - let e1 = FlagA | FlagC; - // union - m1 |= FlagA; - assert_eq!(m1, FlagA); - // intersection - m1 &= e1; - assert_eq!(m1, FlagA); - // set difference - m1 -= m1; - assert_eq!(m1, Flags::empty()); - // toggle - m1 ^= e1; - assert_eq!(m1, e1); - } - - #[test] - fn test_extend() { - let mut flags; - - flags = Flags::empty(); - flags.extend([].iter().cloned()); - assert_eq!(flags, Flags::empty()); - - flags = Flags::empty(); - flags.extend([FlagA, FlagB].iter().cloned()); - assert_eq!(flags, FlagA | FlagB); - - flags = FlagA; - flags.extend([FlagA, FlagB].iter().cloned()); - assert_eq!(flags, FlagA | FlagB); - - flags = FlagB; - flags.extend([FlagA, FlagABC].iter().cloned()); - assert_eq!(flags, FlagABC); - } - - #[test] - fn test_from_iterator() { - assert_eq!([].iter().cloned().collect::(), Flags::empty()); - assert_eq!([FlagA, FlagB].iter().cloned().collect::(), FlagA | FlagB); - assert_eq!([FlagA, FlagABC].iter().cloned().collect::(), FlagABC); - } - - #[test] - fn test_lt() { - let mut a = Flags::empty(); - let mut b = Flags::empty(); - - assert!(!(a < b) && !(b < a)); - b = FlagB; - assert!(a < b); - a = FlagC; - assert!(!(a < b) && b < a); - b = FlagC | FlagB; - assert!(a < b); - } - - #[test] - fn test_ord() { - let mut a = Flags::empty(); - let mut b = Flags::empty(); - - assert!(a <= b && a >= b); - a = FlagA; - assert!(a > b && a >= b); - assert!(b < a && b <= a); - b = FlagB; - assert!(b > a && b >= a); - assert!(a < b && a <= b); - } - - fn hash(t: &T) -> u64 { - let mut s = SipHasher::new_with_keys(0, 0); - t.hash(&mut s); - s.finish() - } - - #[test] - fn test_hash() { - let mut x = Flags::empty(); - 
let mut y = Flags::empty(); - assert_eq!(hash(&x), hash(&y)); - x = Flags::all(); - y = FlagABC; - assert_eq!(hash(&x), hash(&y)); - } - - #[test] - fn test_debug() { - assert_eq!(format!("{:?}", FlagA | FlagB), "FlagA | FlagB"); - assert_eq!(format!("{:?}", FlagABC), "FlagA | FlagB | FlagC | FlagABC"); - } - - mod submodule { - bitflags! { - pub flags PublicFlags: i8 { - const FlagX = 0, - } - } - bitflags! { - flags PrivateFlags: i8 { - const FlagY = 0, - } - } - - #[test] - fn test_private() { - let _ = FlagY; - } - } - - #[test] - fn test_public() { - let _ = submodule::FlagX; - } - - mod t1 { - mod foo { - pub type Bar = i32; - } - - bitflags! { - /// baz - flags Flags: foo::Bar { - const A = 0b00000001, - #[cfg(foo)] - const B = 0b00000010, - #[cfg(foo)] - const C = 0b00000010, - } - } - } -} diff --git a/third_party/rust/bitflags-0.7.0/tests/external.rs b/third_party/rust/bitflags-0.7.0/tests/external.rs deleted file mode 100644 index 0f0c7f665fbc..000000000000 --- a/third_party/rust/bitflags-0.7.0/tests/external.rs +++ /dev/null @@ -1,21 +0,0 @@ -#![allow(dead_code)] - -#[macro_use] -extern crate bitflags; - -bitflags! { - /// baz - flags Flags: u32 { - const A = 0b00000001, - #[doc = "bar"] - const B = 0b00000010, - const C = 0b00000100, - #[doc = "foo"] - const ABC = A.bits | B.bits | C.bits, - } -} - -#[test] -fn smoke() { - assert_eq!(ABC, A | B | C); -} diff --git a/third_party/rust/bitflags-0.7.0/tests/external_no_std.rs b/third_party/rust/bitflags-0.7.0/tests/external_no_std.rs deleted file mode 100644 index 46526fd71298..000000000000 --- a/third_party/rust/bitflags-0.7.0/tests/external_no_std.rs +++ /dev/null @@ -1,22 +0,0 @@ -#![allow(dead_code)] -#![no_std] - -#[macro_use] -extern crate bitflags; - -bitflags! 
{ - /// baz - flags Flags: u32 { - const A = 0b00000001, - #[doc = "bar"] - const B = 0b00000010, - const C = 0b00000100, - #[doc = "foo"] - const ABC = A.bits | B.bits | C.bits, - } -} - -#[test] -fn smoke() { - assert_eq!(ABC, A | B | C); -} diff --git a/third_party/rust/bytes/.cargo-checksum.json b/third_party/rust/bytes/.cargo-checksum.json index d657aa0f316b..8301dbd01e9e 100644 --- a/third_party/rust/bytes/.cargo-checksum.json +++ b/third_party/rust/bytes/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"c6b490cbd81117cd0500e8dc26ca74cdf04eb49639ac0287eef559d7791cde1f","Cargo.toml":"3f1fe6c7e1e0bb164730bb584a58e128587dd742cfd1ab6bcda4c482be694bf5","LICENSE-APACHE":"01b5abb4a95cc87b220efbd67a1e99c74bef3d744806dd44b4d57e81db814962","LICENSE-MIT":"d4784f55731ba75b77ad73a52808914b26b2f93b69dd4c03249528a75afbd946","README.md":"7f5f585db959c73bcb1e8afd52b1c4110e57c2264a387f713b388f98181faebf","benches/bytes.rs":"bc1ef63dae52f111c78009399b16308e9e3c454b3ab5c46f89626e246fce3bd4","ci/before_deploy.ps1":"a8ee0204dd1397a245a47626fecd98eff5da76e12b15139c06271b3cc309a3e1","ci/before_deploy.sh":"ea008e2c544482cba5b659c17887ccd5354779c629096f28e667d40391299cc5","ci/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/script.sh":"4e6f6b7df02d316ce5166a3526dc6bca6b6d051dbc5bd6d5b28a7c79fc646834","src/buf/buf.rs":"a8a26bb22fd5becd2062e756fc272eb6e09606e9e40120c4999634cb068b1837","src/buf/buf_mut.rs":"35e7fee4727f1628bc899216a74f9652235be255a035687d56bf8df71ebd29a4","src/buf/chain.rs":"3a4f88879d27240e84e58bbeddf3f7c0958d0d81f4707245199b53e922029a26","src/buf/from_buf.rs":"949683c6a08099b280bd324d0c8646b1d6ff80af4d3e9397edb76cc2f1b18c88","src/buf/into_buf.rs":"d982cb82f3f2ddba863366c36f9f6041b2076e7bb8906e882e47ef65742974db","src/buf/iter.rs":"325428e4f913beb602f6451b59847d4c8658ec23939a15f7b145733969c17f03","src/buf/mod.rs":"4f385ce47d6d19a064a1dbec3339e95e116aa9b501eb9d8a47030c2794e1ee9e","src/buf/reader.rs":"62098e87bd1aa8b7f57ed4a4d1b5
417462f01ad2cfebfbac46b6ce7f00ea0192","src/buf/take.rs":"0bdd0720afc546c999e5a3125f20b6f31a5692b37f7218c25f414773e2702f3d","src/buf/writer.rs":"4a28c1d362e837682a4b3197732a6dbb4072dc660f0dbba18616679adf8a60f2","src/bytes.rs":"7b1ba792e6062ac9453b46bf1f8af7ea7784ccb142d38b40491b1a3c6d2f2e5a","src/debug.rs":"f01f07b199994400a62aa872344a19737198c8bce0fdc5a4b5b34d9cd37dee75","src/lib.rs":"cf5e336f8e04a35204e092eb9a6bf0fd8dc1cf8c639b5bb45f1298e7178deef4","src/serde.rs":"e8d0fe3630e173272756fb24a8c3ccb112f4cb551b8b88b64f669a71f39ef83b","tests/test_buf.rs":"5a29764cdc3f7c1eda563562dea1b624b923c088330eb4b894c28eb4e0faaf87","tests/test_buf_mut.rs":"5aefacb92183c747c9e91a469d675d6490618742ee5982d74af220faa9343ef1","tests/test_bytes.rs":"5fbd44ae30dc07883b5c5a5e6d8c91037525dc0cf6cfdcfb78033c3867089665","tests/test_chain.rs":"7bda7550927cf7799c708fedaaf4cd2924ed3fd800f30ef126d6c9efe48c3986","tests/test_debug.rs":"232f8a604668a61dc580eb064cf0fbc21f664182928438710c7cfde14bd637f4","tests/test_from_buf.rs":"9bf743c77e69c643d0a7673426547dacaedbcc65028a26cf5864eb6714e4897a","tests/test_iter.rs":"bc8a5da0b3cc7e5a5dc37e91dd2a3ca3fc78ba74b087883473043be45cd9b265","tests/test_serde.rs":"98e0ab121153a7ead47538257ac7fc7d5db081fc35050552b5e5dc9500b414f9","tests/test_take.rs":"bb81822eec5d3774bd2626f0f29b543d3651f4f5a95c51dfe8f93dec8b4f8e94"},"package":"d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"55941e30721c4b104cc8f84473da5acd0cd57903d66e8fd029b8c5160d99ed53","Cargo.toml":"f71e10b42ed8637ed615222f6d9e2af5df707f7f3d9d4fd203358c2af87b7ff0","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"3ca600d7b4175eee634621a870904fe5ec761e6fd623f745423d378dec1bfd51","benches/bytes.rs":"a60889c35cf76faf2b403f94d3ab2831a569f2e1f6e4cc4d5e88f3c26bddb8b0","ci/before_deploy.ps1":"a8ee0204dd1397a245a47626fecd98eff5da76e12b15139c06271b3cc309a3e1","ci/before_deploy.sh":"ea008e2c544482cba5b659c17887ccd5354779c629096f28e667d40391299cc5","ci/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/script.sh":"4e6f6b7df02d316ce5166a3526dc6bca6b6d051dbc5bd6d5b28a7c79fc646834","ci/tsan":"905d22267f7493550d123b1482fc1a7f4b24e8cbc4ae4f0e0c2d42383e79ad83","src/buf/buf.rs":"1b5ff3ab694380fe59588b8d195111ba663c5f8901b272b531851deb26e4629a","src/buf/buf_mut.rs":"d2f54e9c64b86c8ddd325d40b3c8e1b2132d361937bac3b5fccb7a81154b89b8","src/buf/chain.rs":"3a4f88879d27240e84e58bbeddf3f7c0958d0d81f4707245199b53e922029a26","src/buf/from_buf.rs":"949683c6a08099b280bd324d0c8646b1d6ff80af4d3e9397edb76cc2f1b18c88","src/buf/into_buf.rs":"b6e35d34533fae229f5209b95a39a1c35485f48a873a1d357d99218c486b0b95","src/buf/iter.rs":"325428e4f913beb602f6451b59847d4c8658ec23939a15f7b145733969c17f03","src/buf/mod.rs":"4f385ce47d6d19a064a1dbec3339e95e116aa9b501eb9d8a47030c2794e1ee9e","src/buf/reader.rs":"62098e87bd1aa8b7f57ed4a4d1b5417462f01ad2cfebfbac46b6ce7f00ea0192","src/buf/take.rs":"0bdd0720afc546c999e5a3125f20b6f31a5692b37f7218c25f414773e2702f3d","src/buf/writer.rs":"4a28c1d362e837682a4b3197732a6dbb4072dc660f0dbba18616679adf8a60f2","src/bytes.rs":"546f2ef082656be2639314994d4228833f331747578a9ebf69075d2bcec0ae2d","src/debug.rs":"a8bd8062e7e500fdc5a79cb6c848fb860be8359d95e1c91034777fe33c78d54e","src/lib.rs":"fb61bba13236978f2c3b93cc39eb4a99c02f1ecd539c917a8380e5d344e67706","src/serde.rs":"e8d0fe3630e173272756fb24a8c3ccb112f4cb55
1b8b88b64f669a71f39ef83b","tests/test_buf.rs":"6409f32f734969bebeffa7592fed531953d252c5a639e422b6e4b14ec024b1d5","tests/test_buf_mut.rs":"a6a653d5053340b0254900c33e36df6db1421f821c3e985be0044b1b447ecedc","tests/test_bytes.rs":"92ae28671dee4ab91c7e0366e094b009c547defd8fd1c977520e5ad574eea70d","tests/test_chain.rs":"3fe1f28f3bce4377f8ed506718f95f3ed3ebaf251a1cb43b2705331e3dd6b43a","tests/test_debug.rs":"4cfd44c30d0b8f7c5eb8e8916ad7436e9f538732fe9f4b696dc22b84c31ac64a","tests/test_from_buf.rs":"9bf743c77e69c643d0a7673426547dacaedbcc65028a26cf5864eb6714e4897a","tests/test_iter.rs":"bc8a5da0b3cc7e5a5dc37e91dd2a3ca3fc78ba74b087883473043be45cd9b265","tests/test_serde.rs":"98e0ab121153a7ead47538257ac7fc7d5db081fc35050552b5e5dc9500b414f9","tests/test_take.rs":"bb81822eec5d3774bd2626f0f29b543d3651f4f5a95c51dfe8f93dec8b4f8e94"},"package":"e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8"} \ No newline at end of file diff --git a/third_party/rust/bytes/CHANGELOG.md b/third_party/rust/bytes/CHANGELOG.md index aaa23089ec18..1e87d35be20f 100644 --- a/third_party/rust/bytes/CHANGELOG.md +++ b/third_party/rust/bytes/CHANGELOG.md @@ -1,3 +1,27 @@ +# 0.4.9 (July 12, 2018) + +* Add 128 bit number support behind a feature flag (#209). +* Implement `IntoBuf` for `&mut [u8]` + +# 0.4.8 (May 25, 2018) + +* Fix panic in `BytesMut` `FromIterator` implementation. +* Bytes: Recycle space when reserving space in vec mode (#197). +* Bytes: Add resize fn (#203). + +# 0.4.7 (April 27, 2018) + +* Make `Buf` and `BufMut` usable as trait objects (#186). +* impl BorrowMut for BytesMut (#185). +* Improve accessor performance (#195). + +# 0.4.6 (Janary 8, 2018) + +* Implement FromIterator for Bytes/BytesMut (#148). +* Add `advance` fn to Bytes/BytesMut (#166). +* Add `unsplit` fn to `BytesMut` (#162, #173). +* Improvements to Bytes split fns (#92). 
+ # 0.4.5 (August 12, 2017) * Fix range bug in `Take::bytes` diff --git a/third_party/rust/bytes/Cargo.toml b/third_party/rust/bytes/Cargo.toml index 3cd302b72a13..61ed633cbc0e 100644 --- a/third_party/rust/bytes/Cargo.toml +++ b/third_party/rust/bytes/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "bytes" -version = "0.4.5" +version = "0.4.9" authors = ["Carl Lerche "] exclude = [".gitignore", ".travis.yml", "deploy.sh", "bench/**/*", "test/**/*"] description = "Types and traits for working with bytes" @@ -21,16 +21,21 @@ documentation = "https://carllerche.github.io/bytes/bytes" readme = "README.md" keywords = ["buffers", "zero-copy", "io"] categories = ["network-programming", "data-structures"] -license = "MIT/Apache-2.0" +license = "MIT" repository = "https://github.com/carllerche/bytes" +[package.metadata.docs.rs] +features = ["i128"] [dependencies.byteorder] -version = "1.0.0" +version = "1.1.0" + +[dependencies.iovec] +version = "0.1" [dependencies.serde] version = "1.0" optional = true - -[dependencies.iovec] -version = "0.1" [dev-dependencies.serde_test] version = "1.0" + +[features] +i128 = ["byteorder/i128"] diff --git a/third_party/rust/tokio-io/LICENSE-MIT b/third_party/rust/bytes/LICENSE similarity index 96% rename from third_party/rust/tokio-io/LICENSE-MIT rename to third_party/rust/bytes/LICENSE index 28e630cf40d2..58fb29a12384 100644 --- a/third_party/rust/tokio-io/LICENSE-MIT +++ b/third_party/rust/bytes/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2016 Alex Crichton +Copyright (c) 2018 Carl Lerche Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/third_party/rust/bytes/README.md b/third_party/rust/bytes/README.md index 3a88c56bd305..3b2a80b3bbfe 100644 --- a/third_party/rust/bytes/README.md +++ b/third_party/rust/bytes/README.md @@ -33,10 +33,13 @@ Serde support is optional and disabled by default. 
To enable use the feature `se bytes = { version = "0.4", features = ["serde"] } ``` -# License +## License -`bytes` is primarily distributed under the terms of both the MIT license and the -Apache License (Version 2.0), with portions covered by various BSD-like -licenses. +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `bytes` by you, shall be licensed as MIT, without any additional +terms or conditions. -See LICENSE-APACHE, and LICENSE-MIT for details. diff --git a/third_party/rust/bytes/benches/bytes.rs b/third_party/rust/bytes/benches/bytes.rs index 07494f55dc4e..7a338746b04c 100644 --- a/third_party/rust/bytes/benches/bytes.rs +++ b/third_party/rust/bytes/benches/bytes.rs @@ -29,6 +29,18 @@ fn alloc_big(b: &mut Bencher) { }) } +#[bench] +fn split_off_and_drop(b: &mut Bencher) { + b.iter(|| { + for _ in 0..1024 { + let v = vec![10; 200]; + let mut b = Bytes::from(v); + test::black_box(b.split_off(100)); + test::black_box(b); + } + }) +} + #[bench] fn deref_unique(b: &mut Bencher) { let mut buf = BytesMut::with_capacity(4096); @@ -101,6 +113,39 @@ fn deref_two(b: &mut Bencher) { }) } +#[bench] +fn clone_inline(b: &mut Bencher) { + let bytes = Bytes::from_static(b"hello world"); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn clone_static(b: &mut Bencher) { + let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn clone_arc(b: &mut Bencher) { + let bytes = Bytes::from("hello world 1234567890 and have a good byte 0987654321".as_bytes()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + #[bench] fn alloc_write_split_to_mid(b: &mut Bencher) { b.iter(|| { diff --git a/third_party/rust/bytes/ci/tsan 
b/third_party/rust/bytes/ci/tsan new file mode 100644 index 000000000000..657d4266a3aa --- /dev/null +++ b/third_party/rust/bytes/ci/tsan @@ -0,0 +1,21 @@ +# TSAN suppressions file for `bytes` + +# TSAN does not understand fences and `Arc::drop` is implemented using a fence. +# This causes many false positives. +race:Arc*drop +race:arc*Weak*drop + +# `std` mpsc is not used in any Bytes code base. This race is triggered by some +# rust runtime logic. +race:std*mpsc_queue + +# Not sure why this is warning, but it is in the test harness and not the library. +race:TestEvent*clone +race:test::run_tests_console::*closure + +# Probably more fences in std. +race:__call_tls_dtors + +# `is_inline_or_static` is explicitly called concurrently without synchronization. +# The safety explanation can be found in a comment. +race:Inner::is_inline_or_static diff --git a/third_party/rust/bytes/src/buf/buf.rs b/third_party/rust/bytes/src/buf/buf.rs index 42280e793a91..b72c8d91cbe9 100644 --- a/third_party/rust/bytes/src/buf/buf.rs +++ b/third_party/rust/bytes/src/buf/buf.rs @@ -1,9 +1,41 @@ use super::{IntoBuf, Take, Reader, Iter, FromBuf, Chain}; -use byteorder::ByteOrder; +use byteorder::{BigEndian, ByteOrder, LittleEndian}; use iovec::IoVec; use std::{cmp, io, ptr}; +macro_rules! 
buf_get_impl { + ($this:ident, $size:expr, $conv:path) => ({ + // try to convert directly from the bytes + let ret = { + // this Option trick is to avoid keeping a borrow on self + // when advance() is called (mut borrow) and to call bytes() only once + if let Some(src) = $this.bytes().get(..($size)) { + Some($conv(src)) + } else { + None + } + }; + if let Some(ret) = ret { + // if the direct convertion was possible, advance and return + $this.advance($size); + return ret; + } else { + // if not we copy the bytes in a temp buffer then convert + let mut buf = [0; ($size)]; + $this.copy_to_slice(&mut buf); // (do the advance) + return $conv(&buf); + } + }); + ($this:ident, $buf_size:expr, $conv:path, $len_to_read:expr) => ({ + // The same trick as above does not improve the best case speed. + // It seems to be linked to the way the method is optimised by the compiler + let mut buf = [0; ($buf_size)]; + $this.copy_to_slice(&mut buf[..($len_to_read)]); + return $conv(&buf[..($len_to_read)], $len_to_read); + }); +} + /// Read bytes from a buffer. /// /// A buffer stores bytes in memory such that read operations are infallible. @@ -243,9 +275,10 @@ pub trait Buf { /// /// This function panics if there is no more remaining data in `self`. fn get_u8(&mut self) -> u8 { - let mut buf = [0; 1]; - self.copy_to_slice(&mut buf); - buf[0] + assert!(self.remaining() >= 1); + let ret = self.bytes()[0]; + self.advance(1); + ret } /// Gets a signed 8 bit integer from `self`. @@ -266,243 +299,608 @@ pub trait Buf { /// /// This function panics if there is no more remaining data in `self`. fn get_i8(&mut self) -> i8 { - let mut buf = [0; 1]; - self.copy_to_slice(&mut buf); - buf[0] as i8 + assert!(self.remaining() >= 1); + let ret = self.bytes()[0] as i8; + self.advance(1); + ret } - /// Gets an unsigned 16 bit integer from `self` in the specified byte order. - /// - /// The current position is advanced by 2. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, BigEndian}; - /// use std::io::Cursor; - /// - /// let mut buf = Cursor::new(b"\x08\x09 hello"); - /// assert_eq!(0x0809, buf.get_u16::()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u16(&mut self) -> u16 { + #[doc(hidden)] + #[deprecated(note="use get_u16_be or get_u16_le")] + fn get_u16(&mut self) -> u16 where Self: Sized { let mut buf = [0; 2]; self.copy_to_slice(&mut buf); T::read_u16(&buf) } - /// Gets a signed 16 bit integer from `self` in the specified byte order. + /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` - /// use bytes::{Buf, BigEndian}; + /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x09 hello"); - /// assert_eq!(0x0809, buf.get_i16::()); + /// assert_eq!(0x0809, buf.get_u16_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. - fn get_i16(&mut self) -> i16 { + fn get_u16_be(&mut self) -> u16 { + buf_get_impl!(self, 2, BigEndian::read_u16); + } + + /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x09\x08 hello"); + /// assert_eq!(0x0809, buf.get_u16_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_u16_le(&mut self) -> u16 { + buf_get_impl!(self, 2, LittleEndian::read_u16); + } + + #[doc(hidden)] + #[deprecated(note="use get_i16_be or get_i16_le")] + fn get_i16(&mut self) -> i16 where Self: Sized { let mut buf = [0; 2]; self.copy_to_slice(&mut buf); T::read_i16(&buf) } - /// Gets an unsigned 32 bit integer from `self` in the specified byte order. + /// Gets a signed 16 bit integer from `self` in big-endian byte order. /// - /// The current position is advanced by 4. + /// The current position is advanced by 2. /// /// # Examples /// /// ``` - /// use bytes::{Buf, BigEndian}; + /// use bytes::Buf; /// use std::io::Cursor; /// - /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); - /// assert_eq!(0x0809A0A1, buf.get_u32::()); + /// let mut buf = Cursor::new(b"\x08\x09 hello"); + /// assert_eq!(0x0809, buf.get_i16_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. - fn get_u32(&mut self) -> u32 { + fn get_i16_be(&mut self) -> i16 { + buf_get_impl!(self, 2, BigEndian::read_i16); + } + + /// Gets a signed 16 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x09\x08 hello"); + /// assert_eq!(0x0809, buf.get_i16_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i16_le(&mut self) -> i16 { + buf_get_impl!(self, 2, LittleEndian::read_i16); + } + + #[doc(hidden)] + #[deprecated(note="use get_u32_be or get_u32_le")] + fn get_u32(&mut self) -> u32 where Self: Sized { let mut buf = [0; 4]; self.copy_to_slice(&mut buf); T::read_u32(&buf) } - /// Gets a signed 32 bit integer from `self` in the specified byte order. + /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. /// /// The current position is advanced by 4. 
/// /// # Examples /// /// ``` - /// use bytes::{Buf, BigEndian}; + /// use bytes::Buf; /// use std::io::Cursor; /// /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); - /// assert_eq!(0x0809A0A1, buf.get_i32::()); + /// assert_eq!(0x0809A0A1, buf.get_u32_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. - fn get_i32(&mut self) -> i32 { + fn get_u32_be(&mut self) -> u32 { + buf_get_impl!(self, 4, BigEndian::read_u32); + } + + /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello"); + /// assert_eq!(0x0809A0A1, buf.get_u32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u32_le(&mut self) -> u32 { + buf_get_impl!(self, 4, LittleEndian::read_u32); + } + + #[doc(hidden)] + #[deprecated(note="use get_i32_be or get_i32_le")] + fn get_i32(&mut self) -> i32 where Self: Sized { let mut buf = [0; 4]; self.copy_to_slice(&mut buf); T::read_i32(&buf) } - /// Gets an unsigned 64 bit integer from `self` in the specified byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, BigEndian}; - /// use std::io::Cursor; - /// - /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); - /// assert_eq!(0x0102030405060708, buf.get_u64::()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u64(&mut self) -> u64 { - let mut buf = [0; 8]; - self.copy_to_slice(&mut buf); - T::read_u64(&buf) - } - - /// Gets a signed 64 bit integer from `self` in the specified byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, BigEndian}; - /// use std::io::Cursor; - /// - /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); - /// assert_eq!(0x0102030405060708, buf.get_i64::()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i64(&mut self) -> i64 { - let mut buf = [0; 8]; - self.copy_to_slice(&mut buf); - T::read_i64(&buf) - } - - /// Gets an unsigned n-byte integer from `self` in the specified byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, BigEndian}; - /// use std::io::Cursor; - /// - /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); - /// assert_eq!(0x010203, buf.get_uint::(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_uint(&mut self, nbytes: usize) -> u64 { - let mut buf = [0; 8]; - self.copy_to_slice(&mut buf[..nbytes]); - T::read_uint(&buf[..nbytes], nbytes) - } - - /// Gets a signed n-byte integer from `self` in the specified byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, BigEndian}; - /// use std::io::Cursor; - /// - /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); - /// assert_eq!(0x010203, buf.get_int::(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_int(&mut self, nbytes: usize) -> i64 { - let mut buf = [0; 8]; - self.copy_to_slice(&mut buf[..nbytes]); - T::read_int(&buf[..nbytes], nbytes) - } - - /// Gets an IEEE754 single-precision (4 bytes) floating point number from - /// `self` in the specified byte order. + /// Gets a signed 32 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 4. 
/// /// # Examples /// /// ``` - /// use bytes::{Buf, BigEndian}; + /// use bytes::Buf; /// use std::io::Cursor; /// - /// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello"); - /// assert_eq!(1.2f32, buf.get_f32::()); + /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); + /// assert_eq!(0x0809A0A1, buf.get_i32_be()); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. - fn get_f32(&mut self) -> f32 { - let mut buf = [0; 4]; - self.copy_to_slice(&mut buf); - T::read_f32(&buf) + fn get_i32_be(&mut self) -> i32 { + buf_get_impl!(self, 4, BigEndian::read_i32); } - /// Gets an IEEE754 double-precision (8 bytes) floating point number from - /// `self` in the specified byte order. + /// Gets a signed 32 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello"); + /// assert_eq!(0x0809A0A1, buf.get_i32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i32_le(&mut self) -> i32 { + buf_get_impl!(self, 4, LittleEndian::read_i32); + } + + #[doc(hidden)] + #[deprecated(note="use get_u64_be or get_u64_le")] + fn get_u64(&mut self) -> u64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf); + T::read_u64(&buf) + } + + /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); + /// assert_eq!(0x0102030405060708, buf.get_u64_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_u64_be(&mut self) -> u64 { + buf_get_impl!(self, 8, BigEndian::read_u64); + } + + /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x0102030405060708, buf.get_u64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u64_le(&mut self) -> u64 { + buf_get_impl!(self, 8, LittleEndian::read_u64); + } + + #[doc(hidden)] + #[deprecated(note="use get_i64_be or get_i64_le")] + fn get_i64(&mut self) -> i64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf); + T::read_i64(&buf) + } + + /// Gets a signed 64 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); + /// assert_eq!(0x0102030405060708, buf.get_i64_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i64_be(&mut self) -> i64 { + buf_get_impl!(self, 8, BigEndian::read_i64); + } + + /// Gets a signed 64 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x0102030405060708, buf.get_i64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_i64_le(&mut self) -> i64 { + buf_get_impl!(self, 8, LittleEndian::read_i64); + } + + /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_u128_be(&mut self) -> u128 { + buf_get_impl!(self, 16, BigEndian::read_u128); + } + + /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_u128_le(&mut self) -> u128 { + buf_get_impl!(self, 16, LittleEndian::read_u128); + } + + /// Gets a signed 128 bit integer from `self` in big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_i128_be(&mut self) -> i128 { + buf_get_impl!(self, 16, BigEndian::read_i128); + } + + /// Gets a signed 128 bit integer from `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_i128_le(&mut self) -> i128 { + buf_get_impl!(self, 16, LittleEndian::read_i128); + } + + #[doc(hidden)] + #[deprecated(note="use get_uint_be or get_uint_le")] + fn get_uint(&mut self, nbytes: usize) -> u64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf[..nbytes]); + T::read_uint(&buf[..nbytes], nbytes) + } + + /// Gets an unsigned n-byte integer from `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. 
+ /// + /// # Examples + /// + /// ``` /// use bytes::{Buf, BigEndian}; /// use std::io::Cursor; /// - /// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"); - /// assert_eq!(1.2f64, buf.get_f64::()); + /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); + /// assert_eq!(0x010203, buf.get_uint_be(3)); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining data in `self`. - fn get_f64(&mut self) -> f64 { + fn get_uint_be(&mut self, nbytes: usize) -> u64 { + buf_get_impl!(self, 8, BigEndian::read_uint, nbytes); + } + + /// Gets an unsigned n-byte integer from `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x03\x02\x01 hello"); + /// assert_eq!(0x010203, buf.get_uint_le(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_uint_le(&mut self, nbytes: usize) -> u64 { + buf_get_impl!(self, 8, LittleEndian::read_uint, nbytes); + } + + #[doc(hidden)] + #[deprecated(note="use get_int_be or get_int_le")] + fn get_int(&mut self, nbytes: usize) -> i64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf[..nbytes]); + T::read_int(&buf[..nbytes], nbytes) + } + + /// Gets a signed n-byte integer from `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); + /// assert_eq!(0x010203, buf.get_int_be(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_int_be(&mut self, nbytes: usize) -> i64 { + buf_get_impl!(self, 8, BigEndian::read_int, nbytes); + } + + /// Gets a signed n-byte integer from `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x03\x02\x01 hello"); + /// assert_eq!(0x010203, buf.get_int_le(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_int_le(&mut self, nbytes: usize) -> i64 { + buf_get_impl!(self, 8, LittleEndian::read_int, nbytes); + } + + #[doc(hidden)] + #[deprecated(note="use get_f32_be or get_f32_le")] + fn get_f32(&mut self) -> f32 where Self: Sized { + let mut buf = [0; 4]; + self.copy_to_slice(&mut buf); + T::read_f32(&buf) + } + + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello"); + /// assert_eq!(1.2f32, buf.get_f32_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f32_be(&mut self) -> f32 { + buf_get_impl!(self, 4, BigEndian::read_f32); + } + + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x9A\x99\x99\x3F hello"); + /// assert_eq!(1.2f32, buf.get_f32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_f32_le(&mut self) -> f32 { + buf_get_impl!(self, 4, LittleEndian::read_f32); + } + + #[doc(hidden)] + #[deprecated(note="use get_f64_be or get_f64_le")] + fn get_f64(&mut self) -> f64 where Self: Sized { let mut buf = [0; 8]; self.copy_to_slice(&mut buf); T::read_f64(&buf) } + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"); + /// assert_eq!(1.2f64, buf.get_f64_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f64_be(&mut self) -> f64 { + buf_get_impl!(self, 8, BigEndian::read_f64); + } + + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"); + /// assert_eq!(1.2f64, buf.get_f64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f64_le(&mut self) -> f64 { + buf_get_impl!(self, 8, LittleEndian::read_f64); + } + /// Transforms a `Buf` into a concrete buffer. /// /// `collect()` can operate on any value that implements `Buf`, and turn it @@ -749,3 +1147,7 @@ impl Buf for Option<[u8; 1]> { } } } + +// The existance of this function makes the compiler catch if the Buf +// trait is "object-safe" or not. 
+fn _assert_trait_object(_b: &Buf) {} diff --git a/third_party/rust/bytes/src/buf/buf_mut.rs b/third_party/rust/bytes/src/buf/buf_mut.rs index b03103a6193f..71dbda9afe6c 100644 --- a/third_party/rust/bytes/src/buf/buf_mut.rs +++ b/third_party/rust/bytes/src/buf/buf_mut.rs @@ -1,5 +1,5 @@ use super::{IntoBuf, Writer}; -use byteorder::ByteOrder; +use byteorder::{LittleEndian, ByteOrder, BigEndian}; use iovec::IoVec; use std::{cmp, io, ptr, usize}; @@ -338,41 +338,25 @@ pub trait BufMut { self.put_slice(&src) } - /// Writes an unsigned 16 bit integer to `self` in the specified byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, BigEndian}; - /// - /// let mut buf = vec![]; - /// buf.put_u16::(0x0809); - /// assert_eq!(buf, b"\x08\x09"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u16(&mut self, n: u16) { + #[doc(hidden)] + #[deprecated(note="use put_u16_be or put_u16_le")] + fn put_u16(&mut self, n: u16) where Self: Sized { let mut buf = [0; 2]; T::write_u16(&mut buf, n); self.put_slice(&buf) } - /// Writes a signed 16 bit integer to `self` in the specified byte order. + /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 2. /// /// # Examples /// /// ``` - /// use bytes::{BufMut, BigEndian}; + /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_i16::(0x0809); + /// buf.put_u16_be(0x0809); /// assert_eq!(buf, b"\x08\x09"); /// ``` /// @@ -380,47 +364,111 @@ pub trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. - fn put_i16(&mut self, n: i16) { + fn put_u16_be(&mut self, n: u16) { + let mut buf = [0; 2]; + BigEndian::write_u16(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. 
+ /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u16_le(0x0809); + /// assert_eq!(buf, b"\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u16_le(&mut self, n: u16) { + let mut buf = [0; 2]; + LittleEndian::write_u16(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_i16_be or put_i16_le")] + fn put_i16(&mut self, n: i16) where Self: Sized { let mut buf = [0; 2]; T::write_i16(&mut buf, n); self.put_slice(&buf) } - /// Writes an unsigned 32 bit integer to `self` in the specified byte order. + /// Writes a signed 16 bit integer to `self` in big-endian byte order. /// - /// The current position is advanced by 4. + /// The current position is advanced by 2. /// /// # Examples /// /// ``` - /// use bytes::{BufMut, BigEndian}; + /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_u32::(0x0809A0A1); - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// buf.put_i16_be(0x0809); + /// assert_eq!(buf, b"\x08\x09"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. - fn put_u32(&mut self, n: u32) { + fn put_i16_be(&mut self, n: i16) { + let mut buf = [0; 2]; + BigEndian::write_i16(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 16 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i16_le(0x0809); + /// assert_eq!(buf, b"\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
+ fn put_i16_le(&mut self, n: i16) { + let mut buf = [0; 2]; + LittleEndian::write_i16(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_u32_be or put_u32_le")] + fn put_u32(&mut self, n: u32) where Self: Sized { let mut buf = [0; 4]; T::write_u32(&mut buf, n); self.put_slice(&buf) } - /// Writes a signed 32 bit integer to `self` in the specified byte order. + /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. /// /// # Examples /// /// ``` - /// use bytes::{BufMut, BigEndian}; + /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_i32::(0x0809A0A1); + /// buf.put_u32_be(0x0809A0A1); /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); /// ``` /// @@ -428,120 +476,440 @@ pub trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. - fn put_i32(&mut self, n: i32) { + fn put_u32_be(&mut self, n: u32) { let mut buf = [0; 4]; - T::write_i32(&mut buf, n); + BigEndian::write_u32(&mut buf, n); self.put_slice(&buf) } - /// Writes an unsigned 64 bit integer to `self` in the specified byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, BigEndian}; - /// - /// let mut buf = vec![]; - /// buf.put_u64::(0x0102030405060708); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u64(&mut self, n: u64) { - let mut buf = [0; 8]; - T::write_u64(&mut buf, n); - self.put_slice(&buf) - } - - /// Writes a signed 64 bit integer to `self` in the specified byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, BigEndian}; - /// - /// let mut buf = vec![]; - /// buf.put_i64::(0x0102030405060708); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i64(&mut self, n: i64) { - let mut buf = [0; 8]; - T::write_i64(&mut buf, n); - self.put_slice(&buf) - } - - /// Writes an unsigned n-byte integer to `self` in the specified byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, BigEndian}; - /// - /// let mut buf = vec![]; - /// buf.put_uint::(0x010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_uint(&mut self, n: u64, nbytes: usize) { - let mut buf = [0; 8]; - T::write_uint(&mut buf, n, nbytes); - self.put_slice(&buf[0..nbytes]) - } - - /// Writes a signed n-byte integer to `self` in the specified byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BufMut, BigEndian}; - /// - /// let mut buf = vec![]; - /// buf.put_int::(0x010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_int(&mut self, n: i64, nbytes: usize) { - let mut buf = [0; 8]; - T::write_int(&mut buf, n, nbytes); - self.put_slice(&buf[0..nbytes]) - } - - /// Writes an IEEE754 single-precision (4 bytes) floating point number to - /// `self` in the specified byte order. + /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. /// /// The current position is advanced by 4. 
/// /// # Examples /// /// ``` - /// use bytes::{BufMut, BigEndian}; + /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_f32::(1.2f32); + /// buf.put_u32_le(0x0809A0A1); + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u32_le(&mut self, n: u32) { + let mut buf = [0; 4]; + LittleEndian::write_u32(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_i32_be or put_i32_le")] + fn put_i32(&mut self, n: i32) where Self: Sized { + let mut buf = [0; 4]; + T::write_i32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 32 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32_be(0x0809A0A1); + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i32_be(&mut self, n: i32) { + let mut buf = [0; 4]; + BigEndian::write_i32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 32 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32_le(0x0809A0A1); + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
+ fn put_i32_le(&mut self, n: i32) { + let mut buf = [0; 4]; + LittleEndian::write_i32(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_u64_be or put_u64_le")] + fn put_u64(&mut self, n: u64) where Self: Sized { + let mut buf = [0; 8]; + T::write_u64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64_be(0x0102030405060708); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64_be(&mut self, n: u64) { + let mut buf = [0; 8]; + BigEndian::write_u64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64_le(0x0102030405060708); + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64_le(&mut self, n: u64) { + let mut buf = [0; 8]; + LittleEndian::write_u64(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_i64_be or put_i64_le")] + fn put_i64(&mut self, n: i64) where Self: Sized { + let mut buf = [0; 8]; + T::write_i64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 64 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 8. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64_be(0x0102030405060708); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64_be(&mut self, n: i64) { + let mut buf = [0; 8]; + BigEndian::write_i64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 64 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64_le(0x0102030405060708); + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64_le(&mut self, n: i64) { + let mut buf = [0; 8]; + LittleEndian::write_i64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128_be(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + #[cfg(feature = "i128")] + fn put_u128_be(&mut self, n: u128) { + let mut buf = [0; 16]; + BigEndian::write_u128(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128_le(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + #[cfg(feature = "i128")] + fn put_u128_le(&mut self, n: u128) { + let mut buf = [0; 16]; + LittleEndian::write_u128(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 128 bit integer to `self` in the big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128_be(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + #[cfg(feature = "i128")] + fn put_i128_be(&mut self, n: i128) { + let mut buf = [0; 16]; + BigEndian::write_i128(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 128 bit integer to `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128_le(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
+ #[cfg(feature = "i128")] + fn put_i128_le(&mut self, n: i128) { + let mut buf = [0; 16]; + LittleEndian::write_i128(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_uint_be or put_uint_le")] + fn put_uint(&mut self, n: u64, nbytes: usize) where Self: Sized { + let mut buf = [0; 8]; + T::write_uint(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes an unsigned n-byte integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_uint_be(0x010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_uint_be(&mut self, n: u64, nbytes: usize) { + let mut buf = [0; 8]; + BigEndian::write_uint(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_uint_le(0x010203, 3); + /// assert_eq!(buf, b"\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_uint_le(&mut self, n: u64, nbytes: usize) { + let mut buf = [0; 8]; + LittleEndian::write_uint(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + #[doc(hidden)] + #[deprecated(note="use put_int_be or put_int_le")] + fn put_int(&mut self, n: i64, nbytes: usize) where Self: Sized { + let mut buf = [0; 8]; + T::write_int(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes a signed n-byte integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int_be(0x010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_int_be(&mut self, n: i64, nbytes: usize) { + let mut buf = [0; 8]; + BigEndian::write_int(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes a signed n-byte integer to `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int_le(0x010203, 3); + /// assert_eq!(buf, b"\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_int_le(&mut self, n: i64, nbytes: usize) { + let mut buf = [0; 8]; + LittleEndian::write_int(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + #[doc(hidden)] + #[deprecated(note="use put_f32_be or put_f32_le")] + fn put_f32(&mut self, n: f32) where Self: Sized { + let mut buf = [0; 4]; + T::write_f32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32_be(1.2f32); /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); /// ``` /// @@ -549,24 +917,57 @@ pub trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. 
- fn put_f32(&mut self, n: f32) { + fn put_f32_be(&mut self, n: f32) { let mut buf = [0; 4]; - T::write_f32(&mut buf, n); + BigEndian::write_f32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32_le(1.2f32); + /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f32_le(&mut self, n: f32) { + let mut buf = [0; 4]; + LittleEndian::write_f32(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_f64_be or put_f64_le")] + fn put_f64(&mut self, n: f64) where Self: Sized { + let mut buf = [0; 8]; + T::write_f64(&mut buf, n); self.put_slice(&buf) } /// Writes an IEEE754 double-precision (8 bytes) floating point number to - /// `self` in the specified byte order. + /// `self` in big-endian byte order. /// /// The current position is advanced by 8. /// /// # Examples /// /// ``` - /// use bytes::{BufMut, BigEndian}; + /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_f64::(1.2f64); + /// buf.put_f64_be(1.2f64); /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); /// ``` /// @@ -574,9 +975,34 @@ pub trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. - fn put_f64(&mut self, n: f64) { + fn put_f64_be(&mut self, n: f64) { let mut buf = [0; 8]; - T::write_f64(&mut buf, n); + BigEndian::write_f64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an IEEE754 double-precision (8 bytes) floating point number to + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 8. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f64_le(1.2f64); + /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f64_le(&mut self, n: f64) { + let mut buf = [0; 8]; + LittleEndian::write_f64(&mut buf, n); self.put_slice(&buf) } @@ -734,3 +1160,7 @@ impl BufMut for Vec { &mut slice::from_raw_parts_mut(ptr, cap)[len..] } } + +// The existance of this function makes the compiler catch if the BufMut +// trait is "object-safe" or not. +fn _assert_trait_object(_b: &BufMut) {} diff --git a/third_party/rust/bytes/src/buf/into_buf.rs b/third_party/rust/bytes/src/buf/into_buf.rs index 1071908a2efc..4c3b4207289f 100644 --- a/third_party/rust/bytes/src/buf/into_buf.rs +++ b/third_party/rust/bytes/src/buf/into_buf.rs @@ -63,6 +63,14 @@ impl<'a> IntoBuf for &'a [u8] { } } +impl<'a> IntoBuf for &'a mut [u8] { + type Buf = io::Cursor<&'a mut [u8]>; + + fn into_buf(self) -> Self::Buf { + io::Cursor::new(self) + } +} + impl<'a> IntoBuf for &'a str { type Buf = io::Cursor<&'a [u8]>; diff --git a/third_party/rust/bytes/src/bytes.rs b/third_party/rust/bytes/src/bytes.rs index c4cfcaab0567..89244dd40603 100644 --- a/third_party/rust/bytes/src/bytes.rs +++ b/third_party/rust/bytes/src/bytes.rs @@ -3,10 +3,11 @@ use buf::Iter; use debug; use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize}; -use std::borrow::Borrow; +use std::borrow::{Borrow, BorrowMut}; use std::io::Cursor; use std::sync::atomic::{self, AtomicUsize, AtomicPtr}; use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel}; +use std::iter::{FromIterator, Iterator}; /// A reference counted contiguous slice of memory. 
/// @@ -94,14 +95,15 @@ use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel}; /// # Inline bytes /// /// As an optimization, when the slice referenced by a `Bytes` or `BytesMut` -/// handle is small enough [1], `Bytes` will avoid the allocation by inlining -/// the slice directly in the handle. In this case, a clone is no longer -/// "shallow" and the data will be copied. +/// handle is small enough [^1], `with_capacity` will avoid the allocation by +/// inlining the slice directly in the handle. In this case, a clone is no +/// longer "shallow" and the data will be copied. Converting from a `Vec` will +/// never use inlining. /// -/// [1] Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems. +/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems. /// pub struct Bytes { - inner: Inner2, + inner: Inner, } /// A unique reference to a contiguous slice of memory. @@ -147,7 +149,7 @@ pub struct Bytes { /// assert_eq!(&b[..], b"hello"); /// ``` pub struct BytesMut { - inner: Inner2, + inner: Inner, } // Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated @@ -294,6 +296,8 @@ pub struct BytesMut { #[cfg(target_endian = "little")] #[repr(C)] struct Inner { + // WARNING: Do not access the fields directly unless you know what you are + // doing. Instead, use the fns. See implementation comment above. arc: AtomicPtr, ptr: *mut u8, len: usize, @@ -303,22 +307,14 @@ struct Inner { #[cfg(target_endian = "big")] #[repr(C)] struct Inner { + // WARNING: Do not access the fields directly unless you know what you are + // doing. Instead, use the fns. See implementation comment above. ptr: *mut u8, len: usize, cap: usize, arc: AtomicPtr, } -// This struct is only here to make older versions of Rust happy. In older -// versions of `Rust`, `repr(C)` structs could not have drop functions. 
While -// this is no longer the case for newer rust versions, a number of major Rust -// libraries still support older versions of Rust for which it is the case. To -// get around this, `Inner` (the actual struct) is wrapped by `Inner2` which has -// the drop fn implementation. -struct Inner2 { - inner: Inner, -} - // Thread-safe reference-counted container for the shared storage. This mostly // the same as `std::sync::Arc` but without the weak counter. The ref counting // fns are based on the ones found in `std`. @@ -330,7 +326,7 @@ struct Inner2 { // other shenanigans to make it work. struct Shared { vec: Vec, - original_capacity: usize, + original_capacity_repr: usize, ref_count: AtomicUsize, } @@ -341,7 +337,24 @@ const KIND_STATIC: usize = 0b10; const KIND_VEC: usize = 0b11; const KIND_MASK: usize = 0b11; -const MAX_ORIGINAL_CAPACITY: usize = 1 << 16; +// The max original capacity value. Any `Bytes` allocated with a greater initial +// capacity will default to this. +const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; +// The original capacity algorithm will not take effect unless the originally +// allocated capacity was at least 1kb in size. +const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; +// The original capacity is stored in powers of 2 starting at 1kb to a max of +// 64kb. Representing it as such requires only 3 bits of storage. +const ORIGINAL_CAPACITY_MASK: usize = 0b11100; +const ORIGINAL_CAPACITY_OFFSET: usize = 2; + +// When the storage is in the `Vec` representation, the pointer can be advanced +// at most this value. This is due to the amount of storage available to track +// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY +// bits. +const VEC_POS_OFFSET: usize = 5; +const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; +const NOT_VEC_POS_MASK: usize = 0b11111; // Bit op constants for extracting the inline length value from the `arc` field. 
const INLINE_LEN_MASK: usize = 0b11111100; @@ -356,6 +369,11 @@ const INLINE_DATA_OFFSET: isize = 1; #[cfg(target_endian = "big")] const INLINE_DATA_OFFSET: isize = 0; +#[cfg(target_pointer_width = "64")] +const PTR_WIDTH: usize = 64; +#[cfg(target_pointer_width = "32")] +const PTR_WIDTH: usize = 32; + // Inline buffer capacity. This is the size of `Inner` minus 1 byte for the // metadata. #[cfg(target_pointer_width = "64")] @@ -373,7 +391,7 @@ impl Bytes { /// Creates a new `Bytes` with the specified capacity. /// /// The returned `Bytes` will be able to hold at least `capacity` bytes - /// without reallocating. If `capacity` is under `3 * size_of::()`, + /// without reallocating. If `capacity` is under `4 * size_of::() - 1`, /// then `BytesMut` will not allocate. /// /// It is important to note that this function does not specify the length @@ -396,9 +414,7 @@ impl Bytes { #[inline] pub fn with_capacity(capacity: usize) -> Bytes { Bytes { - inner: Inner2 { - inner: Inner::with_capacity(capacity), - }, + inner: Inner::with_capacity(capacity), } } @@ -435,9 +451,7 @@ impl Bytes { #[inline] pub fn from_static(bytes: &'static [u8]) -> Bytes { Bytes { - inner: Inner2 { - inner: Inner::from_static(bytes), - } + inner: Inner::from_static(bytes), } } @@ -451,6 +465,7 @@ impl Bytes { /// let b = Bytes::from(&b"hello"[..]); /// assert_eq!(b.len(), 5); /// ``` + #[inline] pub fn len(&self) -> usize { self.inner.len() } @@ -465,6 +480,7 @@ impl Bytes { /// let b = Bytes::new(); /// assert!(b.is_empty()); /// ``` + #[inline] pub fn is_empty(&self) -> bool { self.inner.is_empty() } @@ -595,9 +611,7 @@ impl Bytes { } Bytes { - inner: Inner2 { - inner: self.inner.split_off(at), - } + inner: self.inner.split_off(at), } } @@ -636,9 +650,7 @@ impl Bytes { } Bytes { - inner: Inner2 { - inner: self.inner.split_to(at), - } + inner: self.inner.split_to(at), } } @@ -672,6 +684,22 @@ impl Bytes { self.inner.truncate(len); } + /// Shortens the buffer, dropping the first `cnt` bytes and 
keeping the + /// rest. + /// + /// This is the same function as `Buf::advance`, and in the next breaking + /// release of `bytes`, this implementation will be removed in favor of + /// having `Bytes` implement `Buf`. + /// + /// # Panics + /// + /// This function panics if `cnt` is greater than `self.len()` + #[inline] + pub fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.len(), "cannot advance past `remaining`"); + unsafe { self.inner.set_start(cnt); } + } + /// Clears the buffer, removing all data. /// /// # Examples @@ -785,9 +813,7 @@ impl<'a> IntoBuf for &'a Bytes { impl Clone for Bytes { fn clone(&self) -> Bytes { Bytes { - inner: Inner2 { - inner: self.inner.shallow_clone(), - } + inner: unsafe { self.inner.shallow_clone(false) }, } } } @@ -838,6 +864,28 @@ impl<'a> From<&'a str> for Bytes { } } +impl FromIterator for BytesMut { + fn from_iter>(into_iter: T) -> Self { + let iter = into_iter.into_iter(); + let (min, maybe_max) = iter.size_hint(); + + let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min)); + + for i in iter { + out.reserve(1); + out.put(i); + } + + out + } +} + +impl FromIterator for Bytes { + fn from_iter>(into_iter: T) -> Self { + BytesMut::from_iter(into_iter).freeze() + } +} + impl PartialEq for Bytes { fn eq(&self, other: &Bytes) -> bool { self.inner.as_ref() == other.inner.as_ref() @@ -945,7 +993,7 @@ impl BytesMut { /// Creates a new `BytesMut` with the specified capacity. /// /// The returned `BytesMut` will be able to hold at least `capacity` bytes - /// without reallocating. If `capacity` is under `3 * size_of::()`, + /// without reallocating. If `capacity` is under `4 * size_of::() - 1`, /// then `BytesMut` will not allocate. 
/// /// It is important to note that this function does not specify the length @@ -968,9 +1016,7 @@ impl BytesMut { #[inline] pub fn with_capacity(capacity: usize) -> BytesMut { BytesMut { - inner: Inner2 { - inner: Inner::with_capacity(capacity), - }, + inner: Inner::with_capacity(capacity), } } @@ -1100,9 +1146,7 @@ impl BytesMut { /// Panics if `at > capacity`. pub fn split_off(&mut self, at: usize) -> BytesMut { BytesMut { - inner: Inner2 { - inner: self.inner.split_off(at), - } + inner: self.inner.split_off(at), } } @@ -1170,9 +1214,7 @@ impl BytesMut { /// Panics if `at > len`. pub fn split_to(&mut self, at: usize) -> BytesMut { BytesMut { - inner: Inner2 { - inner: self.inner.split_to(at), - } + inner: self.inner.split_to(at), } } @@ -1206,6 +1248,22 @@ impl BytesMut { self.inner.truncate(len); } + /// Shortens the buffer, dropping the first `cnt` bytes and keeping the + /// rest. + /// + /// This is the same function as `Buf::advance`, and in the next breaking + /// release of `bytes`, this implementation will be removed in favor of + /// having `BytesMut` implement `Buf`. + /// + /// # Panics + /// + /// This function panics if `cnt` is greater than `self.len()` + #[inline] + pub fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.len(), "cannot advance past `remaining`"); + unsafe { self.inner.set_start(cnt); } + } + /// Clears the buffer, removing all data. /// /// # Examples @@ -1221,6 +1279,32 @@ impl BytesMut { self.truncate(0); } + /// Resizes the buffer so that `len` is equal to `new_len`. + /// + /// If `new_len` is greater than `len`, the buffer is extended by the + /// difference with each additional byte set to `value`. If `new_len` is + /// less than `len`, the buffer is simply truncated. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::new(); + /// + /// buf.resize(3, 0x1); + /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); + /// + /// buf.resize(2, 0x2); + /// assert_eq!(&buf[..], &[0x1, 0x1]); + /// + /// buf.resize(4, 0x3); + /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); + /// ``` + pub fn resize(&mut self, new_len: usize, value: u8) { + self.inner.resize(new_len, value); + } + /// Sets the length of the buffer. /// /// This will explicitly set the size of the buffer without actually @@ -1328,6 +1412,55 @@ impl BytesMut { self.reserve(extend.len()); self.put_slice(extend); } + + /// Combine splitted BytesMut objects back as contiguous. + /// + /// If `BytesMut` objects were not contiguous originally, they will be extended. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::with_capacity(64); + /// buf.extend_from_slice(b"aaabbbcccddd"); + /// + /// let splitted = buf.split_off(6); + /// assert_eq!(b"aaabbb", &buf[..]); + /// assert_eq!(b"cccddd", &splitted[..]); + /// + /// buf.unsplit(splitted); + /// assert_eq!(b"aaabbbcccddd", &buf[..]); + /// ``` + pub fn unsplit(&mut self, other: BytesMut) { + let ptr; + + if other.is_empty() { + return; + } + + if self.is_empty() { + *self = other; + return; + } + + unsafe { + ptr = self.inner.ptr.offset(self.inner.len as isize); + } + if ptr == other.inner.ptr && + self.inner.kind() == KIND_ARC && + other.inner.kind() == KIND_ARC + { + debug_assert_eq!(self.inner.arc.load(Acquire), + other.inner.arc.load(Acquire)); + // Contiguous blocks, just combine directly + self.inner.len += other.inner.len; + self.inner.cap += other.inner.cap; + } + else { + self.extend_from_slice(&other); + } + } } impl BufMut for BytesMut { @@ -1423,9 +1556,7 @@ impl ops::DerefMut for BytesMut { impl From> for BytesMut { fn from(src: Vec) -> BytesMut { BytesMut { - inner: Inner2 { - inner: Inner::from_vec(src), - }, + inner: 
Inner::from_vec(src), } } } @@ -1452,9 +1583,7 @@ impl<'a> From<&'a [u8]> for BytesMut { inner.as_raw()[0..len].copy_from_slice(src); BytesMut { - inner: Inner2 { - inner: inner, - } + inner: inner, } } } else { @@ -1523,6 +1652,12 @@ impl Borrow<[u8]> for BytesMut { } } +impl BorrowMut<[u8]> for BytesMut { + fn borrow_mut(&mut self) -> &mut [u8] { + self.as_mut() + } +} + impl fmt::Write for BytesMut { #[inline] fn write_str(&mut self, s: &str) -> fmt::Result { @@ -1616,8 +1751,8 @@ impl Inner { mem::forget(src); - let original_capacity = cmp::min(cap, MAX_ORIGINAL_CAPACITY); - let arc = (original_capacity & !KIND_MASK) | KIND_VEC; + let original_capacity_repr = original_capacity_to_repr(cap); + let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; Inner { arc: AtomicPtr::new(arc as *mut Shared), @@ -1632,10 +1767,9 @@ impl Inner { if capacity <= INLINE_CAP { unsafe { // Using uninitialized memory is ~30% faster - Inner { - arc: AtomicPtr::new(KIND_INLINE as *mut Shared), - .. mem::uninitialized() - } + let mut inner: Inner = mem::uninitialized(); + inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared); + inner } } else { Inner::from_vec(Vec::with_capacity(capacity)) @@ -1727,8 +1861,8 @@ impl Inner { #[inline] fn set_inline_len(&mut self, len: usize) { debug_assert!(len <= INLINE_CAP); - let p: &mut usize = unsafe { mem::transmute(&mut self.arc) }; - *p = (*p & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET); + let p = self.arc.get_mut(); + *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _; } /// slice. 
@@ -1758,7 +1892,7 @@ impl Inner { } fn split_off(&mut self, at: usize) -> Inner { - let mut other = self.shallow_clone(); + let mut other = unsafe { self.shallow_clone(true) }; unsafe { other.set_start(at); @@ -1769,7 +1903,7 @@ impl Inner { } fn split_to(&mut self, at: usize) -> Inner { - let mut other = self.shallow_clone(); + let mut other = unsafe { self.shallow_clone(true) }; unsafe { other.set_end(at); @@ -1785,20 +1919,33 @@ impl Inner { } } - unsafe fn set_start(&mut self, start: usize) { - // This function should never be called when the buffer is still backed - // by a `Vec` - debug_assert!(self.is_shared()); + fn resize(&mut self, new_len: usize, value: u8) { + let len = self.len(); + if new_len > len { + let additional = new_len - len; + self.reserve(additional); + unsafe { + let dst = self.as_raw()[len..].as_mut_ptr(); + ptr::write_bytes(dst, value, additional); + self.set_len(new_len); + } + } else { + self.truncate(new_len); + } + } + unsafe fn set_start(&mut self, start: usize) { // Setting the start to 0 is a no-op, so return early if this is the // case. if start == 0 { return; } + let kind = self.kind(); + // Always check `inline` first, because if the handle is using inline // data storage, all of the `Inner` struct fields will be gibberish. - if self.is_inline() { + if kind == KIND_INLINE { assert!(start <= INLINE_CAP); let len = self.inline_len(); @@ -1822,6 +1969,25 @@ impl Inner { } else { assert!(start <= self.cap); + if kind == KIND_VEC { + // Setting the start when in vec representation is a little more + // complicated. First, we have to track how far ahead the + // "start" of the byte buffer from the beginning of the vec. We + // also have to ensure that we don't exceed the maximum shift. + let (mut pos, prev) = self.uncoordinated_get_vec_pos(); + pos += start; + + if pos <= MAX_VEC_POS { + self.uncoordinated_set_vec_pos(pos, prev); + } else { + // The repr must be upgraded to ARC. 
This will never happen + // on 64 bit systems and will only happen on 32 bit systems + // when shifting past 134,217,727 bytes. As such, we don't + // worry too much about performance here. + let _ = self.shallow_clone(true); + } + } + // Updating the start of the view is setting `ptr` to point to the // new start and updating the `len` field to reflect the new length // of the view. @@ -1869,131 +2035,158 @@ impl Inner { } else if kind == KIND_STATIC { false } else { - // The function requires `&mut self`, which guarantees a unique - // reference to the current handle. This means that the `arc` field - // *cannot* be concurrently mutated. As such, `Relaxed` ordering is - // fine (since we aren't synchronizing with anything). - let arc = self.arc.load(Relaxed); - // Otherwise, the underlying buffer is potentially shared with other // handles, so the ref_count needs to be checked. - unsafe { (*arc).is_unique() } + unsafe { (**self.arc.get_mut()).is_unique() } } } /// Increments the ref count. This should only be done if it is known that /// it can be done safely. As such, this fn is not public, instead other /// fns will use this one while maintaining the guarantees. - fn shallow_clone(&self) -> Inner { + /// Parameter `mut_self` should only be set to `true` if caller holds + /// `&mut self` reference. + /// + /// "Safely" is defined as not exposing two `BytesMut` values that point to + /// the same byte window. + /// + /// This function is thread safe. + unsafe fn shallow_clone(&self, mut_self: bool) -> Inner { // Always check `inline` first, because if the handle is using inline // data storage, all of the `Inner` struct fields will be gibberish. - if self.is_inline() { + // + // Additionally, if kind is STATIC, then Arc is *never* changed, making + // it safe and faster to check for it now before an atomic acquire. + + if self.is_inline_or_static() { // In this case, a shallow_clone still involves copying the data. 
- unsafe { - // TODO: Just copy the fields - let mut inner: Inner = mem::uninitialized(); - let len = self.inline_len(); - - inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared); - inner.set_inline_len(len); - inner.as_raw()[0..len].copy_from_slice(self.as_ref()); - inner - } + let mut inner: Inner = mem::uninitialized(); + ptr::copy_nonoverlapping( + self, + &mut inner, + 1, + ); + inner } else { - // The function requires `&self`, this means that `shallow_clone` - // could be called concurrently. - // - // The first step is to load the value of `arc`. This will determine - // how to proceed. The `Acquire` ordering synchronizes with the - // `compare_and_swap` that comes later in this function. The goal is - // to ensure that if `arc` is currently set to point to a `Shared`, - // that the current thread acquires the associated memory. - let mut arc = self.arc.load(Acquire); - - // If the buffer is still tracked in a `Vec`. It is time to - // promote the vec to an `Arc`. This could potentially be called - // concurrently, so some care must be taken. - if arc as usize & KIND_MASK == KIND_VEC { - unsafe { - // First, allocate a new `Shared` instance containing the - // `Vec` fields. It's important to note that `ptr`, `len`, - // and `cap` cannot be mutated without having `&mut self`. - // This means that these fields will not be concurrently - // updated and since the buffer hasn't been promoted to an - // `Arc`, those three fields still are the components of the - // vector. - let shared = Box::new(Shared { - vec: Vec::from_raw_parts(self.ptr, self.len, self.cap), - original_capacity: arc as usize & !KIND_MASK, - // Initialize refcount to 2. One for this reference, and one - // for the new clone that will be returned from - // `shallow_clone`. - ref_count: AtomicUsize::new(2), - }); - - let shared = Box::into_raw(shared); - - // The pointer should be aligned, so this assert should - // always succeed. 
- debug_assert!(0 == (shared as usize & 0b11)); - - // Try compare & swapping the pointer into the `arc` field. - // `Release` is used synchronize with other threads that - // will load the `arc` field. - // - // If the `compare_and_swap` fails, then the thread lost the - // race to promote the buffer to shared. The `Acquire` - // ordering will synchronize with the `compare_and_swap` - // that happened in the other thread and the `Shared` - // pointed to by `actual` will be visible. - let actual = self.arc.compare_and_swap(arc, shared, AcqRel); - - if actual == arc { - // The upgrade was successful, the new handle can be - // returned. - return Inner { - arc: AtomicPtr::new(shared), - .. *self - }; - } - - // The upgrade failed, a concurrent clone happened. Release - // the allocation that was made in this thread, it will not - // be needed. - let shared: Box = mem::transmute(shared); - mem::forget(*shared); - - // Update the `arc` local variable and fall through to a ref - // count update - arc = actual; - } - } else if arc as usize & KIND_MASK == KIND_STATIC { - // Static buffer - return Inner { - arc: AtomicPtr::new(arc), - .. *self - }; - } - - // Buffer already promoted to shared storage, so increment ref - // count. - unsafe { - // Relaxed ordering is acceptable as the memory has already been - // acquired via the `Acquire` load above. - let old_size = (*arc).ref_count.fetch_add(1, Relaxed); - - if old_size == usize::MAX { - panic!(); // TODO: abort - } - } - - Inner { - arc: AtomicPtr::new(arc), - .. *self - } + self.shallow_clone_sync(mut_self) } } + + #[cold] + unsafe fn shallow_clone_sync(&self, mut_self: bool) -> Inner { + // The function requires `&self`, this means that `shallow_clone` + // could be called concurrently. + // + // The first step is to load the value of `arc`. This will determine + // how to proceed. The `Acquire` ordering synchronizes with the + // `compare_and_swap` that comes later in this function. 
The goal is + // to ensure that if `arc` is currently set to point to a `Shared`, + // that the current thread acquires the associated memory. + let arc = self.arc.load(Acquire); + let kind = arc as usize & KIND_MASK; + + if kind == KIND_ARC { + self.shallow_clone_arc(arc) + } else { + assert!(kind == KIND_VEC); + self.shallow_clone_vec(arc as usize, mut_self) + } + } + + unsafe fn shallow_clone_arc(&self, arc: *mut Shared) -> Inner { + debug_assert!(arc as usize & KIND_MASK == KIND_ARC); + + let old_size = (*arc).ref_count.fetch_add(1, Relaxed); + + if old_size == usize::MAX { + abort(); + } + + Inner { + arc: AtomicPtr::new(arc), + .. *self + } + } + + #[cold] + unsafe fn shallow_clone_vec(&self, arc: usize, mut_self: bool) -> Inner { + // If the buffer is still tracked in a `Vec`. It is time to + // promote the vec to an `Arc`. This could potentially be called + // concurrently, so some care must be taken. + + debug_assert!(arc & KIND_MASK == KIND_VEC); + + let original_capacity_repr = + (arc as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET; + + // The vec offset cannot be concurrently mutated, so there + // should be no danger reading it. + let off = (arc as usize) >> VEC_POS_OFFSET; + + // First, allocate a new `Shared` instance containing the + // `Vec` fields. It's important to note that `ptr`, `len`, + // and `cap` cannot be mutated without having `&mut self`. + // This means that these fields will not be concurrently + // updated and since the buffer hasn't been promoted to an + // `Arc`, those three fields still are the components of the + // vector. + let shared = Box::new(Shared { + vec: rebuild_vec(self.ptr, self.len, self.cap, off), + original_capacity_repr: original_capacity_repr, + // Initialize refcount to 2. One for this reference, and one + // for the new clone that will be returned from + // `shallow_clone`. 
+ ref_count: AtomicUsize::new(2), + }); + + let shared = Box::into_raw(shared); + + // The pointer should be aligned, so this assert should + // always succeed. + debug_assert!(0 == (shared as usize & 0b11)); + + // If there are no references to self in other threads, + // expensive atomic operations can be avoided. + if mut_self { + self.arc.store(shared, Relaxed); + return Inner { + arc: AtomicPtr::new(shared), + .. *self + }; + } + + // Try compare & swapping the pointer into the `arc` field. + // `Release` is used synchronize with other threads that + // will load the `arc` field. + // + // If the `compare_and_swap` fails, then the thread lost the + // race to promote the buffer to shared. The `Acquire` + // ordering will synchronize with the `compare_and_swap` + // that happened in the other thread and the `Shared` + // pointed to by `actual` will be visible. + let actual = self.arc.compare_and_swap(arc as *mut Shared, shared, AcqRel); + + if actual as usize == arc { + // The upgrade was successful, the new handle can be + // returned. + return Inner { + arc: AtomicPtr::new(shared), + .. *self + }; + } + + // The upgrade failed, a concurrent clone happened. Release + // the allocation that was made in this thread, it will not + // be needed. + let shared = Box::from_raw(shared); + mem::forget(*shared); + + // Buffer already promoted to shared storage, so increment ref + // count. + self.shallow_clone_arc(actual) + } + #[inline] fn reserve(&mut self, additional: usize) { let len = self.len(); @@ -2029,27 +2222,47 @@ impl Inner { } if kind == KIND_VEC { - // Currently backed by a vector, so just use `Vector::reserve`. + // If there's enough free space before the start of the buffer, then + // just copy the data backwards and reuse the already-allocated + // space. 
+ // + // Otherwise, since backed by a vector, use `Vec::reserve` unsafe { - let mut v = Vec::from_raw_parts(self.ptr, self.len, self.cap); - v.reserve(additional); + let (off, prev) = self.uncoordinated_get_vec_pos(); - // Update the info - self.ptr = v.as_mut_ptr(); - self.len = v.len(); - self.cap = v.capacity(); + // Only reuse space if we stand to gain at least capacity/2 + // bytes of space back + if off >= additional && off >= (self.cap / 2) { + // There's space - reuse it + // + // Just move the pointer back to the start after copying + // data back. + let base_ptr = self.ptr.offset(-(off as isize)); + ptr::copy(self.ptr, base_ptr, self.len); + self.ptr = base_ptr; + self.uncoordinated_set_vec_pos(0, prev); - // Drop the vec reference - mem::forget(v); + // Length stays constant, but since we moved backwards we + // can gain capacity back. + self.cap += off; + } else { + // No space - allocate more + let mut v = rebuild_vec(self.ptr, self.len, self.cap, off); + v.reserve(additional); + // Update the info + self.ptr = v.as_mut_ptr().offset(off as isize); + self.len = v.len() - off; + self.cap = v.capacity() - off; + + // Drop the vec reference + mem::forget(v); + } return; } } - // `Relaxed` is Ok here (and really, no synchronization is necessary) - // due to having a `&mut self` pointer. The `&mut self` pointer ensures - // that there is no concurrent access on `self`. - let arc = self.arc.load(Relaxed); + let arc = *self.arc.get_mut(); debug_assert!(kind == KIND_ARC); @@ -2059,9 +2272,11 @@ impl Inner { // Compute the new capacity let mut new_cap = len + additional; let original_capacity; + let original_capacity_repr; unsafe { - original_capacity = (*arc).original_capacity; + original_capacity_repr = (*arc).original_capacity_repr; + original_capacity = original_capacity_from_repr(original_capacity_repr); // First, try to reclaim the buffer. This is possible if the current // handle is the only outstanding handle pointing to the buffer. 
@@ -2114,7 +2329,7 @@ impl Inner { self.len = v.len(); self.cap = v.capacity(); - let arc = (original_capacity & !KIND_MASK) | KIND_VEC; + let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; self.arc = AtomicPtr::new(arc as *mut Shared); @@ -2128,6 +2343,18 @@ impl Inner { self.kind() == KIND_INLINE } + #[inline] + fn is_inline_or_static(&self) -> bool { + // The value returned by `kind` isn't itself safe, but the value could + // inform what operations to take, and unsafely do something without + // synchronization. + // + // KIND_INLINE and KIND_STATIC will *never* change, so branches on that + // information is safe. + let kind = self.kind(); + kind == KIND_INLINE || kind == KIND_STATIC + } + /// Used for `debug_assert` statements. &mut is used to guarantee that it is /// safe to check VEC_KIND #[inline] @@ -2187,21 +2414,56 @@ impl Inner { imp(&self.arc) } + + #[inline] + fn uncoordinated_get_vec_pos(&mut self) -> (usize, usize) { + // Similar to above, this is a pretty crazed function. This should only + // be called when in the KIND_VEC mode. This + the &mut self argument + // guarantees that there is no possibility of concurrent calls to this + // function. + let prev = unsafe { + let p: &AtomicPtr = &self.arc; + let p: &usize = mem::transmute(p); + *p + }; + + (prev >> VEC_POS_OFFSET, prev) + } + + #[inline] + fn uncoordinated_set_vec_pos(&mut self, pos: usize, prev: usize) { + // Once more... 
crazy + debug_assert!(pos <= MAX_VEC_POS); + + unsafe { + let p: &mut AtomicPtr = &mut self.arc; + let p: &mut usize = mem::transmute(p); + *p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK); + } + } } -impl Drop for Inner2 { +fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec { + unsafe { + let ptr = ptr.offset(-(off as isize)); + len += off; + cap += off; + + Vec::from_raw_parts(ptr, len, cap) + } +} + +impl Drop for Inner { fn drop(&mut self) { let kind = self.kind(); if kind == KIND_VEC { + let (off, _) = self.uncoordinated_get_vec_pos(); + // Vector storage, free the vector - unsafe { - let _ = Vec::from_raw_parts(self.ptr, self.len, self.cap); - } + let _ = rebuild_vec(self.ptr, self.len, self.cap, off); } else if kind == KIND_ARC { - // &mut self guarantees correct ordering - let arc = self.arc.load(Relaxed); - release_shared(arc); + release_shared(*self.arc.get_mut()); } } } @@ -2233,7 +2495,7 @@ fn release_shared(ptr: *mut Shared) { atomic::fence(Acquire); // Drop the data - let _: Box = mem::transmute(ptr); + Box::from_raw(ptr); } } @@ -2253,31 +2515,55 @@ impl Shared { } } +fn original_capacity_to_repr(cap: usize) -> usize { + let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize); + cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH) +} + +fn original_capacity_from_repr(repr: usize) -> usize { + if repr == 0 { + return 0; + } + + 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) +} + +#[test] +fn test_original_capacity_to_repr() { + for &cap in &[0, 1, 16, 1000] { + assert_eq!(0, original_capacity_to_repr(cap)); + } + + for &cap in &[1024, 1025, 1100, 2000, 2047] { + assert_eq!(1, original_capacity_to_repr(cap)); + } + + for &cap in &[2048, 2049] { + assert_eq!(2, original_capacity_to_repr(cap)); + } + + // TODO: more + + for &cap in &[65536, 65537, 68000, 1 << 17, 1 << 18, 1 << 20, 1 << 30] { + assert_eq!(7, original_capacity_to_repr(cap), "cap={}", cap); + } +} + 
+#[test] +fn test_original_capacity_from_repr() { + assert_eq!(0, original_capacity_from_repr(0)); + assert_eq!(1024, original_capacity_from_repr(1)); + assert_eq!(1024 * 2, original_capacity_from_repr(2)); + assert_eq!(1024 * 4, original_capacity_from_repr(3)); + assert_eq!(1024 * 8, original_capacity_from_repr(4)); + assert_eq!(1024 * 16, original_capacity_from_repr(5)); + assert_eq!(1024 * 32, original_capacity_from_repr(6)); + assert_eq!(1024 * 64, original_capacity_from_repr(7)); +} + unsafe impl Send for Inner {} unsafe impl Sync for Inner {} -/* - * - * ===== impl Inner2 ===== - * - */ - -impl ops::Deref for Inner2 { - type Target = Inner; - - #[inline] - fn deref(&self) -> &Inner { - &self.inner - } -} - -impl ops::DerefMut for Inner2 { - #[inline] - fn deref_mut(&mut self) -> &mut Inner { - &mut self.inner - } -} - /* * * ===== PartialEq / PartialOrd ===== @@ -2569,3 +2855,21 @@ impl PartialEq for BytesMut &other[..] == &self[..] } } + +// While there is `std::process:abort`, it's only available in Rust 1.17, and +// our minimum supported version is currently 1.15. So, this acts as an abort +// by triggering a double panic, which always aborts in Rust. 
+struct Abort; + +impl Drop for Abort { + fn drop(&mut self) { + panic!(); + } +} + +#[inline(never)] +#[cold] +fn abort() { + let _a = Abort; + panic!(); +} diff --git a/third_party/rust/bytes/src/debug.rs b/third_party/rust/bytes/src/debug.rs index abead058a9b6..f8b830a24110 100644 --- a/third_party/rust/bytes/src/debug.rs +++ b/third_party/rust/bytes/src/debug.rs @@ -27,8 +27,8 @@ impl<'a> fmt::Debug for BsDebug<'a> { try!(write!(fmt, "\\{}", c as char)); } else if c == b'\0' { try!(write!(fmt, "\\0")); - // ASCII printable except space - } else if c > 0x20 && c < 0x7f { + // ASCII printable + } else if c >= 0x20 && c < 0x7f { try!(write!(fmt, "{}", c as char)); } else { try!(write!(fmt, "\\x{:02x}", c)); diff --git a/third_party/rust/bytes/src/lib.rs b/third_party/rust/bytes/src/lib.rs index fbe65721ac72..eccb8a3806db 100644 --- a/third_party/rust/bytes/src/lib.rs +++ b/third_party/rust/bytes/src/lib.rs @@ -18,8 +18,8 @@ //! using a reference count to track when the memory is no longer needed and can //! be freed. //! -//! A `Bytes` handle can be created directly from an existing byte store (such as &[u8] -//! or Vec), but usually a `BytesMut` is used first and written to. For +//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]` +//! or `Vec`), but usually a `BytesMut` is used first and written to. For //! example: //! //! ```rust @@ -69,7 +69,7 @@ //! and `BufMut` are infallible. 
#![deny(warnings, missing_docs, missing_debug_implementations)] -#![doc(html_root_url = "https://docs.rs/bytes/0.4")] +#![doc(html_root_url = "https://docs.rs/bytes/0.4.9")] extern crate byteorder; extern crate iovec; @@ -92,6 +92,7 @@ mod bytes; mod debug; pub use bytes::{Bytes, BytesMut}; +#[deprecated] pub use byteorder::{ByteOrder, BigEndian, LittleEndian}; // Optional Serde support diff --git a/third_party/rust/bytes/tests/test_buf.rs b/third_party/rust/bytes/tests/test_buf.rs index 5a1baedf5c3b..f25c25f2b5de 100644 --- a/third_party/rust/bytes/tests/test_buf.rs +++ b/third_party/rust/bytes/tests/test_buf.rs @@ -33,21 +33,26 @@ fn test_get_u8() { #[test] fn test_get_u16() { let buf = b"\x21\x54zomg"; - assert_eq!(0x2154, Cursor::new(buf).get_u16::()); - assert_eq!(0x5421, Cursor::new(buf).get_u16::()); + assert_eq!(0x2154, Cursor::new(buf).get_u16_be()); + assert_eq!(0x5421, Cursor::new(buf).get_u16_le()); } #[test] #[should_panic] fn test_get_u16_buffer_underflow() { let mut buf = Cursor::new(b"\x21"); - buf.get_u16::(); + buf.get_u16_be(); } #[test] fn test_bufs_vec() { let buf = Cursor::new(b"hello world"); - let mut dst: [&IoVec; 2] = Default::default(); + + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + + let mut dst: [&IoVec; 2] = + [b1.into(), b2.into()]; assert_eq!(1, buf.bytes_vec(&mut dst[..])); } diff --git a/third_party/rust/bytes/tests/test_buf_mut.rs b/third_party/rust/bytes/tests/test_buf_mut.rs index 896e31df0729..2c8faa1043fb 100644 --- a/third_party/rust/bytes/tests/test_buf_mut.rs +++ b/third_party/rust/bytes/tests/test_buf_mut.rs @@ -41,11 +41,11 @@ fn test_put_u8() { #[test] fn test_put_u16() { let mut buf = Vec::with_capacity(8); - buf.put_u16::(8532); + buf.put_u16_be(8532); assert_eq!(b"\x21\x54", &buf[..]); buf.clear(); - buf.put_u16::(8532); + buf.put_u16_le(8532); assert_eq!(b"\x54\x21", &buf[..]); } diff --git a/third_party/rust/bytes/tests/test_bytes.rs b/third_party/rust/bytes/tests/test_bytes.rs index 
1c8ccd0b1472..c0cba6b76759 100644 --- a/third_party/rust/bytes/tests/test_bytes.rs +++ b/third_party/rust/bytes/tests/test_bytes.rs @@ -1,6 +1,6 @@ extern crate bytes; -use bytes::{Bytes, BytesMut, BufMut}; +use bytes::{Bytes, BytesMut, BufMut, IntoBuf}; const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb"; const SHORT: &'static [u8] = b"hello world"; @@ -303,6 +303,13 @@ fn fns_defined_for_bytes_mut() { assert_eq!(&v[..], bytes); } +#[test] +fn mut_into_buf() { + let mut v = vec![0, 0, 0, 0]; + let s = &mut v[..]; + s.into_buf().put_u32_le(42); +} + #[test] fn reserve_convert() { // Inline -> Vec @@ -350,16 +357,16 @@ fn reserve_growth() { #[test] fn reserve_allocates_at_least_original_capacity() { - let mut bytes = BytesMut::with_capacity(128); + let mut bytes = BytesMut::with_capacity(1024); - for i in 0..120 { + for i in 0..1020 { bytes.put(i as u8); } let _other = bytes.take(); bytes.reserve(16); - assert_eq!(bytes.capacity(), 128); + assert_eq!(bytes.capacity(), 1024); } #[test] @@ -378,6 +385,21 @@ fn reserve_max_original_capacity_value() { assert_eq!(bytes.capacity(), 64 * 1024); } +// Without either looking at the internals of the BytesMut or doing weird stuff +// with the memory allocator, there's no good way to automatically verify from +// within the program that this actually recycles memory. Instead, just exercise +// the code path to ensure that the results are correct. 
+#[test] +fn reserve_vec_recycling() { + let mut bytes = BytesMut::from(Vec::with_capacity(16)); + assert_eq!(bytes.capacity(), 16); + bytes.put("0123456789012345"); + bytes.advance(10); + assert_eq!(bytes.capacity(), 6); + bytes.reserve(8); + assert_eq!(bytes.capacity(), 16); +} + #[test] fn reserve_in_arc_unique_does_not_overallocate() { let mut bytes = BytesMut::with_capacity(1000); @@ -466,6 +488,44 @@ fn from_static() { assert_eq!(b, b"b"[..]); } +#[test] +fn advance_inline() { + let mut a = Bytes::from(&b"hello world"[..]); + a.advance(6); + assert_eq!(a, &b"world"[..]); +} + +#[test] +fn advance_static() { + let mut a = Bytes::from_static(b"hello world"); + a.advance(6); + assert_eq!(a, &b"world"[..]); +} + +#[test] +fn advance_vec() { + let mut a = BytesMut::from(b"hello world boooo yah world zomg wat wat".to_vec()); + a.advance(16); + assert_eq!(a, b"o yah world zomg wat wat"[..]); + + a.advance(4); + assert_eq!(a, b"h world zomg wat wat"[..]); + + // Reserve some space. + a.reserve(1024); + assert_eq!(a, b"h world zomg wat wat"[..]); + + a.advance(6); + assert_eq!(a, b"d zomg wat wat"[..]); +} + +#[test] +#[should_panic] +fn advance_past_len() { + let mut a = BytesMut::from(b"hello world".to_vec()); + a.advance(20); +} + #[test] // Only run these tests on little endian systems. CI uses qemu for testing // little endian... and qemu doesn't really support threading all that well. 
@@ -514,3 +574,146 @@ fn partial_eq_bytesmut() { assert!(bytes2 != bytesmut); assert!(bytesmut != bytes2); } + +#[test] +fn unsplit_basic() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaabbbcccddd"); + + let splitted = buf.split_off(6); + assert_eq!(b"aaabbb", &buf[..]); + assert_eq!(b"cccddd", &splitted[..]); + + buf.unsplit(splitted); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn unsplit_empty_other() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaabbbcccddd"); + + // empty other + let other = BytesMut::new(); + + buf.unsplit(other); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn unsplit_empty_self() { + // empty self + let mut buf = BytesMut::new(); + + let mut other = BytesMut::with_capacity(64); + other.extend_from_slice(b"aaabbbcccddd"); + + buf.unsplit(other); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn unsplit_inline_arc() { + let mut buf = BytesMut::with_capacity(8); //inline + buf.extend_from_slice(b"aaaabbbb"); + + let mut buf2 = BytesMut::with_capacity(64); + buf2.extend_from_slice(b"ccccddddeeee"); + + buf2.split_off(8); //arc + + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn unsplit_arc_inline() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbeeee"); + + buf.split_off(8); //arc + + let mut buf2 = BytesMut::with_capacity(8); //inline + buf2.extend_from_slice(b"ccccdddd"); + + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); + +} + +#[test] +fn unsplit_both_inline() { + let mut buf = BytesMut::with_capacity(16); //inline + buf.extend_from_slice(b"aaaabbbbccccdddd"); + + let splitted = buf.split_off(8); // both inline + assert_eq!(b"aaaabbbb", &buf[..]); + assert_eq!(b"ccccdddd", &splitted[..]); + + buf.unsplit(splitted); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + + +#[test] +fn unsplit_arc_different() { + let mut buf = BytesMut::with_capacity(64); + 
buf.extend_from_slice(b"aaaabbbbeeee"); + + buf.split_off(8); //arc + + let mut buf2 = BytesMut::with_capacity(64); + buf2.extend_from_slice(b"ccccddddeeee"); + + buf2.split_off(8); //arc + + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn unsplit_arc_non_contiguous() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); + + let mut buf2 = buf.split_off(8); //arc + + let buf3 = buf2.split_off(4); //arc + + buf.unsplit(buf3); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn unsplit_two_split_offs() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbccccdddd"); + + let mut buf2 = buf.split_off(8); //arc + let buf3 = buf2.split_off(4); //arc + + buf2.unsplit(buf3); + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn from_iter_no_size_hint() { + use std::iter; + + let mut expect = vec![]; + + let actual: Bytes = iter::repeat(b'x') + .scan(100, |cnt, item| { + if *cnt >= 1 { + *cnt -= 1; + expect.push(item); + Some(item) + } else { + None + } + }) + .collect(); + + assert_eq!(&actual[..], &expect[..]); +} diff --git a/third_party/rust/bytes/tests/test_chain.rs b/third_party/rust/bytes/tests/test_chain.rs index 002df590f024..2789e7c0602d 100644 --- a/third_party/rust/bytes/tests/test_chain.rs +++ b/third_party/rust/bytes/tests/test_chain.rs @@ -55,48 +55,68 @@ fn vectored_read() { let mut buf = a.chain(b); { - let mut iovecs: [&IoVec; 4] = Default::default(); + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; assert_eq!(2, buf.bytes_vec(&mut iovecs)); assert_eq!(iovecs[0][..], b"hello"[..]); assert_eq!(iovecs[1][..], b"world"[..]); - assert!(iovecs[2].is_empty()); - assert!(iovecs[3].is_empty()); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); } 
buf.advance(2); { - let mut iovecs: [&IoVec; 4] = Default::default(); + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; assert_eq!(2, buf.bytes_vec(&mut iovecs)); assert_eq!(iovecs[0][..], b"llo"[..]); assert_eq!(iovecs[1][..], b"world"[..]); - assert!(iovecs[2].is_empty()); - assert!(iovecs[3].is_empty()); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); } buf.advance(3); { - let mut iovecs: [&IoVec; 4] = Default::default(); + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; assert_eq!(1, buf.bytes_vec(&mut iovecs)); assert_eq!(iovecs[0][..], b"world"[..]); - assert!(iovecs[1].is_empty()); - assert!(iovecs[2].is_empty()); - assert!(iovecs[3].is_empty()); + assert_eq!(iovecs[1][..], b"\0"[..]); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); } buf.advance(3); { - let mut iovecs: [&IoVec; 4] = Default::default(); + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; assert_eq!(1, buf.bytes_vec(&mut iovecs)); assert_eq!(iovecs[0][..], b"ld"[..]); - assert!(iovecs[1].is_empty()); - assert!(iovecs[2].is_empty()); - assert!(iovecs[3].is_empty()); + assert_eq!(iovecs[1][..], b"\0"[..]); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); } } diff --git a/third_party/rust/bytes/tests/test_debug.rs b/third_party/rust/bytes/tests/test_debug.rs index eec7bca850a0..9945a2835bb1 100644 --- a/third_party/rust/bytes/tests/test_debug.rs +++ b/third_party/rust/bytes/tests/test_debug.rs @@ -11,7 +11,7 @@ fn fmt() { \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\ 
\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\ \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\ - \\x20!\\\"#$%&'()*+,-./0123456789:;<=>?\ + \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\ @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\ `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\ \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\ diff --git a/third_party/rust/cookie/.cargo-checksum.json b/third_party/rust/cookie/.cargo-checksum.json index ef2d3da857f1..f1027f08bf9f 100644 --- a/third_party/rust/cookie/.cargo-checksum.json +++ b/third_party/rust/cookie/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"d2a9bb7c029e8ed0acfb8dc8e786014cfa4f053b6f4c525303d69fd7e28704e9","Cargo.toml":"276e89e8f02c785f020dc5c6035de314e4d1279f9a83d6654f9a689dab5c6234","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"d4860822f8c84f3a91d8c55f600bcf86453518a778f753d2c389debe5c4ad5fa","src/builder.rs":"528640f717f5769e522a9ac066a994c21973ff3a5e9359d087f410233887c83c","src/delta.rs":"510fc3dbf0a70d635d0488c5a5a32a2ba8e1490ce05bee39d944ea8c02189bbc","src/draft.rs":"bd11960db08f4e4368937845fc18b842e474391738e4457a3441df2789c9d320","src/jar.rs":"98237c4a37143e08bcb6e84c5ed69b799a8a08f89a1b83f02c425cc92b089252","src/lib.rs":"ffe4f6eaa10002c06fd52c52af1d28006a4aa7320ea302d417b244704c938e02","src/parse.rs":"ee46cee7fa445e6545f29eac3eac81e76ec29e9c53e000195af427c7315ee11c","src/secure/key.rs":"734f35ef4b0d6b63174befdcb970f0304ac63f0895871b7c2f267fefdd43b648","src/secure/macros.rs":"83d770e5c4eb7fbd3c3d86973b69042e9e2bb9fafb72a4456598e2ae78638d5f","src/secure/mod.rs":"5d7fecb62295827d474ed1ce6b7628fe93d4a09eb14babfde036d64e8e4a04f8","src/secure/private.rs":"ee114d603a7b97e6f78c09a3612be0afa2ff7aca5d68d728336797c8a36e8000","src/secure/signed.rs":"8440c9ce5a0be4e162fb502cd1fbe24572ce00709f5554c45f8bece39637590d"},"package":"746858cae4eae40fff37e1998320068df317bc247dc91a67c6cfa053afdc2abb"} \ No newline at end of 
file +{"files":{".travis.yml":"d2a9bb7c029e8ed0acfb8dc8e786014cfa4f053b6f4c525303d69fd7e28704e9","Cargo.toml":"6a8f9c03d5260359e497a70910ab444f32b51551e9c0aaffabcfbbb2dd7c906d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"9205f5b7d179b5ca140ec7322c5894540aef149a38bd55874bef9de5a48e0938","src/builder.rs":"4200963d44d1a59f1268965b77407ba977eb5a777875cb76ea927ddc829be3d8","src/delta.rs":"510fc3dbf0a70d635d0488c5a5a32a2ba8e1490ce05bee39d944ea8c02189bbc","src/draft.rs":"950b43b3f6e1c4c13b1e90220c71defe02713170807b41e5ffde9a1327688f48","src/jar.rs":"0e8a6e2f0426834101bd9608baf9f695839053523e9e9ac58aea03a73506b8fb","src/lib.rs":"963ff56045a4ee22e280ee24a42efc9b1d6a96de30d3856b39287ec2b51b00db","src/parse.rs":"549844993601f20f5de3f5d5f8bea0fce3fe4f09d72e343aff9e433948a4ec5c","src/secure/key.rs":"734f35ef4b0d6b63174befdcb970f0304ac63f0895871b7c2f267fefdd43b648","src/secure/macros.rs":"83d770e5c4eb7fbd3c3d86973b69042e9e2bb9fafb72a4456598e2ae78638d5f","src/secure/mod.rs":"5d7fecb62295827d474ed1ce6b7628fe93d4a09eb14babfde036d64e8e4a04f8","src/secure/private.rs":"bea61d91772285e0db7c234bda32d9e95ce386dba5cab640859531d72f13628c","src/secure/signed.rs":"26c46c2d561ea14d1d8d79f85342a98b4bd749df776677dde91dd9b928e91fbe"},"package":"1465f8134efa296b4c19db34d909637cb2bf0f7aaf21299e23e18fa29ac557cf"} \ No newline at end of file diff --git a/third_party/rust/cookie/Cargo.toml b/third_party/rust/cookie/Cargo.toml index 84aa9e24e043..8b2f89d1790d 100644 --- a/third_party/rust/cookie/Cargo.toml +++ b/third_party/rust/cookie/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "cookie" -version = "0.10.1" +version = "0.11.0" authors = ["Alex Crichton ", "Sergio Benitez "] description = "Crate for parsing HTTP cookie headers and managing a cookie jar. 
Supports signed\nand private (encrypted + signed) jars.\n" documentation = "https://docs.rs/cookie" @@ -20,21 +20,21 @@ license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/cookie-rs" [package.metadata.docs.rs] all-features = true -[dependencies.time] -version = "0.1" +[dependencies.base64] +version = "0.9.0" +optional = true [dependencies.ring] -version = "0.12.0" +version = "0.13.0" optional = true -[dependencies.base64] -version = "0.6.0" -optional = true +[dependencies.time] +version = "0.1" [dependencies.url] version = "1.0" optional = true [features] -secure = ["ring", "base64"] percent-encode = ["url"] +secure = ["ring", "base64"] diff --git a/third_party/rust/cookie/README.md b/third_party/rust/cookie/README.md index 81777d48915b..3ff2827f6f58 100644 --- a/third_party/rust/cookie/README.md +++ b/third_party/rust/cookie/README.md @@ -18,9 +18,17 @@ See the [documentation](http://docs.rs/cookie) for detailed usage information. # License -`cookie-rs` is primarily distributed under the terms of both the MIT license and -the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. +This project is licensed under either of -See [LICENSE-APACHE](LICENSE-APACHE), and [LICENSE-MIT](LICENSE-MIT) for -details. + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `cookie-rs` by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/cookie/src/builder.rs b/third_party/rust/cookie/src/builder.rs index 9d0cbc7bde08..7145be5a480c 100644 --- a/third_party/rust/cookie/src/builder.rs +++ b/third_party/rust/cookie/src/builder.rs @@ -154,7 +154,7 @@ impl CookieBuilder { /// .secure(true) /// .finish(); /// - /// assert_eq!(c.secure(), true); + /// assert_eq!(c.secure(), Some(true)); /// ``` #[inline] pub fn secure(mut self, value: bool) -> CookieBuilder { @@ -173,7 +173,7 @@ impl CookieBuilder { /// .http_only(true) /// .finish(); /// - /// assert_eq!(c.http_only(), true); + /// assert_eq!(c.http_only(), Some(true)); /// ``` #[inline] pub fn http_only(mut self, value: bool) -> CookieBuilder { diff --git a/third_party/rust/cookie/src/draft.rs b/third_party/rust/cookie/src/draft.rs index bf504cfb2221..ed7729d9dcb0 100644 --- a/third_party/rust/cookie/src/draft.rs +++ b/third_party/rust/cookie/src/draft.rs @@ -10,6 +10,8 @@ use std::fmt; /// attribute is "Strict", then the cookie is never sent in cross-site requests. /// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site /// requests with "safe" HTTP methods, i.e, `GET`, `HEAD`, `OPTIONS`, `TRACE`. +/// If the `SameSite` attribute is not present (made explicit via the +/// `SameSite::None` variant), then the cookie will be sent as normal. /// /// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition /// are subject to change. @@ -18,7 +20,9 @@ pub enum SameSite { /// The "Strict" `SameSite` attribute. Strict, /// The "Lax" `SameSite` attribute. - Lax + Lax, + /// No `SameSite` attribute. 
+ None } impl SameSite { @@ -32,12 +36,13 @@ impl SameSite { /// let strict = SameSite::Strict; /// assert!(strict.is_strict()); /// assert!(!strict.is_lax()); + /// assert!(!strict.is_none()); /// ``` #[inline] pub fn is_strict(&self) -> bool { match *self { SameSite::Strict => true, - SameSite::Lax => false + SameSite::Lax | SameSite::None => false, } } @@ -51,12 +56,33 @@ impl SameSite { /// let lax = SameSite::Lax; /// assert!(lax.is_lax()); /// assert!(!lax.is_strict()); + /// assert!(!lax.is_none()); /// ``` #[inline] pub fn is_lax(&self) -> bool { match *self { - SameSite::Strict => false, - SameSite::Lax => true + SameSite::Lax => true, + SameSite::Strict | SameSite::None => false, + } + } + + /// Returns `true` if `self` is `SameSite::None` and `false` otherwise. + /// + /// # Example + /// + /// ```rust + /// use cookie::SameSite; + /// + /// let none = SameSite::None; + /// assert!(none.is_none()); + /// assert!(!none.is_lax()); + /// assert!(!none.is_strict()); + /// ``` + #[inline] + pub fn is_none(&self) -> bool { + match *self { + SameSite::None => true, + SameSite::Lax | SameSite::Strict => false } } } @@ -65,7 +91,8 @@ impl fmt::Display for SameSite { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { SameSite::Strict => write!(f, "Strict"), - SameSite::Lax => write!(f, "Lax") + SameSite::Lax => write!(f, "Lax"), + SameSite::None => Ok(()), } } } diff --git a/third_party/rust/cookie/src/jar.rs b/third_party/rust/cookie/src/jar.rs index c2fed8004593..b6231fd6892d 100644 --- a/third_party/rust/cookie/src/jar.rs +++ b/third_party/rust/cookie/src/jar.rs @@ -121,10 +121,13 @@ impl CookieJar { .and_then(|c| if !c.removed { Some(&c.cookie) } else { None }) } - /// Adds an "original" `cookie` to this jar. Adding an original cookie does - /// not affect the [delta](#method.delta) computation. This method is - /// intended to be used to seed the cookie jar with cookies received from a - /// client's HTTP message. 
+ /// Adds an "original" `cookie` to this jar. If an original cookie with the + /// same name already exists, it is replaced with `cookie`. Cookies added + /// with `add` take precedence and are not replaced by this method. + /// + /// Adding an original cookie does not affect the [delta](#method.delta) + /// computation. This method is intended to be used to seed the cookie jar + /// with cookies received from a client's HTTP message. /// /// For accurate `delta` computations, this method should not be called /// after calling `remove`. @@ -147,7 +150,8 @@ impl CookieJar { self.original_cookies.replace(DeltaCookie::added(cookie)); } - /// Adds `cookie` to this jar. + /// Adds `cookie` to this jar. If a cookie with the same name already + /// exists, it is replaced with `cookie`. /// /// # Example /// @@ -228,6 +232,47 @@ impl CookieJar { } } + /// Removes `cookie` from this jar completely. This method differs from + /// `remove` in that no delta cookie is created under any condition. Neither + /// the `delta` nor `iter` methods will return a cookie that is removed + /// using this method. + /// + /// # Example + /// + /// Removing an _original_ cookie; no _removal_ cookie is generated: + /// + /// ```rust + /// # extern crate cookie; + /// extern crate time; + /// + /// use cookie::{CookieJar, Cookie}; + /// use time::Duration; + /// + /// # fn main() { + /// let mut jar = CookieJar::new(); + /// + /// // Add an original cookie and a new cookie. + /// jar.add_original(Cookie::new("name", "value")); + /// jar.add(Cookie::new("key", "value")); + /// assert_eq!(jar.delta().count(), 1); + /// assert_eq!(jar.iter().count(), 2); + /// + /// // Now force remove the original cookie. + /// jar.force_remove(Cookie::new("name", "value")); + /// assert_eq!(jar.delta().count(), 1); + /// assert_eq!(jar.iter().count(), 1); + /// + /// // Now force remove the new cookie. 
+ /// jar.force_remove(Cookie::new("key", "value")); + /// assert_eq!(jar.delta().count(), 0); + /// assert_eq!(jar.iter().count(), 0); + /// # } + /// ``` + pub fn force_remove<'a>(&mut self, cookie: Cookie<'a>) { + self.original_cookies.remove(cookie.name()); + self.delta_cookies.remove(cookie.name()); + } + /// Removes all cookies from this cookie jar. #[deprecated(since = "0.7.0", note = "calling this method may not remove \ all cookies since the path and domain are not specified; use \ diff --git a/third_party/rust/cookie/src/lib.rs b/third_party/rust/cookie/src/lib.rs index 340bedbb455b..6f3525071ecb 100644 --- a/third_party/rust/cookie/src/lib.rs +++ b/third_party/rust/cookie/src/lib.rs @@ -10,7 +10,7 @@ //! Add the following to the `[dependencies]` section of your `Cargo.toml`: //! //! ```ignore -//! cookie = "0.10" +//! cookie = "0.11" //! ``` //! //! Then add the following line to your crate root: @@ -58,7 +58,7 @@ //! features = ["secure", "percent-encode"] //! ``` -#![doc(html_root_url = "https://docs.rs/cookie/0.10")] +#![doc(html_root_url = "https://docs.rs/cookie/0.11")] #![deny(missing_docs)] #[cfg(feature = "percent-encode")] extern crate url; @@ -74,10 +74,12 @@ mod draft; #[cfg(feature = "secure")] pub use secure::*; use std::borrow::Cow; -use std::ascii::AsciiExt; use std::fmt; use std::str::FromStr; +#[allow(unused_imports, deprecated)] +use std::ascii::AsciiExt; + #[cfg(feature = "percent-encode")] use url::percent_encoding::{USERINFO_ENCODE_SET, percent_encode}; use time::{Tm, Duration}; @@ -164,7 +166,7 @@ pub struct Cookie<'c> { name: CookieStr, /// The cookie's value. value: CookieStr, - /// The cookie's experiation, if any. + /// The cookie's expiration, if any. expires: Option, /// The cookie's maximum age, if any. max_age: Option, @@ -172,10 +174,10 @@ pub struct Cookie<'c> { domain: Option, /// The cookie's path domain, if any. path: Option, - /// Whether this cookie was marked secure. 
- secure: bool, - /// Whether this cookie was marked httponly. - http_only: bool, + /// Whether this cookie was marked Secure. + secure: Option, + /// Whether this cookie was marked HttpOnly. + http_only: Option, /// The draft `SameSite` attribute. same_site: Option, } @@ -203,8 +205,8 @@ impl Cookie<'static> { max_age: None, domain: None, path: None, - secure: false, - http_only: false, + secure: None, + http_only: None, same_site: None, } } @@ -256,7 +258,7 @@ impl<'c> Cookie<'c> { /// /// let c = Cookie::parse("foo=bar%20baz; HttpOnly").unwrap(); /// assert_eq!(c.name_value(), ("foo", "bar%20baz")); - /// assert_eq!(c.http_only(), true); + /// assert_eq!(c.http_only(), Some(true)); /// ``` pub fn parse(s: S) -> Result, ParseError> where S: Into> @@ -278,7 +280,7 @@ impl<'c> Cookie<'c> { /// /// let c = Cookie::parse_encoded("foo=bar%20baz; HttpOnly").unwrap(); /// assert_eq!(c.name_value(), ("foo", "bar baz")); - /// assert_eq!(c.http_only(), true); + /// assert_eq!(c.http_only(), Some(true)); /// ``` #[cfg(feature = "percent-encode")] pub fn parse_encoded(s: S) -> Result, ParseError> @@ -379,7 +381,10 @@ impl<'c> Cookie<'c> { (self.name(), self.value()) } - /// Returns whether this cookie was marked `HttpOnly` or not. + /// Returns whether this cookie was marked `HttpOnly` or not. Returns + /// `Some(true)` when the cookie was explicitly set (manually or parsed) as + /// `HttpOnly`, `Some(false)` when `http_only` was manually set to `false`, + /// and `None` otherwise. /// /// # Example /// @@ -387,14 +392,31 @@ impl<'c> Cookie<'c> { /// use cookie::Cookie; /// /// let c = Cookie::parse("name=value; httponly").unwrap(); - /// assert_eq!(c.http_only(), true); + /// assert_eq!(c.http_only(), Some(true)); + /// + /// let mut c = Cookie::new("name", "value"); + /// assert_eq!(c.http_only(), None); + /// + /// let mut c = Cookie::new("name", "value"); + /// assert_eq!(c.http_only(), None); + /// + /// // An explicitly set "false" value. 
+ /// c.set_http_only(false); + /// assert_eq!(c.http_only(), Some(false)); + /// + /// // An explicitly set "true" value. + /// c.set_http_only(true); + /// assert_eq!(c.http_only(), Some(true)); /// ``` #[inline] - pub fn http_only(&self) -> bool { + pub fn http_only(&self) -> Option { self.http_only } - /// Returns whether this cookie was marked `Secure` or not. + /// Returns whether this cookie was marked `Secure` or not. Returns + /// `Some(true)` when the cookie was explicitly set (manually or parsed) as + /// `Secure`, `Some(false)` when `secure` was manually set to `false`, and + /// `None` otherwise. /// /// # Example /// @@ -402,10 +424,24 @@ impl<'c> Cookie<'c> { /// use cookie::Cookie; /// /// let c = Cookie::parse("name=value; Secure").unwrap(); - /// assert_eq!(c.secure(), true); + /// assert_eq!(c.secure(), Some(true)); + /// + /// let mut c = Cookie::parse("name=value").unwrap(); + /// assert_eq!(c.secure(), None); + /// + /// let mut c = Cookie::new("name", "value"); + /// assert_eq!(c.secure(), None); + /// + /// // An explicitly set "false" value. + /// c.set_secure(false); + /// assert_eq!(c.secure(), Some(false)); + /// + /// // An explicitly set "true" value. + /// c.set_secure(true); + /// assert_eq!(c.secure(), Some(true)); /// ``` #[inline] - pub fn secure(&self) -> bool { + pub fn secure(&self) -> Option { self.secure } @@ -549,14 +585,14 @@ impl<'c> Cookie<'c> { /// use cookie::Cookie; /// /// let mut c = Cookie::new("name", "value"); - /// assert_eq!(c.http_only(), false); + /// assert_eq!(c.http_only(), None); /// /// c.set_http_only(true); - /// assert_eq!(c.http_only(), true); + /// assert_eq!(c.http_only(), Some(true)); /// ``` #[inline] pub fn set_http_only(&mut self, value: bool) { - self.http_only = value; + self.http_only = Some(value); } /// Sets the value of `secure` in `self` to `value`. 
@@ -567,14 +603,14 @@ impl<'c> Cookie<'c> { /// use cookie::Cookie; /// /// let mut c = Cookie::new("name", "value"); - /// assert_eq!(c.secure(), false); + /// assert_eq!(c.secure(), None); /// /// c.set_secure(true); - /// assert_eq!(c.secure(), true); + /// assert_eq!(c.secure(), Some(true)); /// ``` #[inline] pub fn set_secure(&mut self, value: bool) { - self.secure = value; + self.secure = Some(value); } /// Sets the value of `same_site` in `self` to `value`. @@ -708,16 +744,18 @@ impl<'c> Cookie<'c> { } fn fmt_parameters(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.http_only() { + if let Some(true) = self.http_only() { write!(f, "; HttpOnly")?; } - if self.secure() { + if let Some(true) = self.secure() { write!(f, "; Secure")?; } if let Some(same_site) = self.same_site() { - write!(f, "; SameSite={}", same_site)?; + if !same_site.is_none() { + write!(f, "; SameSite={}", same_site)?; + } } if let Some(path) = self.path() { @@ -1002,6 +1040,10 @@ mod tests { let cookie = Cookie::build("foo", "bar") .same_site(SameSite::Lax).finish(); assert_eq!(&cookie.to_string(), "foo=bar; SameSite=Lax"); + + let cookie = Cookie::build("foo", "bar") + .same_site(SameSite::None).finish(); + assert_eq!(&cookie.to_string(), "foo=bar"); } #[test] diff --git a/third_party/rust/cookie/src/parse.rs b/third_party/rust/cookie/src/parse.rs index 2caf86dff1f9..be0a22272038 100644 --- a/third_party/rust/cookie/src/parse.rs +++ b/third_party/rust/cookie/src/parse.rs @@ -1,11 +1,13 @@ use std::borrow::Cow; use std::cmp; use std::error::Error; -use std::ascii::AsciiExt; use std::str::Utf8Error; use std::fmt; use std::convert::From; +#[allow(unused_imports, deprecated)] +use std::ascii::AsciiExt; + #[cfg(feature = "percent-encode")] use url::percent_encoding::percent_decode; use time::{self, Duration}; @@ -133,8 +135,8 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result, ParseError> { max_age: None, domain: None, path: None, - secure: false, - http_only: false, + secure: 
None, + http_only: None, same_site: None }; @@ -145,8 +147,8 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result, ParseError> { }; match (&*key.to_ascii_lowercase(), value) { - ("secure", _) => cookie.secure = true, - ("httponly", _) => cookie.http_only = true, + ("secure", _) => cookie.secure = Some(true), + ("httponly", _) => cookie.http_only = Some(true), ("max-age", Some(v)) => { // See RFC 6265 Section 5.2.2, negative values indicate that the // earliest possible expiration time should be used, so set the diff --git a/third_party/rust/cookie/src/secure/private.rs b/third_party/rust/cookie/src/secure/private.rs index bad4820597d9..56dcdbc226b9 100644 --- a/third_party/rust/cookie/src/secure/private.rs +++ b/third_party/rust/cookie/src/secure/private.rs @@ -104,6 +104,44 @@ impl<'a> PrivateJar<'a> { /// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value"); /// ``` pub fn add(&mut self, mut cookie: Cookie<'static>) { + self.encrypt_cookie(&mut cookie); + + // Add the sealed cookie to the parent. + self.parent.add(cookie); + } + + /// Adds an "original" `cookie` to parent jar. The cookie's value is + /// encrypted with authenticated encryption assuring confidentiality, + /// integrity, and authenticity. Adding an original cookie does not affect + /// the [`CookieJar::delta()`](struct.CookieJar.html#method.delta) + /// computation. This method is intended to be used to seed the cookie jar + /// with cookies received from a client's HTTP message. + /// + /// For accurate `delta` computations, this method should not be called + /// after calling `remove`. 
+ /// + /// # Example + /// + /// ```rust + /// use cookie::{CookieJar, Cookie, Key}; + /// + /// let key = Key::generate(); + /// let mut jar = CookieJar::new(); + /// jar.private(&key).add_original(Cookie::new("name", "value")); + /// + /// assert_eq!(jar.iter().count(), 1); + /// assert_eq!(jar.delta().count(), 0); + /// ``` + pub fn add_original(&mut self, mut cookie: Cookie<'static>) { + self.encrypt_cookie(&mut cookie); + + // Add the sealed cookie to the parent. + self.parent.add_original(cookie); + } + + /// Encrypts the cookie's value with + /// authenticated encryption assuring confidentiality, integrity, and authenticity. + fn encrypt_cookie(&self, cookie: &mut Cookie) { let mut data; let output_len = { // Create the `SealingKey` structure. @@ -129,9 +167,6 @@ impl<'a> PrivateJar<'a> { // Base64 encode the nonce and encrypted value. let sealed_value = base64::encode(&data[..(NONCE_LEN + output_len)]); cookie.set_value(sealed_value); - - // Add the sealed cookie to the parent. - self.parent.add(cookie); } /// Removes `cookie` from the parent jar. diff --git a/third_party/rust/cookie/src/secure/signed.rs b/third_party/rust/cookie/src/secure/signed.rs index 1c596cb1e78c..132fd746ae79 100644 --- a/third_party/rust/cookie/src/secure/signed.rs +++ b/third_party/rust/cookie/src/secure/signed.rs @@ -96,12 +96,42 @@ impl<'a> SignedJar<'a> { /// assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value"); /// ``` pub fn add(&mut self, mut cookie: Cookie<'static>) { + self.sign_cookie(&mut cookie); + self.parent.add(cookie); + } + + /// Adds an "original" `cookie` to this jar. The cookie's value is signed + /// assuring integrity and authenticity. Adding an original cookie does not + /// affect the [`CookieJar::delta()`](struct.CookieJar.html#method.delta) + /// computation. This method is intended to be used to seed the cookie jar + /// with cookies received from a client's HTTP message. 
+ /// + /// For accurate `delta` computations, this method should not be called + /// after calling `remove`. + /// + /// # Example + /// + /// ```rust + /// use cookie::{CookieJar, Cookie, Key}; + /// + /// let key = Key::generate(); + /// let mut jar = CookieJar::new(); + /// jar.signed(&key).add_original(Cookie::new("name", "value")); + /// + /// assert_eq!(jar.iter().count(), 1); + /// assert_eq!(jar.delta().count(), 0); + /// ``` + pub fn add_original(&mut self, mut cookie: Cookie<'static>) { + self.sign_cookie(&mut cookie); + self.parent.add_original(cookie); + } + + /// Signs the cookie's value assuring integrity and authenticity. + fn sign_cookie(&self, cookie: &mut Cookie) { let digest = sign(&self.key, cookie.value().as_bytes()); let mut new_value = base64::encode(digest.as_ref()); new_value.push_str(cookie.value()); cookie.set_value(new_value); - - self.parent.add(cookie); } /// Removes `cookie` from the parent jar. diff --git a/third_party/rust/crossbeam-deque-0.2.0/.cargo-checksum.json b/third_party/rust/crossbeam-deque-0.2.0/.cargo-checksum.json new file mode 100644 index 000000000000..93cffc883af4 --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".travis.yml":"7a28ab46755ee3ed2ad3078ecec5f26cf1b95fa122d947edfc1a15bff4849ae8","CHANGELOG.md":"c134cbbcfdf39e86a51337715daca6498d000e019f2d0d5050d04e14e7ef5219","Cargo.toml":"a247839eb4e5a43632eee8727e969a23b4474a6d1b390ea4a19e3e714d8ba060","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"acc366bfcc7262f4719306196e40d59b4e832179adc9cfe2cd27cc710a6787ac","src/lib.rs":"6f50bc16841c93b80d588bbeae9d56b55a2f3a32fe5232fd6e748362b680b4ef"},"package":"f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-deque-0.2.0/.travis.yml 
b/third_party/rust/crossbeam-deque-0.2.0/.travis.yml new file mode 100644 index 000000000000..e6105d31db26 --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/.travis.yml @@ -0,0 +1,13 @@ +language: rust + +rust: + - stable + - beta + - nightly + - 1.13.0 + +script: + - cargo build + - cargo build --release + - cargo test + - cargo test --release diff --git a/third_party/rust/crossbeam-deque-0.2.0/CHANGELOG.md b/third_party/rust/crossbeam-deque-0.2.0/CHANGELOG.md new file mode 100644 index 000000000000..77973d18e45f --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/CHANGELOG.md @@ -0,0 +1,18 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.1.1] - 2017-11-29 +### Changed +- Update `crossbeam-epoch` to `0.2.0`. + +## 0.1.0 - 2017-11-26 +### Added +- First implementation of the Chase-Lev deque. + +[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.1...HEAD +[0.1.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.1.1 diff --git a/third_party/rust/crossbeam-deque-0.2.0/Cargo.toml b/third_party/rust/crossbeam-deque-0.2.0/Cargo.toml new file mode 100644 index 000000000000..cc83b634f488 --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/Cargo.toml @@ -0,0 +1,33 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "crossbeam-deque" +version = "0.2.0" +authors = ["The Crossbeam Project Developers"] +description = "Concurrent work-stealing deque" +homepage = "https://github.com/crossbeam-rs/crossbeam-deque" +documentation = "https://docs.rs/crossbeam-deque" +readme = "README.md" +keywords = ["chase-lev", "lock-free", "scheduler", "scheduling"] +categories = ["algorithms", "concurrency", "data-structures"] +license = "MIT/Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam-deque" +[dependencies.crossbeam-epoch] +version = "0.3.0" + +[dependencies.crossbeam-utils] +version = "0.2.1" +[dev-dependencies.rand] +version = "0.4" +[badges.travis-ci] +repository = "crossbeam-rs/crossbeam-deque" diff --git a/third_party/rust/bitflags-0.7.0/LICENSE-APACHE b/third_party/rust/crossbeam-deque-0.2.0/LICENSE-APACHE similarity index 100% rename from third_party/rust/bitflags-0.7.0/LICENSE-APACHE rename to third_party/rust/crossbeam-deque-0.2.0/LICENSE-APACHE diff --git a/third_party/rust/crossbeam-deque-0.2.0/LICENSE-MIT b/third_party/rust/crossbeam-deque-0.2.0/LICENSE-MIT new file mode 100644 index 000000000000..25597d5838fa --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/crossbeam-deque-0.2.0/README.md b/third_party/rust/crossbeam-deque-0.2.0/README.md new file mode 100644 index 000000000000..92de94b5b68c --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/README.md @@ -0,0 +1,27 @@ +# Concurrent work-stealing deque + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-deque.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-deque) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-deque) +[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)](https://crates.io/crates/crossbeam-deque) +[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)](https://docs.rs/crossbeam-deque) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-deque = "0.1" +``` + +Next, add this to your crate: + +```rust +extern crate crossbeam_deque; +``` + +## License + +Licensed under the terms of MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. diff --git a/third_party/rust/crossbeam-deque-0.2.0/src/lib.rs b/third_party/rust/crossbeam-deque-0.2.0/src/lib.rs new file mode 100644 index 000000000000..525611945ee8 --- /dev/null +++ b/third_party/rust/crossbeam-deque-0.2.0/src/lib.rs @@ -0,0 +1,1045 @@ +//! A concurrent work-stealing deque. +//! +//! 
The data structure can be thought of as a dynamically growable and shrinkable buffer that has +//! two ends: bottom and top. A [`Deque`] can [`push`] elements into the bottom and [`pop`] +//! elements from the bottom, but it can only [`steal`][Deque::steal] elements from the top. +//! +//! A [`Deque`] doesn't implement `Sync` so it cannot be shared among multiple threads. However, it +//! can create [`Stealer`]s, and those can be easily cloned, shared, and sent to other threads. +//! [`Stealer`]s can only [`steal`][Stealer::steal] elements from the top. +//! +//! Here's a visualization of the data structure: +//! +//! ```text +//! top +//! _ +//! Deque::steal -> | | <- Stealer::steal +//! | | +//! | | +//! | | +//! Deque::push/pop -> |_| +//! +//! bottom +//! ``` +//! +//! # Work-stealing schedulers +//! +//! Usually, the data structure is used in work-stealing schedulers as follows. +//! +//! There is a number of threads. Each thread owns a [`Deque`] and creates a [`Stealer`] that is +//! shared among all other threads. Alternatively, it creates multiple [`Stealer`]s - one for each +//! of the other threads. +//! +//! Then, all threads are executing in a loop. In the loop, each one attempts to [`pop`] some work +//! from its own [`Deque`]. But if it is empty, it attempts to [`steal`][Stealer::steal] work from +//! some other thread instead. When executing work (or being idle), a thread may produce more work, +//! which gets [`push`]ed into its [`Deque`]. +//! +//! Of course, there are many variations of this strategy. For example, sometimes it may be +//! beneficial for a thread to always [`steal`][Deque::steal] work from the top of its deque +//! instead of calling [`pop`] and taking it from the bottom. +//! +//! # Examples +//! +//! ``` +//! use crossbeam_deque::{Deque, Steal}; +//! use std::thread; +//! +//! let d = Deque::new(); +//! let s = d.stealer(); +//! +//! d.push('a'); +//! d.push('b'); +//! d.push('c'); +//! +//! assert_eq!(d.pop(), Some('c')); +//! 
drop(d); +//! +//! thread::spawn(move || { +//! assert_eq!(s.steal(), Steal::Data('a')); +//! assert_eq!(s.steal(), Steal::Data('b')); +//! }).join().unwrap(); +//! ``` +//! +//! # References +//! +//! The implementation is based on the following work: +//! +//! 1. [Chase and Lev. Dynamic circular work-stealing deque. SPAA 2005.][chase-lev] +//! 2. [Le, Pop, Cohen, and Nardelli. Correct and efficient work-stealing for weak memory models. +//! PPoPP 2013.][weak-mem] +//! 3. [Norris and Demsky. CDSchecker: checking concurrent data structures written with C/C++ +//! atomics. OOPSLA 2013.][checker] +//! +//! [chase-lev]: https://dl.acm.org/citation.cfm?id=1073974 +//! [weak-mem]: https://dl.acm.org/citation.cfm?id=2442524 +//! [checker]: https://dl.acm.org/citation.cfm?id=2509514 +//! +//! [`Deque`]: struct.Deque.html +//! [`Stealer`]: struct.Stealer.html +//! [`push`]: struct.Deque.html#method.push +//! [`pop`]: struct.Deque.html#method.pop +//! [Deque::steal]: struct.Deque.html#method.steal +//! [Stealer::steal]: struct.Stealer.html#method.steal + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; + +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::ptr; +use std::sync::Arc; +use std::sync::atomic::{self, AtomicIsize}; +use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; + +use epoch::{Atomic, Owned}; +use utils::cache_padded::CachePadded; + +/// Minimum buffer capacity for a deque. +const DEFAULT_MIN_CAP: usize = 16; + +/// If a buffer of at least this size is retired, thread-local garbage is flushed so that it gets +/// deallocated as soon as possible. +const FLUSH_THRESHOLD_BYTES: usize = 1 << 10; + +/// Possible outcomes of a steal operation. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] +pub enum Steal { + /// The deque was empty at the time of stealing. + Empty, + + /// Some data has been successfully stolen. 
+ Data(T), + + /// Lost the race for stealing data to another concurrent operation. Try again. + Retry, +} + +/// A buffer that holds elements in a deque. +struct Buffer { + /// Pointer to the allocated memory. + ptr: *mut T, + + /// Capacity of the buffer. Always a power of two. + cap: usize, +} + +unsafe impl Send for Buffer {} + +impl Buffer { + /// Returns a new buffer with the specified capacity. + fn new(cap: usize) -> Self { + debug_assert_eq!(cap, cap.next_power_of_two()); + + let mut v = Vec::with_capacity(cap); + let ptr = v.as_mut_ptr(); + mem::forget(v); + + Buffer { + ptr: ptr, + cap: cap, + } + } + + /// Returns a pointer to the element at the specified `index`. + unsafe fn at(&self, index: isize) -> *mut T { + // `self.cap` is always a power of two. + self.ptr.offset(index & (self.cap - 1) as isize) + } + + /// Writes `value` into the specified `index`. + unsafe fn write(&self, index: isize, value: T) { + ptr::write(self.at(index), value) + } + + /// Reads a value from the specified `index`. + unsafe fn read(&self, index: isize) -> T { + ptr::read(self.at(index)) + } +} + +impl Drop for Buffer { + fn drop(&mut self) { + unsafe { + drop(Vec::from_raw_parts(self.ptr, 0, self.cap)); + } + } +} + +/// Internal data that is shared between the deque and its stealers. +struct Inner { + /// The bottom index. + bottom: AtomicIsize, + + /// The top index. + top: AtomicIsize, + + /// The underlying buffer. + buffer: Atomic>, + + /// Minimum capacity of the buffer. Always a power of two. + min_cap: usize, +} + +impl Inner { + /// Returns a new `Inner` with default minimum capacity. + fn new() -> Self { + Self::with_min_capacity(DEFAULT_MIN_CAP) + } + + /// Returns a new `Inner` with minimum capacity of `min_cap` rounded to the next power of two. 
+ fn with_min_capacity(min_cap: usize) -> Self { + let power = min_cap.next_power_of_two(); + assert!(power >= min_cap, "capacity too large: {}", min_cap); + Inner { + bottom: AtomicIsize::new(0), + top: AtomicIsize::new(0), + buffer: Atomic::new(Buffer::new(power)), + min_cap: power, + } + } + + /// Resizes the internal buffer to the new capacity of `new_cap`. + #[cold] + unsafe fn resize(&self, new_cap: usize) { + // Load the bottom, top, and buffer. + let b = self.bottom.load(Relaxed); + let t = self.top.load(Relaxed); + + let buffer = self.buffer.load(Relaxed, epoch::unprotected()); + + // Allocate a new buffer. + let new = Buffer::new(new_cap); + + // Copy data from the old buffer to the new one. + let mut i = t; + while i != b { + ptr::copy_nonoverlapping(buffer.deref().at(i), new.at(i), 1); + i = i.wrapping_add(1); + } + + let guard = &epoch::pin(); + + // Replace the old buffer with the new one. + let old = self.buffer + .swap(Owned::new(new).into_shared(guard), Release, guard); + + // Destroy the old buffer later. + guard.defer(move || old.into_owned()); + + // If the buffer is very large, then flush the thread-local garbage in order to + // deallocate it as soon as possible. + if mem::size_of::() * new_cap >= FLUSH_THRESHOLD_BYTES { + guard.flush(); + } + } +} + +impl Drop for Inner { + fn drop(&mut self) { + // Load the bottom, top, and buffer. + let b = self.bottom.load(Relaxed); + let t = self.top.load(Relaxed); + + unsafe { + let buffer = self.buffer.load(Relaxed, epoch::unprotected()); + + // Go through the buffer from top to bottom and drop all elements in the deque. + let mut i = t; + while i != b { + ptr::drop_in_place(buffer.deref().at(i)); + i = i.wrapping_add(1); + } + + // Free the memory allocated by the buffer. + drop(buffer.into_owned()); + } + } +} + +/// A concurrent work-stealing deque. +/// +/// A deque has two ends: bottom and top. Elements can be [`push`]ed into the bottom and [`pop`]ped +/// from the bottom. 
The top end is special in that elements can only be stolen from it using the +/// [`steal`][Deque::steal] method. +/// +/// # Stealers +/// +/// While [`Deque`] doesn't implement `Sync`, it can create [`Stealer`]s using the method +/// [`stealer`][stealer], and those can be easily shared among multiple threads. [`Stealer`]s can +/// only [`steal`][Stealer::steal] elements from the top end of the deque. +/// +/// # Capacity +/// +/// The data structure is dynamically grows as elements are inserted and removed from it. If the +/// internal buffer gets full, a new one twice the size of the original is allocated. Similarly, +/// if it is less than a quarter full, a new buffer half the size of the original is allocated. +/// +/// In order to prevent frequent resizing (reallocations may be costly), it is possible to specify +/// a large minimum capacity for the deque by calling [`Deque::with_min_capacity`]. This +/// constructor will make sure that the internal buffer never shrinks below that size. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_deque::{Deque, Steal}; +/// +/// let d = Deque::with_min_capacity(1000); +/// let s = d.stealer(); +/// +/// d.push('a'); +/// d.push('b'); +/// d.push('c'); +/// +/// assert_eq!(d.pop(), Some('c')); +/// assert_eq!(d.steal(), Steal::Data('a')); +/// assert_eq!(s.steal(), Steal::Data('b')); +/// ``` +/// +/// [`Deque`]: struct.Deque.html +/// [`Stealer`]: struct.Stealer.html +/// [`push`]: struct.Deque.html#method.push +/// [`pop`]: struct.Deque.html#method.pop +/// [stealer]: struct.Deque.html#method.stealer +/// [`Deque::with_min_capacity`]: struct.Deque.html#method.with_min_capacity +/// [Deque::steal]: struct.Deque.html#method.steal +/// [Stealer::steal]: struct.Stealer.html#method.steal +pub struct Deque { + inner: Arc>>, + _marker: PhantomData<*mut ()>, // !Send + !Sync +} + +unsafe impl Send for Deque {} + +impl Deque { + /// Returns a new deque. 
+ /// + /// The internal buffer is destructed as soon as the deque and all its stealers get dropped. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::::new(); + /// ``` + pub fn new() -> Deque { + Deque { + inner: Arc::new(CachePadded::new(Inner::new())), + _marker: PhantomData, + } + } + + /// Returns a new deque with the specified minimum capacity. + /// + /// If the capacity is not a power of two, it will be rounded up to the next one. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// // The minimum capacity will be rounded up to 1024. + /// let d = Deque::::with_min_capacity(1000); + /// ``` + pub fn with_min_capacity(min_cap: usize) -> Deque { + Deque { + inner: Arc::new(CachePadded::new(Inner::with_min_capacity(min_cap))), + _marker: PhantomData, + } + } + + /// Returns `true` if the deque is empty. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::new(); + /// assert!(d.is_empty()); + /// d.push("foo"); + /// assert!(!d.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of elements in the deque. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::new(); + /// d.push('a'); + /// d.push('b'); + /// d.push('c'); + /// assert_eq!(d.len(), 3); + /// ``` + pub fn len(&self) -> usize { + let b = self.inner.bottom.load(Relaxed); + let t = self.inner.top.load(Relaxed); + b.wrapping_sub(t) as usize + } + + /// Pushes an element into the bottom of the deque. + /// + /// If the internal buffer is full, a new one twice the capacity of the current one will be + /// allocated. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::new(); + /// d.push(1); + /// d.push(2); + /// ``` + pub fn push(&self, value: T) { + unsafe { + // Load the bottom, top, and buffer. 
The buffer doesn't have to be epoch-protected + // because the current thread (the worker) is the only one that grows and shrinks it. + let b = self.inner.bottom.load(Relaxed); + let t = self.inner.top.load(Acquire); + + let mut buffer = self.inner.buffer.load(Relaxed, epoch::unprotected()); + + // Calculate the length of the deque. + let len = b.wrapping_sub(t); + + // Is the deque full? + let cap = buffer.deref().cap; + if len >= cap as isize { + // Yes. Grow the underlying buffer. + self.inner.resize(2 * cap); + buffer = self.inner.buffer.load(Relaxed, epoch::unprotected()); + } + + // Write `value` into the right slot and increment `b`. + buffer.deref().write(b, value); + atomic::fence(Release); + self.inner.bottom.store(b.wrapping_add(1), Relaxed); + } + } + + /// Pops an element from the bottom of the deque. + /// + /// If the internal buffer is less than a quarter full, a new buffer half the capacity of the + /// current one will be allocated. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::new(); + /// d.push(1); + /// d.push(2); + /// + /// assert_eq!(d.pop(), Some(2)); + /// assert_eq!(d.pop(), Some(1)); + /// assert_eq!(d.pop(), None); + /// ``` + pub fn pop(&self) -> Option { + // Load the bottom. + let b = self.inner.bottom.load(Relaxed); + + // If the deque is empty, return early without incurring the cost of a SeqCst fence. + let t = self.inner.top.load(Relaxed); + if b.wrapping_sub(t) <= 0 { + return None; + } + + // Decrement the bottom. + let b = b.wrapping_sub(1); + self.inner.bottom.store(b, Relaxed); + + // Load the buffer. The buffer doesn't have to be epoch-protected because the current + // thread (the worker) is the only one that grows and shrinks it. + let buf = unsafe { self.inner.buffer.load(Relaxed, epoch::unprotected()) }; + + atomic::fence(SeqCst); + + // Load the top. + let t = self.inner.top.load(Relaxed); + + // Compute the length after the bottom was decremented. 
+ let len = b.wrapping_sub(t); + + if len < 0 { + // The deque is empty. Restore the bottom back to the original value. + self.inner.bottom.store(b.wrapping_add(1), Relaxed); + None + } else { + // Read the value to be popped. + let mut value = unsafe { Some(buf.deref().read(b)) }; + + // Are we popping the last element from the deque? + if len == 0 { + // Try incrementing the top. + if self.inner + .top + .compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed) + .is_err() + { + // Failed. We didn't pop anything. + mem::forget(value.take()); + } + + // Restore the bottom back to the original value. + self.inner.bottom.store(b.wrapping_add(1), Relaxed); + } else { + // Shrink the buffer if `len` is less than one fourth of `self.inner.min_cap`. + unsafe { + let cap = buf.deref().cap; + if cap > self.inner.min_cap && len < cap as isize / 4 { + self.inner.resize(cap / 2); + } + } + } + + value + } + } + + /// Steals an element from the top of the deque. + /// + /// Unlike most methods in concurrent data structures, if another operation gets in the way + /// while attempting to steal data, this method will return immediately with [`Steal::Retry`] + /// instead of retrying. + /// + /// If the internal buffer is less than a quarter full, a new buffer half the capacity of the + /// current one will be allocated. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::{Deque, Steal}; + /// + /// let d = Deque::new(); + /// d.push(1); + /// d.push(2); + /// + /// // Attempt to steal an element. + /// // + /// // No other threads are working with the deque, so this time we know for sure that we + /// // won't get `Steal::Retry` as the result. + /// assert_eq!(d.steal(), Steal::Data(1)); + /// + /// // Attempt to steal an element, but keep retrying if we get `Retry`. 
+ /// loop { + /// match d.steal() { + /// Steal::Empty => panic!("should steal something"), + /// Steal::Data(data) => { + /// assert_eq!(data, 2); + /// break; + /// } + /// Steal::Retry => {} + /// } + /// } + /// ``` + /// + /// [`Steal::Retry`]: enum.Steal.html#variant.Retry + pub fn steal(&self) -> Steal { + let b = self.inner.bottom.load(Relaxed); + let buf = unsafe { self.inner.buffer.load(Relaxed, epoch::unprotected()) }; + let t = self.inner.top.load(Relaxed); + let len = b.wrapping_sub(t); + + // Is the deque empty? + if len <= 0 { + return Steal::Empty; + } + + // Try incrementing the top to steal the value. + if self.inner + .top + .compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed) + .is_ok() + { + let data = unsafe { buf.deref().read(t) }; + + // Shrink the buffer if `len - 1` is less than one fourth of `self.inner.min_cap`. + unsafe { + let cap = buf.deref().cap; + if cap > self.inner.min_cap && len <= cap as isize / 4 { + self.inner.resize(cap / 2); + } + } + + return Steal::Data(data); + } + + Steal::Retry + } + + /// Creates a stealer that can be shared with other threads. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::{Deque, Steal}; + /// use std::thread; + /// + /// let d = Deque::new(); + /// d.push(1); + /// d.push(2); + /// + /// let s = d.stealer(); + /// + /// thread::spawn(move || { + /// assert_eq!(s.steal(), Steal::Data(1)); + /// }).join().unwrap(); + /// ``` + pub fn stealer(&self) -> Stealer { + Stealer { + inner: self.inner.clone(), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for Deque { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Deque {{ ... }}") + } +} + +impl Default for Deque { + fn default() -> Deque { + Deque::new() + } +} + +/// A stealer that steals elements from the top of a deque. +/// +/// The only operation a stealer can do that manipulates the deque is [`steal`]. +/// +/// Stealers can be cloned in order to create more of them. 
They also implement `Send` and `Sync` +/// so they can be easily shared among multiple threads. +/// +/// [`steal`]: struct.Stealer.html#method.steal +pub struct Stealer { + inner: Arc>>, + _marker: PhantomData<*mut ()>, // !Send + !Sync +} + +unsafe impl Send for Stealer {} +unsafe impl Sync for Stealer {} + +impl Stealer { + /// Returns `true` if the deque is empty. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::new(); + /// d.push("foo"); + /// + /// let s = d.stealer(); + /// assert!(!d.is_empty()); + /// s.steal(); + /// assert!(d.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of elements in the deque. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::new(); + /// let s = d.stealer(); + /// d.push('a'); + /// d.push('b'); + /// d.push('c'); + /// assert_eq!(s.len(), 3); + /// ``` + pub fn len(&self) -> usize { + let t = self.inner.top.load(Relaxed); + atomic::fence(SeqCst); + let b = self.inner.bottom.load(Relaxed); + std::cmp::max(b.wrapping_sub(t), 0) as usize + } + + /// Steals an element from the top of the deque. + /// + /// Unlike most methods in concurrent data structures, if another operation gets in the way + /// while attempting to steal data, this method will return immediately with [`Steal::Retry`] + /// instead of retrying. + /// + /// This method will not attempt to resize the internal buffer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::{Deque, Steal}; + /// + /// let d = Deque::new(); + /// let s = d.stealer(); + /// d.push(1); + /// d.push(2); + /// + /// // Attempt to steal an element, but keep retrying if we get `Retry`. 
+ /// loop { + /// match d.steal() { + /// Steal::Empty => panic!("should steal something"), + /// Steal::Data(data) => { + /// assert_eq!(data, 1); + /// break; + /// } + /// Steal::Retry => {} + /// } + /// } + /// ``` + /// + /// [`Steal::Retry`]: enum.Steal.html#variant.Retry + pub fn steal(&self) -> Steal { + // Load the top. + let t = self.inner.top.load(Acquire); + + // A SeqCst fence is needed here. + // If the current thread is already pinned (reentrantly), we must manually issue the fence. + // Otherwise, the following pinning will issue the fence anyway, so we don't have to. + if epoch::is_pinned() { + atomic::fence(SeqCst); + } + + let guard = &epoch::pin(); + + // Load the bottom. + let b = self.inner.bottom.load(Acquire); + + // Is the deque empty? + if b.wrapping_sub(t) <= 0 { + return Steal::Empty; + } + + // Load the buffer and read the value at the top. + let buf = self.inner.buffer.load(Acquire, guard); + let value = unsafe { buf.deref().read(t) }; + + // Try incrementing the top to steal the value. + if self.inner + .top + .compare_exchange(t, t.wrapping_add(1), SeqCst, Relaxed) + .is_ok() + { + return Steal::Data(value); + } + + // We didn't steal this value, forget it. + mem::forget(value); + + Steal::Retry + } +} + +impl Clone for Stealer { + /// Creates another stealer. + fn clone(&self) -> Stealer { + Stealer { + inner: self.inner.clone(), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for Stealer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Stealer {{ ... 
}}") + } +} + +#[cfg(test)] +mod tests { + extern crate rand; + + use std::sync::{Arc, Mutex}; + use std::sync::atomic::{AtomicBool, AtomicUsize}; + use std::sync::atomic::Ordering::SeqCst; + use std::thread; + + use epoch; + use self::rand::Rng; + + use super::{Deque, Steal}; + + #[test] + fn smoke() { + let d = Deque::new(); + let s = d.stealer(); + assert_eq!(d.pop(), None); + assert_eq!(s.steal(), Steal::Empty); + assert_eq!(d.len(), 0); + assert_eq!(s.len(), 0); + + d.push(1); + assert_eq!(d.len(), 1); + assert_eq!(s.len(), 1); + assert_eq!(d.pop(), Some(1)); + assert_eq!(d.pop(), None); + assert_eq!(s.steal(), Steal::Empty); + assert_eq!(d.len(), 0); + assert_eq!(s.len(), 0); + + d.push(2); + assert_eq!(s.steal(), Steal::Data(2)); + assert_eq!(s.steal(), Steal::Empty); + assert_eq!(d.pop(), None); + + d.push(3); + d.push(4); + d.push(5); + assert_eq!(d.steal(), Steal::Data(3)); + assert_eq!(s.steal(), Steal::Data(4)); + assert_eq!(d.steal(), Steal::Data(5)); + assert_eq!(d.steal(), Steal::Empty); + } + + #[test] + fn steal_push() { + const STEPS: usize = 50_000; + + let d = Deque::new(); + let s = d.stealer(); + let t = thread::spawn(move || for i in 0..STEPS { + loop { + if let Steal::Data(v) = s.steal() { + assert_eq!(i, v); + break; + } + } + }); + + for i in 0..STEPS { + d.push(i); + } + t.join().unwrap(); + } + + #[test] + fn stampede() { + const COUNT: usize = 50_000; + + let d = Deque::new(); + + for i in 0..COUNT { + d.push(Box::new(i + 1)); + } + let remaining = Arc::new(AtomicUsize::new(COUNT)); + + let threads = (0..8) + .map(|_| { + let s = d.stealer(); + let remaining = remaining.clone(); + + thread::spawn(move || { + let mut last = 0; + while remaining.load(SeqCst) > 0 { + if let Steal::Data(x) = s.steal() { + assert!(last < *x); + last = *x; + remaining.fetch_sub(1, SeqCst); + } + } + }) + }) + .collect::>(); + + let mut last = COUNT + 1; + while remaining.load(SeqCst) > 0 { + if let Some(x) = d.pop() { + assert!(last > *x); + last = *x; + 
remaining.fetch_sub(1, SeqCst); + } + } + + for t in threads { + t.join().unwrap(); + } + } + + fn run_stress() { + const COUNT: usize = 50_000; + + let d = Deque::new(); + let done = Arc::new(AtomicBool::new(false)); + let hits = Arc::new(AtomicUsize::new(0)); + + let threads = (0..8) + .map(|_| { + let s = d.stealer(); + let done = done.clone(); + let hits = hits.clone(); + + thread::spawn(move || while !done.load(SeqCst) { + if let Steal::Data(_) = s.steal() { + hits.fetch_add(1, SeqCst); + } + }) + }) + .collect::>(); + + let mut rng = rand::thread_rng(); + let mut expected = 0; + while expected < COUNT { + if rng.gen_range(0, 3) == 0 { + if d.pop().is_some() { + hits.fetch_add(1, SeqCst); + } + } else { + d.push(expected); + expected += 1; + } + } + + while hits.load(SeqCst) < COUNT { + if d.pop().is_some() { + hits.fetch_add(1, SeqCst); + } + } + done.store(true, SeqCst); + + for t in threads { + t.join().unwrap(); + } + } + + #[test] + fn stress() { + run_stress(); + } + + #[test] + fn stress_pinned() { + let _guard = epoch::pin(); + run_stress(); + } + + #[test] + fn no_starvation() { + const COUNT: usize = 50_000; + + let d = Deque::new(); + let done = Arc::new(AtomicBool::new(false)); + + let (threads, hits): (Vec<_>, Vec<_>) = (0..8) + .map(|_| { + let s = d.stealer(); + let done = done.clone(); + let hits = Arc::new(AtomicUsize::new(0)); + + let t = { + let hits = hits.clone(); + thread::spawn(move || while !done.load(SeqCst) { + if let Steal::Data(_) = s.steal() { + hits.fetch_add(1, SeqCst); + } + }) + }; + + (t, hits) + }) + .unzip(); + + let mut rng = rand::thread_rng(); + let mut my_hits = 0; + loop { + for i in 0..rng.gen_range(0, COUNT) { + if rng.gen_range(0, 3) == 0 && my_hits == 0 { + if d.pop().is_some() { + my_hits += 1; + } + } else { + d.push(i); + } + } + + if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) { + break; + } + } + done.store(true, SeqCst); + + for t in threads { + t.join().unwrap(); + } + } + + #[test] + fn 
destructors() { + const COUNT: usize = 50_000; + + struct Elem(usize, Arc>>); + + impl Drop for Elem { + fn drop(&mut self) { + self.1.lock().unwrap().push(self.0); + } + } + + let d = Deque::new(); + + let dropped = Arc::new(Mutex::new(Vec::new())); + let remaining = Arc::new(AtomicUsize::new(COUNT)); + for i in 0..COUNT { + d.push(Elem(i, dropped.clone())); + } + + let threads = (0..8) + .map(|_| { + let s = d.stealer(); + let remaining = remaining.clone(); + + thread::spawn(move || for _ in 0..1000 { + if let Steal::Data(_) = s.steal() { + remaining.fetch_sub(1, SeqCst); + } + }) + }) + .collect::>(); + + for _ in 0..1000 { + if d.pop().is_some() { + remaining.fetch_sub(1, SeqCst); + } + } + + for t in threads { + t.join().unwrap(); + } + + let rem = remaining.load(SeqCst); + assert!(rem > 0); + assert_eq!(d.len(), rem); + + { + let mut v = dropped.lock().unwrap(); + assert_eq!(v.len(), COUNT - rem); + v.clear(); + } + + drop(d); + + { + let mut v = dropped.lock().unwrap(); + assert_eq!(v.len(), rem); + v.sort(); + for pair in v.windows(2) { + assert_eq!(pair[0] + 1, pair[1]); + } + } + } +} diff --git a/third_party/rust/crossbeam-deque/.cargo-checksum.json b/third_party/rust/crossbeam-deque/.cargo-checksum.json index 93cffc883af4..9de0dec46aff 100644 --- a/third_party/rust/crossbeam-deque/.cargo-checksum.json +++ b/third_party/rust/crossbeam-deque/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".travis.yml":"7a28ab46755ee3ed2ad3078ecec5f26cf1b95fa122d947edfc1a15bff4849ae8","CHANGELOG.md":"c134cbbcfdf39e86a51337715daca6498d000e019f2d0d5050d04e14e7ef5219","Cargo.toml":"a247839eb4e5a43632eee8727e969a23b4474a6d1b390ea4a19e3e714d8ba060","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"acc366bfcc7262f4719306196e40d59b4e832179adc9cfe2cd27cc710a6787ac","src/lib.rs":"6f50bc16841c93b80d588bbeae9d56b55a2f3a32fe5232fd6e748362b680b4ef"},"package":"f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"} \ No newline at end of file +{"files":{".travis.yml":"98bac4b37c60606a62a0e81a4a882a11f308637d3d946ca395422d9f9274dea1","CHANGELOG.md":"44023168ca8df497a6bf6145965d3eca080744dd0c1bb3f638d907451b9a47df","Cargo.toml":"777ef5e8132243b5096ce9e3f16cfd400d9216b0cf3f02ae3e1ecc0774f78de6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"27ce503b57a65de4b2a3da3bbacc0ade00230495cc5cc63d2fbbb565d999ac64","src/lib.rs":"d4fac3875f95541899fa7cb79bc0d83c706c81d548a60d6c5f1b99ef4ba2b51c"},"package":"fe8153ef04a7594ded05b427ffad46ddeaf22e63fd48d42b3e1e3bb4db07cae7"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-deque/.travis.yml b/third_party/rust/crossbeam-deque/.travis.yml index e6105d31db26..3d3b0efa7872 100644 --- a/third_party/rust/crossbeam-deque/.travis.yml +++ b/third_party/rust/crossbeam-deque/.travis.yml @@ -4,7 +4,7 @@ rust: - stable - beta - nightly - - 1.13.0 + - 1.20.0 script: - cargo build diff --git a/third_party/rust/crossbeam-deque/CHANGELOG.md b/third_party/rust/crossbeam-deque/CHANGELOG.md index 77973d18e45f..12e5ee67d61e 100644 --- a/third_party/rust/crossbeam-deque/CHANGELOG.md +++ b/third_party/rust/crossbeam-deque/CHANGELOG.md @@ -6,7 +6,32 @@ and this project 
adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ## [Unreleased] +## [0.3.1] - 2018-05-04 + +### Added +- `Deque::capacity` +- `Deque::min_capacity` +- `Deque::shrink_to_fit` + +### Changed +- Update `crossbeam-epoch` to `0.3.0`. +- Support Rust 1.20. +- Shrink the buffer in `Deque::push` if necessary. + +## [0.3.0] - 2018-02-10 + +### Changed +- Update `crossbeam-epoch` to `0.4.0`. +- Drop support for Rust 1.13. + +## [0.2.0] - 2018-02-10 + +### Changed +- Update `crossbeam-epoch` to `0.3.0`. +- Support Rust 1.13. + ## [0.1.1] - 2017-11-29 + ### Changed - Update `crossbeam-epoch` to `0.2.0`. @@ -14,5 +39,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ### Added - First implementation of the Chase-Lev deque. -[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.1...HEAD +[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.3.1...HEAD +[0.3.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.2.0...v0.3.0 +[0.2.0]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.2.0 [0.1.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.1.1 diff --git a/third_party/rust/crossbeam-deque/Cargo.toml b/third_party/rust/crossbeam-deque/Cargo.toml index cc83b634f488..8bc07a00b797 100644 --- a/third_party/rust/crossbeam-deque/Cargo.toml +++ b/third_party/rust/crossbeam-deque/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "crossbeam-deque" -version = "0.2.0" +version = "0.3.1" authors = ["The Crossbeam Project Developers"] description = "Concurrent work-stealing deque" homepage = "https://github.com/crossbeam-rs/crossbeam-deque" @@ -23,10 +23,10 @@ categories = ["algorithms", "concurrency", "data-structures"] license = "MIT/Apache-2.0" repository = "https://github.com/crossbeam-rs/crossbeam-deque" [dependencies.crossbeam-epoch] -version = "0.3.0" +version = "0.4.0" 
[dependencies.crossbeam-utils] -version = "0.2.1" +version = "0.3" [dev-dependencies.rand] version = "0.4" [badges.travis-ci] diff --git a/third_party/rust/crossbeam-deque/README.md b/third_party/rust/crossbeam-deque/README.md index 92de94b5b68c..588e19795cd9 100644 --- a/third_party/rust/crossbeam-deque/README.md +++ b/third_party/rust/crossbeam-deque/README.md @@ -11,7 +11,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -crossbeam-deque = "0.1" +crossbeam-deque = "0.3" ``` Next, add this to your crate: @@ -20,6 +20,8 @@ Next, add this to your crate: extern crate crossbeam_deque; ``` +The minimum required Rust version is 1.20. + ## License Licensed under the terms of MIT license and the Apache License (Version 2.0). diff --git a/third_party/rust/crossbeam-deque/src/lib.rs b/third_party/rust/crossbeam-deque/src/lib.rs index 525611945ee8..40df57333fb7 100644 --- a/third_party/rust/crossbeam-deque/src/lib.rs +++ b/third_party/rust/crossbeam-deque/src/lib.rs @@ -85,6 +85,7 @@ extern crate crossbeam_epoch as epoch; extern crate crossbeam_utils as utils; +use std::cmp; use std::fmt; use std::marker::PhantomData; use std::mem; @@ -136,10 +137,7 @@ impl Buffer { let ptr = v.as_mut_ptr(); mem::forget(v); - Buffer { - ptr: ptr, - cap: cap, - } + Buffer { ptr, cap } } /// Returns a pointer to the element at the specified `index`. @@ -342,6 +340,8 @@ impl Deque { /// /// // The minimum capacity will be rounded up to 1024. /// let d = Deque::::with_min_capacity(1000); + /// assert_eq!(d.min_capacity(), 1024); + /// assert_eq!(d.capacity(), 1024); /// ``` pub fn with_min_capacity(min_cap: usize) -> Deque { Deque { @@ -385,6 +385,96 @@ impl Deque { b.wrapping_sub(t) as usize } + /// Returns the minimum capacity of the deque. + /// + /// The minimum capacity can be specified in [`Deque::with_min_capacity`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// // Gets rounded to the next power of two. 
+ /// let d = Deque::::with_min_capacity(50); + /// assert_eq!(d.min_capacity(), 64); + /// assert_eq!(d.capacity(), 64); + /// ``` + /// + /// [`Deque::with_min_capacity`]: struct.Deque.html#method.with_min_capacity + pub fn min_capacity(&self) -> usize { + self.inner.min_cap + } + + /// Returns the number of elements the deque can hold without reallocating. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// let d = Deque::with_min_capacity(50); + /// assert_eq!(d.capacity(), 64); + /// + /// for i in 0..200 { + /// d.push(i); + /// } + /// assert_eq!(d.capacity(), 256); + /// ``` + pub fn capacity(&self) -> usize { + unsafe { + let buf = self.inner.buffer.load(Relaxed, epoch::unprotected()); + buf.deref().cap + } + } + + /// Shrinks the capacity of the deque as much as possible. + /// + /// The capacity will drop down as close as possible to the length but there may still be some + /// free space left. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_deque::Deque; + /// + /// // Insert a lot of elements. This makes the buffer grow. + /// let d = Deque::new(); + /// for i in 0..200 { + /// d.push(i); + /// } + /// + /// // Remove all elements. + /// let s = d.stealer(); + /// for i in 0..200 { + /// s.steal(); + /// } + /// + /// // Stealers cannot shrink the buffer, so the capacity is still very large. + /// assert!(d.capacity() >= 200); + /// + /// // Shrink the buffer. The capacity drops down, but some free space may still be left. + /// d.shrink_to_fit(); + /// assert!(d.capacity() < 50); + /// ``` + pub fn shrink_to_fit(&self) { + let b = self.inner.bottom.load(Relaxed); + let t = self.inner.top.load(Acquire); + let cap = self.capacity(); + let len = b.wrapping_sub(t); + + // Shrink the capacity as much as possible without overshooting `min_cap` or `len`. 
+ let mut new_cap = cap; + while self.inner.min_cap <= new_cap / 2 && len <= new_cap as isize / 2 { + new_cap /= 2; + } + + if new_cap != cap { + unsafe { + self.inner.resize(new_cap); + } + } + } + /// Pushes an element into the bottom of the deque. /// /// If the internal buffer is full, a new one twice the capacity of the current one will be @@ -411,12 +501,17 @@ impl Deque { // Calculate the length of the deque. let len = b.wrapping_sub(t); - // Is the deque full? let cap = buffer.deref().cap; + // Is the deque full? if len >= cap as isize { // Yes. Grow the underlying buffer. self.inner.resize(2 * cap); buffer = self.inner.buffer.load(Relaxed, epoch::unprotected()); + // Is the new length less than one fourth the capacity? + } else if cap > self.inner.min_cap && len + 1 < cap as isize / 4 { + // Yes. Shrink the underlying buffer. + self.inner.resize(cap / 2); + buffer = self.inner.buffer.load(Relaxed, epoch::unprotected()); } // Write `value` into the right slot and increment `b`. @@ -531,16 +626,14 @@ impl Deque { /// assert_eq!(d.steal(), Steal::Data(1)); /// /// // Attempt to steal an element, but keep retrying if we get `Retry`. - /// loop { + /// let stolen = loop { /// match d.steal() { - /// Steal::Empty => panic!("should steal something"), - /// Steal::Data(data) => { - /// assert_eq!(data, 2); - /// break; - /// } + /// Steal::Empty => break None, + /// Steal::Data(data) => break Some(data), /// Steal::Retry => {} /// } - /// } + /// }; + /// assert_eq!(stolen, Some(2)); /// ``` /// /// [`Steal::Retry`]: enum.Steal.html#variant.Retry @@ -669,7 +762,7 @@ impl Stealer { let t = self.inner.top.load(Relaxed); atomic::fence(SeqCst); let b = self.inner.bottom.load(Relaxed); - std::cmp::max(b.wrapping_sub(t), 0) as usize + cmp::max(b.wrapping_sub(t), 0) as usize } /// Steals an element from the top of the deque. @@ -691,16 +784,14 @@ impl Stealer { /// d.push(2); /// /// // Attempt to steal an element, but keep retrying if we get `Retry`. 
- /// loop { - /// match d.steal() { - /// Steal::Empty => panic!("should steal something"), - /// Steal::Data(data) => { - /// assert_eq!(data, 1); - /// break; - /// } + /// let stolen = loop { + /// match s.steal() { + /// Steal::Empty => break None, + /// Steal::Data(data) => break Some(data), /// Steal::Retry => {} /// } - /// } + /// }; + /// assert_eq!(stolen, Some(1)); /// ``` /// /// [`Steal::Retry`]: enum.Steal.html#variant.Retry diff --git a/third_party/rust/crossbeam-epoch-0.3.1/.cargo-checksum.json b/third_party/rust/crossbeam-epoch-0.3.1/.cargo-checksum.json new file mode 100644 index 000000000000..771a5f53e3b1 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".travis.yml":"d84605e26d95fabc8172af7a621d3e48117b5180d389c6a166d15acb09c9ed9f","CHANGELOG.md":"5e62172f395348eb92a3fd2532ba5d65a7f13286449a3698b41f3aac7a9a4e57","Cargo.toml":"6bcfcac3b6b20026d1020890fcd8cd5f6ceff33741b92fea001993696e2aed17","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"8728114db9ab19bca8e07b36f1cccd1e6a57db6ea03be08679aef2a982736532","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"41b2d03e2cfd46912a3722295843b841e74e10eae6eb23586d3bc3b6d0a41e32","src/atomic.rs":"469ae38d3e8b37eec79c1c21a29a63cd357e49f34f4b6cdde6817f8e1267bd8d","src/collector.rs":"ebebbf1229a0d5339b938825d0dca9dc8642f9fa5bbceafb4e371477186ed4b4","src/default.rs":"804c217df80e0b6df3c6e90c5d6f5153c153567ac28cc75cc62042ba75d24bf2","src/deferred.rs":"1bd6c66c58f92714088b6f9f811368a123143a5f03cf4afc4b19ab24f3181387","src/epoch.rs":"25b85734a4ec5bedb0384a1fe976ec97056a88910a046a270a3e38558f7dbd4b","src/garb
age.rs":"b77a8f87701dca8b63d858bb234137335455b6fc1f223e73c7609542d13daa43","src/guard.rs":"08975d989ba558aba90d64865594b155b2135e628414f77bb8afb9de427a2e0d","src/internal.rs":"a5a6a52999ce99294d544ac7cb82cb820e78f0c41315fc8d7494d21ca6da1135","src/lib.rs":"f3093bc3411f2bd94d662c3cf8719411b62793449b3db1699865f4c08c207af1","src/sync/list.rs":"57c3674c40e30eaf92689ab0e09973d7d161e52a5bdb5b5481b62fd0d10fb4eb","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"868b5bd651e54216fa1827d668ab564c120779113ae7a2a056fee4371db1066c"},"package":"927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-epoch-0.3.1/.travis.yml b/third_party/rust/crossbeam-epoch-0.3.1/.travis.yml new file mode 100644 index 000000000000..2cfadc310fcc --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/.travis.yml @@ -0,0 +1,64 @@ +language: rust + +rust: + - stable + - beta + - nightly + - 1.13.0 + +addons: + apt: + sources: + - ubuntu-toolchain-r-test + - llvm-toolchain-precise + - llvm-toolchain-precise-3.8 + packages: + - llvm-3.8 + - llvm-3.8-dev + - clang-3.8 + - clang-3.8-dev + +script: + - cargo build + - cargo build --release + - cargo build --no-default-features + - cargo build --release --no-default-features + - cargo test + - cargo test --release + + - | + if [ $TRAVIS_RUST_VERSION == nightly ]; then + cargo build --features nightly --no-default-features + cargo build --features nightly --release --no-default-features + fi + + - | + if [ $TRAVIS_RUST_VERSION == nightly ]; then + cargo test --features nightly + fi + + - | + if [[ $TRAVIS_RUST_VERSION == nightly ]]; then + cargo test --features nightly --release + fi + + - | + if [[ $TRAVIS_RUST_VERSION == nightly ]]; then + ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \ + RUSTFLAGS="-Z sanitizer=address" \ + cargo run \ + --target x86_64-unknown-linux-gnu \ + --features sanitize,nightly \ + 
--example sanitize + fi + + - | + if [[ $TRAVIS_RUST_VERSION == nightly ]]; then + ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \ + RUSTFLAGS="-Z sanitizer=address" \ + cargo run \ + --release \ + --target x86_64-unknown-linux-gnu \ + --features sanitize,nightly \ + --example sanitize + fi diff --git a/third_party/rust/crossbeam-epoch-0.3.1/CHANGELOG.md b/third_party/rust/crossbeam-epoch-0.3.1/CHANGELOG.md new file mode 100644 index 000000000000..8554dd44481b --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/CHANGELOG.md @@ -0,0 +1,26 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.2.0] - 2017-11-29 +### Added +- Add method `Owned::into_box`. + +### Changed +- Fix a use-after-free bug in `Local::finalize`. +- Fix an ordering bug in `Global::push_bag`. +- Fix a bug in calculating distance between epochs. + +### Removed +- Remove `impl Into> for Owned`. + +## 0.1.0 - 2017-11-26 +### Added +- First version of the new epoch-based GC. + +[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...HEAD +[0.2.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.1.0...v0.2.0 diff --git a/third_party/rust/crossbeam-epoch-0.3.1/Cargo.toml b/third_party/rust/crossbeam-epoch-0.3.1/Cargo.toml new file mode 100644 index 000000000000..b679defe5a36 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/Cargo.toml @@ -0,0 +1,57 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. 
crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "crossbeam-epoch" +version = "0.3.1" +authors = ["The Crossbeam Project Developers"] +description = "Epoch-based garbage collection" +homepage = "https://github.com/crossbeam-rs/crossbeam-epoch" +documentation = "https://docs.rs/crossbeam-epoch" +readme = "README.md" +keywords = ["lock-free", "rcu", "atomic", "garbage"] +categories = ["concurrency", "memory-management"] +license = "MIT/Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam-epoch" +[dependencies.arrayvec] +version = "0.4" +default-features = false + +[dependencies.cfg-if] +version = "0.1" + +[dependencies.crossbeam-utils] +version = "0.2" +default-features = false + +[dependencies.lazy_static] +version = "1.0.0" +optional = true + +[dependencies.memoffset] +version = "0.2" + +[dependencies.nodrop] +version = "0.1.12" +default-features = false + +[dependencies.scopeguard] +version = "0.3" +default-features = false +[dev-dependencies.rand] +version = "0.3" + +[features] +default = ["use_std"] +nightly = ["arrayvec/use_union"] +sanitize = [] +use_std = ["lazy_static", "crossbeam-utils/use_std"] diff --git a/third_party/rust/tokio-io/LICENSE-APACHE b/third_party/rust/crossbeam-epoch-0.3.1/LICENSE-APACHE similarity index 100% rename from third_party/rust/tokio-io/LICENSE-APACHE rename to third_party/rust/crossbeam-epoch-0.3.1/LICENSE-APACHE diff --git a/third_party/rust/crossbeam-epoch-0.3.1/LICENSE-MIT b/third_party/rust/crossbeam-epoch-0.3.1/LICENSE-MIT new file mode 100644 index 000000000000..25597d5838fa --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person 
obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/crossbeam-epoch-0.3.1/README.md b/third_party/rust/crossbeam-epoch-0.3.1/README.md new file mode 100644 index 000000000000..70ef3956ca77 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/README.md @@ -0,0 +1,33 @@ +# Epoch-based garbage collection + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-epoch.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-epoch) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-epoch) +[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](https://crates.io/crates/crossbeam-epoch) +[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](https://docs.rs/crossbeam-epoch) + +This crate provides epoch-based garbage collection for use in concurrent data structures. + +If a thread removes a node from a concurrent data structure, other threads +may still have pointers to that node, so it cannot be immediately destructed. 
+Epoch GC allows deferring destruction until it becomes safe to do so. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-epoch = "0.2" +``` + +Next, add this to your crate: + +```rust +extern crate crossbeam_epoch as epoch; +``` + +## License + +Licensed under the terms of MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. diff --git a/third_party/rust/crossbeam-epoch-0.3.1/benches/defer.rs b/third_party/rust/crossbeam-epoch-0.3.1/benches/defer.rs new file mode 100644 index 000000000000..5eb2167ea4f7 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/benches/defer.rs @@ -0,0 +1,73 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate test; + +use epoch::Owned; +use test::Bencher; +use utils::scoped::scope; + +#[bench] +fn single_alloc_defer_free(b: &mut Bencher) { + b.iter(|| { + let guard = &epoch::pin(); + let p = Owned::new(1).into_shared(guard); + unsafe { + guard.defer(move || p.into_owned()); + } + }); +} + +#[bench] +fn single_defer(b: &mut Bencher) { + b.iter(|| { + let guard = &epoch::pin(); + unsafe { + guard.defer(move || ()); + } + }); +} + +#[bench] +fn multi_alloc_defer_free(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + let p = Owned::new(1).into_shared(guard); + unsafe { + guard.defer(move || p.into_owned()); + } + } + }); + } + }); + }); +} + +#[bench] +fn multi_defer(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + unsafe { + guard.defer(move || ()); + } + } + }); + } + }); + }); +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/benches/flush.rs 
b/third_party/rust/crossbeam-epoch-0.3.1/benches/flush.rs new file mode 100644 index 000000000000..5023c1105ccb --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/benches/flush.rs @@ -0,0 +1,51 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate test; + +use std::sync::Barrier; + +use test::Bencher; +use utils::scoped::scope; + +#[bench] +fn single_flush(b: &mut Bencher) { + const THREADS: usize = 16; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + epoch::pin(); + start.wait(); + end.wait(); + }); + } + + start.wait(); + b.iter(|| epoch::pin().flush()); + end.wait(); + }); +} + +#[bench] +fn multi_flush(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + guard.flush(); + } + }); + } + }); + }); +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/benches/pin.rs b/third_party/rust/crossbeam-epoch-0.3.1/benches/pin.rs new file mode 100644 index 000000000000..676328bfaae4 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/benches/pin.rs @@ -0,0 +1,36 @@ +#![feature(test)] + +extern crate crossbeam_epoch as epoch; +extern crate crossbeam_utils as utils; +extern crate test; + +use test::Bencher; +use utils::scoped::scope; + +#[bench] +fn single_pin(b: &mut Bencher) { + b.iter(|| epoch::pin()); +} + +#[bench] +fn single_default_handle_pin(b: &mut Bencher) { + b.iter(|| epoch::default_handle().pin()); +} + +#[bench] +fn multi_pin(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 100_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|| { + for _ in 0..STEPS { + epoch::pin(); + } + }); + } + }); + }); +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/examples/sanitize.rs 
b/third_party/rust/crossbeam-epoch-0.3.1/examples/sanitize.rs new file mode 100644 index 000000000000..7635ae881fc2 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/examples/sanitize.rs @@ -0,0 +1,70 @@ +extern crate crossbeam_epoch as epoch; +extern crate rand; + +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; +use std::time::{Duration, Instant}; +use std::thread; + +use epoch::{Atomic, Collector, Handle, Owned, Shared}; +use rand::Rng; + +fn worker(a: Arc>, handle: Handle) -> usize { + let mut rng = rand::thread_rng(); + let mut sum = 0; + + if rng.gen() { + thread::sleep(Duration::from_millis(1)); + } + let timeout = Duration::from_millis(rng.gen_range(0, 10)); + let now = Instant::now(); + + while now.elapsed() < timeout { + for _ in 0..100 { + let guard = &handle.pin(); + guard.flush(); + + let val = if rng.gen() { + let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard); + unsafe { + guard.defer(move || p.into_owned()); + guard.flush(); + p.deref().load(Relaxed) + } + } else { + let p = a.load(Acquire, guard); + unsafe { + p.deref().fetch_add(sum, Relaxed) + } + }; + + sum = sum.wrapping_add(val); + } + } + + sum +} + +fn main() { + for _ in 0..100 { + let collector = Collector::new(); + let a = Arc::new(Atomic::new(AtomicUsize::new(777))); + + let threads = (0..16) + .map(|_| { + let a = a.clone(); + let h = collector.handle(); + thread::spawn(move || worker(a, h)) + }) + .collect::>(); + + for t in threads { + t.join().unwrap(); + } + + unsafe { + a.swap(Shared::null(), AcqRel, epoch::unprotected()).into_owned(); + } + } +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/atomic.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/atomic.rs new file mode 100644 index 000000000000..9023bd7ab37c --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/atomic.rs @@ -0,0 +1,1097 @@ +use core::borrow::{Borrow, BorrowMut}; +use core::cmp; +use core::fmt; +use 
core::marker::PhantomData; +use core::mem; +use core::ptr; +use core::ops::{Deref, DerefMut}; +use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; +use core::sync::atomic::Ordering; +use alloc::boxed::Box; + +use guard::Guard; + +/// Given ordering for the success case in a compare-exchange operation, returns the strongest +/// appropriate ordering for the failure case. +#[inline] +fn strongest_failure_ordering(ord: Ordering) -> Ordering { + match ord { + Ordering::Relaxed | Ordering::Release => Ordering::Relaxed, + Ordering::Acquire | Ordering::AcqRel => Ordering::Acquire, + _ => Ordering::SeqCst, + } +} + +/// The error returned on failed compare-and-set operation. +pub struct CompareAndSetError<'g, T: 'g, P: Pointer> { + /// The value in the atomic pointer at the time of the failed operation. + pub current: Shared<'g, T>, + + /// The new value, which the operation failed to store. + pub new: P, +} + +impl<'g, T: 'g, P: Pointer + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("CompareAndSetError") + .field("current", &self.current) + .field("new", &self.new) + .finish() + } +} + +/// Memory orderings for compare-and-set operations. +/// +/// A compare-and-set operation can have different memory orderings depending on whether it +/// succeeds or fails. This trait generalizes different ways of specifying memory orderings. +/// +/// The two ways of specifying orderings for compare-and-set are: +/// +/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate +/// ordering is chosen. +/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is +/// for the failure case. +pub trait CompareAndSetOrdering { + /// The ordering of the operation when it succeeds. + fn success(&self) -> Ordering; + + /// The ordering of the operation when it fails. 
+ /// + /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than + /// the success ordering. + fn failure(&self) -> Ordering; +} + +impl CompareAndSetOrdering for Ordering { + #[inline] + fn success(&self) -> Ordering { + *self + } + + #[inline] + fn failure(&self) -> Ordering { + strongest_failure_ordering(*self) + } +} + +impl CompareAndSetOrdering for (Ordering, Ordering) { + #[inline] + fn success(&self) -> Ordering { + self.0 + } + + #[inline] + fn failure(&self) -> Ordering { + self.1 + } +} + +/// Panics if the pointer is not properly unaligned. +#[inline] +fn ensure_aligned(raw: *const T) { + assert_eq!(raw as usize & low_bits::(), 0, "unaligned pointer"); +} + +/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. +#[inline] +fn low_bits() -> usize { + (1 << mem::align_of::().trailing_zeros()) - 1 +} + +/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`. +/// +/// `tag` is truncated to fit into the unused bits of the pointer to `T`. +#[inline] +fn data_with_tag(data: usize, tag: usize) -> usize { + (data & !low_bits::()) | (tag & low_bits::()) +} + +/// Decomposes a tagged pointer `data` into the pointer and the tag. +#[inline] +fn decompose_data(data: usize) -> (*mut T, usize) { + let raw = (data & !low_bits::()) as *mut T; + let tag = data & low_bits::(); + (raw, tag) +} + +/// An atomic pointer that can be safely shared between threads. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. More precisely, a tag should be less than `(1 << +/// mem::align_of::().trailing_zeros())`. +/// +/// Any method that loads the pointer must be passed a reference to a [`Guard`]. 
+/// +/// [`Guard`]: struct.Guard.html +pub struct Atomic { + data: AtomicUsize, + _marker: PhantomData<*mut T>, +} + +unsafe impl Send for Atomic {} +unsafe impl Sync for Atomic {} + +impl Atomic { + /// Returns a new atomic pointer pointing to the tagged pointer `data`. + fn from_data(data: usize) -> Atomic { + Atomic { + data: AtomicUsize::new(data), + _marker: PhantomData, + } + } + + /// Returns a new null atomic pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::::null(); + /// ``` + #[cfg(not(feature = "nightly"))] + pub fn null() -> Atomic { + Atomic { + data: ATOMIC_USIZE_INIT, + _marker: PhantomData, + } + } + + /// Returns a new null atomic pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::::null(); + /// ``` + #[cfg(feature = "nightly")] + pub const fn null() -> Atomic { + Self { + data: ATOMIC_USIZE_INIT, + _marker: PhantomData, + } + } + + /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::new(1234); + /// ``` + pub fn new(value: T) -> Atomic { + Self::from(Owned::new(value)) + } + + /// Loads a `Shared` from the atomic pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// ``` + pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.load(ord)) } + } + + /// Stores a `Shared` or `Owned` pointer into the atomic pointer. 
+ /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// a.store(Shared::null(), SeqCst); + /// a.store(Owned::new(1234), SeqCst); + /// ``` + pub fn store<'g, P: Pointer>(&self, new: P, ord: Ordering) { + self.data.store(new.into_data(), ord); + } + + /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous + /// `Shared`. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.swap(Shared::null(), SeqCst, guard); + /// ``` + pub fn swap<'g, P: Pointer>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.swap(new.into_data(), ord)) } + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// The return value is a result indicating whether the new pointer was written. On success the + /// pointer that was written is returned. On failure the actual current value and `new` are + /// returned. + /// + /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory + /// ordering of this operation. 
+ /// + /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// + /// let guard = &epoch::pin(); + /// let mut curr = a.load(SeqCst, guard); + /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); + /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); + /// ``` + pub fn compare_and_set<'g, O, P>( + &self, + current: Shared, + new: P, + ord: O, + _: &'g Guard, + ) -> Result, CompareAndSetError<'g, T, P>> + where + O: CompareAndSetOrdering, + P: Pointer, + { + let new = new.into_data(); + self.data + .compare_exchange(current.into_data(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_data(new) }) + .map_err(|current| unsafe { + CompareAndSetError { + current: Shared::from_data(current), + new: P::from_data(new), + } + }) + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison + /// succeeds, which can result in more efficient code on some platforms. The return value is a + /// result indicating whether the new pointer was written. On success the pointer that was + /// written is returned. On failure the actual current value and `new` are returned. + /// + /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory + /// ordering of this operation. 
+ /// + /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set + /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// + /// let mut new = Owned::new(5678); + /// let mut ptr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { + /// Ok(p) => { + /// ptr = p; + /// break; + /// } + /// Err(err) => { + /// ptr = err.current; + /// new = err.new; + /// } + /// } + /// } + /// + /// let mut curr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { + /// Ok(_) => break, + /// Err(err) => curr = err.current, + /// } + /// } + /// ``` + pub fn compare_and_set_weak<'g, O, P>( + &self, + current: Shared, + new: P, + ord: O, + _: &'g Guard, + ) -> Result, CompareAndSetError<'g, T, P>> + where + O: CompareAndSetOrdering, + P: Pointer, + { + let new = new.into_data(); + self.data + .compare_exchange_weak(current.into_data(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_data(new) }) + .map_err(|current| unsafe { + CompareAndSetError { + current: Shared::from_data(current), + new: P::from_data(new), + } + }) + } + + /// Bitwise "and" with the current tag. + /// + /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. 
+ /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Shared::null().with_tag(3)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); + /// assert_eq!(a.load(SeqCst, guard).tag(), 2); + /// ``` + pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.fetch_and(val | !low_bits::(), ord)) } + } + + /// Bitwise "or" with the current tag. + /// + /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Shared::null().with_tag(1)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); + /// assert_eq!(a.load(SeqCst, guard).tag(), 3); + /// ``` + pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.fetch_or(val & low_bits::(), ord)) } + } + + /// Bitwise "xor" with the current tag. + /// + /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. 
+ /// + /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Shared::null().with_tag(1)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); + /// assert_eq!(a.load(SeqCst, guard).tag(), 2); + /// ``` + pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.fetch_xor(val & low_bits::(), ord)) } + } +} + +impl fmt::Debug for Atomic { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let data = self.data.load(Ordering::SeqCst); + let (raw, tag) = decompose_data::(data); + + f.debug_struct("Atomic") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl fmt::Pointer for Atomic { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let data = self.data.load(Ordering::SeqCst); + let (raw, _) = decompose_data::(data); + fmt::Pointer::fmt(&raw, f) + } +} + +impl Clone for Atomic { + /// Returns a copy of the atomic value. + /// + /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other + /// atomics or fences. + fn clone(&self) -> Self { + let data = self.data.load(Ordering::Relaxed); + Atomic::from_data(data) + } +} + +impl Default for Atomic { + fn default() -> Self { + Atomic::null() + } +} + +impl From> for Atomic { + /// Returns a new atomic pointer pointing to `owned`. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Owned}; + /// + /// let a = Atomic::::from(Owned::new(1234)); + /// ``` + fn from(owned: Owned) -> Self { + let data = owned.data; + mem::forget(owned); + Self::from_data(data) + } +} + +impl From> for Atomic { + fn from(b: Box) -> Self { + Self::from(Owned::from(b)) + } +} + +impl From for Atomic { + fn from(t: T) -> Self { + Self::new(t) + } +} + +impl<'g, T> From> for Atomic { + /// Returns a new atomic pointer pointing to `ptr`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Shared}; + /// + /// let a = Atomic::::from(Shared::::null()); + /// ``` + fn from(ptr: Shared<'g, T>) -> Self { + Self::from_data(ptr.data) + } +} + +impl From<*const T> for Atomic { + /// Returns a new atomic pointer pointing to `raw`. + /// + /// # Examples + /// + /// ``` + /// use std::ptr; + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::::from(ptr::null::()); + /// ``` + fn from(raw: *const T) -> Self { + Self::from_data(raw as usize) + } +} + +/// A trait for either `Owned` or `Shared` pointers. +pub trait Pointer { + /// Returns the machine representation of the pointer. + fn into_data(self) -> usize; + + /// Returns a new pointer pointing to the tagged pointer `data`. + unsafe fn from_data(data: usize) -> Self; +} + +/// An owned heap-allocated object. +/// +/// This type is very similar to `Box`. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. +pub struct Owned { + data: usize, + _marker: PhantomData>, +} + +impl Pointer for Owned { + #[inline] + fn into_data(self) -> usize { + let data = self.data; + mem::forget(self); + data + } + + /// Returns a new pointer pointing to the tagged pointer `data`. + /// + /// # Panics + /// + /// Panics if the data is zero in debug mode. 
+ #[inline] + unsafe fn from_data(data: usize) -> Self { + debug_assert!(data != 0, "converting zero into `Owned`"); + Owned { + data: data, + _marker: PhantomData, + } + } +} + +impl Owned { + /// Allocates `value` on the heap and returns a new owned pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(1234); + /// ``` + pub fn new(value: T) -> Owned { + Self::from(Box::new(value)) + } + + /// Returns a new owned pointer pointing to `raw`. + /// + /// This function is unsafe because improper use may lead to memory problems. Argument `raw` + /// must be a valid pointer. Also, a double-free may occur if the function is called twice on + /// the same raw pointer. + /// + /// # Panics + /// + /// Panics if `raw` is not properly aligned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; + /// ``` + pub unsafe fn from_raw(raw: *mut T) -> Owned { + ensure_aligned(raw); + Self::from_data(raw as usize) + } + + /// Converts the owned pointer into a [`Shared`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Owned}; + /// + /// let o = Owned::new(1234); + /// let guard = &epoch::pin(); + /// let p = o.into_shared(guard); + /// ``` + /// + /// [`Shared`]: struct.Shared.html + pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.into_data()) } + } + + /// Converts the owned pointer into a `Box`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Owned}; + /// + /// let o = Owned::new(1234); + /// let b: Box = o.into_box(); + /// assert_eq!(*b, 1234); + /// ``` + pub fn into_box(self) -> Box { + let (raw, _) = decompose_data::(self.data); + mem::forget(self); + unsafe { Box::from_raw(raw) } + } + + /// Returns the tag stored within the pointer. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// assert_eq!(Owned::new(1234).tag(), 0); + /// ``` + pub fn tag(&self) -> usize { + let (_, tag) = decompose_data::(self.data); + tag + } + + /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the + /// unused bits of the pointer to `T`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(0u64); + /// assert_eq!(o.tag(), 0); + /// let o = o.with_tag(5); + /// assert_eq!(o.tag(), 5); + /// ``` + pub fn with_tag(self, tag: usize) -> Owned { + let data = self.into_data(); + unsafe { Self::from_data(data_with_tag::(data, tag)) } + } +} + +impl Drop for Owned { + fn drop(&mut self) { + let (raw, _) = decompose_data::(self.data); + unsafe { + drop(Box::from_raw(raw)); + } + } +} + +impl fmt::Debug for Owned { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (raw, tag) = decompose_data::(self.data); + + f.debug_struct("Owned") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl Clone for Owned { + fn clone(&self) -> Self { + Owned::new((**self).clone()).with_tag(self.tag()) + } +} + +impl Deref for Owned { + type Target = T; + + fn deref(&self) -> &T { + let (raw, _) = decompose_data::(self.data); + unsafe { &*raw } + } +} + +impl DerefMut for Owned { + fn deref_mut(&mut self) -> &mut T { + let (raw, _) = decompose_data::(self.data); + unsafe { &mut *raw } + } +} + +impl From for Owned { + fn from(t: T) -> Self { + Owned::new(t) + } +} + +impl From> for Owned { + /// Returns a new owned pointer pointing to `b`. + /// + /// # Panics + /// + /// Panics if the pointer (the `Box`) is not properly aligned. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; + /// ``` + fn from(b: Box) -> Self { + unsafe { Self::from_raw(Box::into_raw(b)) } + } +} + +impl Borrow for Owned { + fn borrow(&self) -> &T { + &**self + } +} + +impl BorrowMut for Owned { + fn borrow_mut(&mut self) -> &mut T { + &mut **self + } +} + +impl AsRef for Owned { + fn as_ref(&self) -> &T { + &**self + } +} + +impl AsMut for Owned { + fn as_mut(&mut self) -> &mut T { + &mut **self + } +} + +/// A pointer to an object protected by the epoch GC. +/// +/// The pointer is valid for use only during the lifetime `'g`. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. +pub struct Shared<'g, T: 'g> { + data: usize, + _marker: PhantomData<(&'g (), *const T)>, +} + +impl<'g, T> Clone for Shared<'g, T> { + fn clone(&self) -> Self { + Shared { + data: self.data, + _marker: PhantomData, + } + } +} + +impl<'g, T> Copy for Shared<'g, T> {} + +impl<'g, T> Pointer for Shared<'g, T> { + #[inline] + fn into_data(self) -> usize { + self.data + } + + #[inline] + unsafe fn from_data(data: usize) -> Self { + Shared { + data: data, + _marker: PhantomData, + } + } +} + +impl<'g, T> Shared<'g, T> { + /// Returns a new null pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Shared; + /// + /// let p = Shared::::null(); + /// assert!(p.is_null()); + /// ``` + pub fn null() -> Shared<'g, T> { + Shared { + data: 0, + _marker: PhantomData, + } + } + + /// Returns `true` if the pointer is null. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::null(); + /// let guard = &epoch::pin(); + /// assert!(a.load(SeqCst, guard).is_null()); + /// a.store(Owned::new(1234), SeqCst); + /// assert!(!a.load(SeqCst, guard).is_null()); + /// ``` + pub fn is_null(&self) -> bool { + self.as_raw().is_null() + } + + /// Converts the pointer to a raw pointer (without the tag). + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let o = Owned::new(1234); + /// let raw = &*o as *const _; + /// let a = Atomic::from(o); + /// + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// assert_eq!(p.as_raw(), raw); + /// ``` + pub fn as_raw(&self) -> *const T { + let (raw, _) = decompose_data::(self.data); + raw + } + + /// Dereferences the pointer. + /// + /// Returns a reference to the pointee that is valid during the lifetime `'g`. + /// + /// # Safety + /// + /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. + /// + /// Another concern is the possiblity of data races due to lack of proper synchronization. + /// For example, consider the following scenario: + /// + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` + /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` + /// + /// The problem is that relaxed orderings don't synchronize initialization of the object with + /// the read from the second thread. This is a data race. A possible solution would be to use + /// `Release` and `Acquire` orderings. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.deref(), &1234); + /// } + /// ``` + pub unsafe fn deref(&self) -> &'g T { + &*self.as_raw() + } + + /// Converts the pointer to a reference. + /// + /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. + /// + /// # Safety + /// + /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. + /// + /// Another concern is the possiblity of data races due to lack of proper synchronization. + /// For example, consider the following scenario: + /// + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` + /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` + /// + /// The problem is that relaxed orderings don't synchronize initialization of the object with + /// the read from the second thread. This is a data race. A possible solution would be to use + /// `Release` and `Acquire` orderings. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.as_ref(), Some(&1234)); + /// } + /// ``` + pub unsafe fn as_ref(&self) -> Option<&'g T> { + self.as_raw().as_ref() + } + + /// Takes ownership of the pointee. + /// + /// # Panics + /// + /// Panics if this pointer is null, but only in debug mode. + /// + /// # Safety + /// + /// This method may be called only if the pointer is valid and nobody else is holding a + /// reference to the same object. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// unsafe { + /// let guard = &epoch::unprotected(); + /// let p = a.load(SeqCst, guard); + /// drop(p.into_owned()); + /// } + /// ``` + pub unsafe fn into_owned(self) -> Owned { + debug_assert!( + self.as_raw() != ptr::null(), + "converting a null `Shared` into `Owned`" + ); + Owned::from_data(self.data) + } + + /// Returns the tag stored within the pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::::from(Owned::new(0u64).with_tag(5)); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// assert_eq!(p.tag(), 5); + /// ``` + pub fn tag(&self) -> usize { + let (_, tag) = decompose_data::(self.data); + tag + } + + /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the + /// unused bits of the pointer to `T`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(0u64); + /// let guard = &epoch::pin(); + /// let p1 = a.load(SeqCst, guard); + /// let p2 = p1.with_tag(5); + /// + /// assert_eq!(p1.tag(), 0); + /// assert_eq!(p2.tag(), 5); + /// assert_eq!(p1.as_raw(), p2.as_raw()); + /// ``` + pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { + unsafe { Self::from_data(data_with_tag::(self.data, tag)) } + } +} + +impl<'g, T> From<*const T> for Shared<'g, T> { + /// Returns a new pointer pointing to `raw`. + /// + /// # Panics + /// + /// Panics if `raw` is not properly aligned. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Shared; + /// + /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) }; + /// assert!(!p.is_null()); + /// ``` + fn from(raw: *const T) -> Self { + ensure_aligned(raw); + unsafe { Self::from_data(raw as usize) } + } +} + +impl<'g, T> PartialEq> for Shared<'g, T> { + fn eq(&self, other: &Self) -> bool { + self.data == other.data + } +} + +impl<'g, T> Eq for Shared<'g, T> {} + +impl<'g, T> PartialOrd> for Shared<'g, T> { + fn partial_cmp(&self, other: &Self) -> Option { + self.data.partial_cmp(&other.data) + } +} + +impl<'g, T> Ord for Shared<'g, T> { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.data.cmp(&other.data) + } +} + +impl<'g, T> fmt::Debug for Shared<'g, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (raw, tag) = decompose_data::(self.data); + + f.debug_struct("Shared") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl<'g, T> fmt::Pointer for Shared<'g, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.as_raw(), f) + } +} + +impl<'g, T> Default for Shared<'g, T> { + fn default() -> Self { + Shared::null() + } +} + +#[cfg(test)] +mod tests { + use super::Shared; + + #[test] + fn valid_tag_i8() { + Shared::::null().with_tag(0); + } + + #[test] + fn valid_tag_i64() { + Shared::::null().with_tag(7); + } +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/collector.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/collector.rs new file mode 100644 index 000000000000..2dedfc5fc8f9 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/collector.rs @@ -0,0 +1,426 @@ +/// Epoch-based garbage collector. 
+/// +/// # Examples +/// +/// ``` +/// use crossbeam_epoch::Collector; +/// +/// let collector = Collector::new(); +/// +/// let handle = collector.handle(); +/// drop(collector); // `handle` still works after dropping `collector` +/// +/// handle.pin().flush(); +/// ``` + +use alloc::arc::Arc; + +use internal::{Global, Local}; +use guard::Guard; + +/// An epoch-based garbage collector. +pub struct Collector { + global: Arc, +} + +unsafe impl Send for Collector {} +unsafe impl Sync for Collector {} + +impl Collector { + /// Creates a new collector. + pub fn new() -> Self { + Collector { global: Arc::new(Global::new()) } + } + + /// Creates a new handle for the collector. + pub fn handle(&self) -> Handle { + Handle { local: Local::register(&self.global) } + } +} + +impl Clone for Collector { + /// Creates another reference to the same garbage collector. + fn clone(&self) -> Self { + Collector { global: self.global.clone() } + } +} + +/// A handle to a garbage collector. +pub struct Handle { + local: *const Local, +} + +impl Handle { + /// Pins the handle. + #[inline] + pub fn pin(&self) -> Guard { + unsafe { (*self.local).pin() } + } + + /// Returns `true` if the handle is pinned. 
+ #[inline] + pub fn is_pinned(&self) -> bool { + unsafe { (*self.local).is_pinned() } + } +} + +unsafe impl Send for Handle {} + +impl Drop for Handle { + #[inline] + fn drop(&mut self) { + unsafe { + Local::release_handle(&*self.local); + } + } +} + +impl Clone for Handle { + #[inline] + fn clone(&self) -> Self { + unsafe { + Local::acquire_handle(&*self.local); + } + Handle { local: self.local } + } +} + +#[cfg(test)] +mod tests { + use std::mem; + use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + use std::sync::atomic::Ordering; + + use crossbeam_utils::scoped; + + use {Collector, Owned}; + + const NUM_THREADS: usize = 8; + + #[test] + fn pin_reentrant() { + let collector = Collector::new(); + let handle = collector.handle(); + drop(collector); + + assert!(!handle.is_pinned()); + { + let _guard = &handle.pin(); + assert!(handle.is_pinned()); + { + let _guard = &handle.pin(); + assert!(handle.is_pinned()); + } + assert!(handle.is_pinned()); + } + assert!(!handle.is_pinned()); + } + + #[test] + fn flush_local_bag() { + let collector = Collector::new(); + let handle = collector.handle(); + drop(collector); + + for _ in 0..100 { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(7).into_shared(guard); + guard.defer(move || a.into_owned()); + + assert!(!(*guard.get_local()).is_bag_empty()); + + while !(*guard.get_local()).is_bag_empty() { + guard.flush(); + } + } + } + } + + #[test] + fn garbage_buffering() { + let collector = Collector::new(); + let handle = collector.handle(); + drop(collector); + + let guard = &handle.pin(); + unsafe { + for _ in 0..10 { + let a = Owned::new(7).into_shared(guard); + guard.defer(move || a.into_owned()); + } + assert!(!(*guard.get_local()).is_bag_empty()); + } + } + + #[test] + fn pin_holds_advance() { + let collector = Collector::new(); + + let threads = (0..NUM_THREADS) + .map(|_| { + scoped::scope(|scope| { + scope.spawn(|| { + let handle = collector.handle(); + for _ in 0..500_000 { + let guard = 
&handle.pin(); + + let before = collector.global.load_epoch(Ordering::Relaxed); + collector.global.collect(guard); + let after = collector.global.load_epoch(Ordering::Relaxed); + + assert!(after.wrapping_sub(before) <= 2); + } + }) + }) + }) + .collect::>(); + drop(collector); + + for t in threads { + t.join(); + } + } + + #[test] + fn incremental() { + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.handle(); + + unsafe { + let guard = &handle.pin(); + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + guard.flush(); + } + + let mut last = 0; + + while last < COUNT { + let curr = DESTROYS.load(Ordering::Relaxed); + assert!(curr - last <= 1024); + last = curr; + + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert!(DESTROYS.load(Ordering::Relaxed) == 100_000); + } + + #[test] + fn buffering() { + const COUNT: usize = 10; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.handle(); + + unsafe { + let guard = &handle.pin(); + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + } + + for _ in 0..100_000 { + collector.global.collect(&handle.pin()); + } + assert!(DESTROYS.load(Ordering::Relaxed) < COUNT); + + handle.pin().flush(); + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn count_drops() { + const COUNT: usize = 100_000; + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector 
= Collector::new(); + let handle = collector.handle(); + + unsafe { + let guard = &handle.pin(); + + for _ in 0..COUNT { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer(move || a.into_owned()); + } + guard.flush(); + } + + while DROPS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn count_destroy() { + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.handle(); + + unsafe { + let guard = &handle.pin(); + + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + guard.flush(); + } + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn drop_array() { + const COUNT: usize = 700; + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + let handle = collector.handle(); + + let mut guard = handle.pin(); + + let mut v = Vec::with_capacity(COUNT); + for i in 0..COUNT { + v.push(Elem(i as i32)); + } + + { + let a = Owned::new(v).into_shared(&guard); + unsafe { guard.defer(move || a.into_owned()); } + guard.flush(); + } + + while DROPS.load(Ordering::Relaxed) < COUNT { + guard.repin(); + collector.global.collect(&guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn destroy_array() { + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; + + let collector = Collector::new(); + let handle = collector.handle(); + + unsafe { + let guard = &handle.pin(); + + let mut v = 
Vec::with_capacity(COUNT); + for i in 0..COUNT { + v.push(i as i32); + } + + let ptr = v.as_mut_ptr() as usize; + let len = v.len(); + guard.defer(move || { + drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len)); + DESTROYS.fetch_add(len, Ordering::Relaxed); + }); + guard.flush(); + + mem::forget(v); + } + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn stress() { + const THREADS: usize = 8; + const COUNT: usize = 100_000; + static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + + let threads = (0..THREADS) + .map(|_| { + scoped::scope(|scope| { + scope.spawn(|| { + let handle = collector.handle(); + for _ in 0..COUNT { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer(move || a.into_owned()); + } + } + }) + }) + }) + .collect::>(); + + for t in threads { + t.join(); + } + + let handle = collector.handle(); + while DROPS.load(Ordering::Relaxed) < COUNT * THREADS { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS); + } +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/default.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/default.rs new file mode 100644 index 000000000000..e45e253d77ac --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/default.rs @@ -0,0 +1,40 @@ +//! The default garbage collector. +//! +//! For each thread, a participant is lazily initialized on its first use, when the current thread +//! is registered in the default collector. If initialized, the thread's participant will get +//! destructed on thread exit, which in turn unregisters the thread. 
+ +use collector::{Collector, Handle}; +use guard::Guard; + +lazy_static! { + /// The global data for the default garbage collector. + static ref COLLECTOR: Collector = Collector::new(); +} + +thread_local! { + /// The per-thread participant for the default garbage collector. + static HANDLE: Handle = COLLECTOR.handle(); +} + +/// Pins the current thread. +#[inline] +pub fn pin() -> Guard { + // FIXME(jeehoonkang): thread-local storage may be destructed at the time `pin()` is called. For + // that case, we should use `HANDLE.try_with()` instead. + HANDLE.with(|handle| handle.pin()) +} + +/// Returns `true` if the current thread is pinned. +#[inline] +pub fn is_pinned() -> bool { + // FIXME(jeehoonkang): thread-local storage may be destructed at the time `pin()` is called. For + // that case, we should use `HANDLE.try_with()` instead. + HANDLE.with(|handle| handle.is_pinned()) +} + +/// Returns the default handle associated with the current thread. +#[inline] +pub fn default_handle() -> Handle { + HANDLE.with(|handle| handle.clone()) +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/deferred.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/deferred.rs new file mode 100644 index 000000000000..6634a2ac4bb7 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/deferred.rs @@ -0,0 +1,147 @@ +use core::mem; +use core::ptr; +use alloc::boxed::Box; + +/// Number of words a piece of `Data` can hold. +/// +/// Three words should be enough for the majority of cases. For example, you can fit inside it the +/// function pointer together with a fat pointer representing an object that needs to be destroyed. +const DATA_WORDS: usize = 3; + +/// Some space to keep a `FnOnce()` object on the stack. +type Data = [usize; DATA_WORDS]; + +/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap. +/// +/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure. 
+pub struct Deferred { + call: unsafe fn(*mut u8), + data: Data, +} + +impl Deferred { + /// Constructs a new `Deferred` from a `FnOnce()`. + pub fn new(f: F) -> Self { + let size = mem::size_of::(); + let align = mem::align_of::(); + + unsafe { + if size <= mem::size_of::() && align <= mem::align_of::() { + let mut data: Data = mem::uninitialized(); + ptr::write(&mut data as *mut Data as *mut F, f); + + unsafe fn call(raw: *mut u8) { + let f: F = ptr::read(raw as *mut F); + f(); + } + + Deferred { + call: call::, + data: data, + } + } else { + let b: Box = Box::new(f); + let mut data: Data = mem::uninitialized(); + ptr::write(&mut data as *mut Data as *mut Box, b); + + unsafe fn call(raw: *mut u8) { + let b: Box = ptr::read(raw as *mut Box); + (*b)(); + } + + Deferred { + call: call::, + data: data, + } + } + } + } + + /// Calls the function or panics if it was already called. + #[inline] + pub fn call(&mut self) { + unsafe fn fail(_: *mut u8) { + panic!("cannot call `FnOnce` more than once"); + } + + let call = mem::replace(&mut self.call, fail); + unsafe { + call(&mut self.data as *mut Data as *mut u8); + } + } +} + +#[cfg(test)] +mod tests { + use std::cell::Cell; + use super::Deferred; + + #[test] + fn on_stack() { + let fired = &Cell::new(false); + let a = [0usize; 1]; + + let mut d = Deferred::new(move || { + drop(a); + fired.set(true); + }); + + assert!(!fired.get()); + d.call(); + assert!(fired.get()); + } + + #[test] + fn on_heap() { + let fired = &Cell::new(false); + let a = [0usize; 10]; + + let mut d = Deferred::new(move || { + drop(a); + fired.set(true); + }); + + assert!(!fired.get()); + d.call(); + assert!(fired.get()); + } + + #[test] + #[should_panic(expected = "cannot call `FnOnce` more than once")] + fn twice_on_stack() { + let a = [0usize; 1]; + let mut d = Deferred::new(move || drop(a)); + d.call(); + d.call(); + } + + #[test] + #[should_panic(expected = "cannot call `FnOnce` more than once")] + fn twice_on_heap() { + let a = [0usize; 10]; + 
let mut d = Deferred::new(move || drop(a)); + d.call(); + d.call(); + } + + #[test] + fn string() { + let a = "hello".to_string(); + let mut d = Deferred::new(move || assert_eq!(a, "hello")); + d.call(); + } + + #[test] + fn boxed_slice_i32() { + let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); + let mut d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); + d.call(); + } + + #[test] + fn long_slice_usize() { + let a: [usize; 5] = [2, 3, 5, 7, 11]; + let mut d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); + d.call(); + } +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/epoch.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/epoch.rs new file mode 100644 index 000000000000..1f277cfeef08 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/epoch.rs @@ -0,0 +1,106 @@ +//! The global epoch +//! +//! The last bit in this number is unused and is always zero. Every so often the global epoch is +//! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only +//! if all currently pinned participants have been pinned in the current epoch. +//! +//! If an object became garbage in some epoch, then we can be sure that after two advancements no +//! participant will hold a reference to it. That is the crux of safe memory reclamation. + +use core::sync::atomic::{AtomicUsize, Ordering}; + +/// An epoch that can be marked as pinned or unpinned. +/// +/// Internally, the epoch is represented as an integer that wraps around at some unspecified point +/// and a flag that represents whether it is pinned or unpinned. +#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)] +pub struct Epoch { + /// The least significant bit is set if pinned. The rest of the bits hold the epoch. + data: usize, +} + +impl Epoch { + /// Returns the starting epoch in unpinned state. + #[inline] + pub fn starting() -> Self { + Self::default() + } + + /// Returns the number of epochs `self` is ahead of `rhs`. 
+ /// + /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX + /// / 2)`, so the returned distance will be in the same interval. + pub fn wrapping_sub(self, rhs: Self) -> isize { + // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`, + // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)` + // will be ignored in the shift operation. + self.data.wrapping_sub(rhs.data & !1) as isize >> 1 + } + + /// Returns `true` if the epoch is marked as pinned. + #[inline] + pub fn is_pinned(self) -> bool { + (self.data & 1) == 1 + } + + /// Returns the same epoch, but marked as pinned. + #[inline] + pub fn pinned(self) -> Epoch { + Epoch { data: self.data | 1 } + } + + /// Returns the same epoch, but marked as unpinned. + #[inline] + pub fn unpinned(self) -> Epoch { + Epoch { data: self.data & !1 } + } + + /// Returns the successor epoch. + /// + /// The returned epoch will be marked as pinned only if the previous one was as well. + #[inline] + pub fn successor(self) -> Epoch { + Epoch { data: self.data.wrapping_add(2) } + } +} + +/// An atomic value that holds an `Epoch`. +#[derive(Default, Debug)] +pub struct AtomicEpoch { + /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented + /// using an `AtomicUsize`. + data: AtomicUsize, +} + +impl AtomicEpoch { + /// Creates a new atomic epoch. + #[inline] + pub fn new(epoch: Epoch) -> Self { + let data = AtomicUsize::new(epoch.data); + AtomicEpoch { data: data } + } + + /// Loads a value from the atomic epoch. + #[inline] + pub fn load(&self, ord: Ordering) -> Epoch { + Epoch { data: self.data.load(ord) } + } + + /// Stores a value into the atomic epoch. + #[inline] + pub fn store(&self, epoch: Epoch, ord: Ordering) { + self.data.store(epoch.data, ord); + } + + /// Stores a value into the atomic epoch if the current value is the same as `current`. 
+ /// + /// The return value is always the previous value. If it is equal to `current`, then the value + /// is updated. + /// + /// The `Ordering` argument describes the memory ordering of this operation. + #[inline] + pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch { + let data = self.data.compare_and_swap(current.data, new.data, ord); + Epoch { data: data } + } +} diff --git a/third_party/rust/crossbeam-epoch/src/garbage.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/garbage.rs similarity index 100% rename from third_party/rust/crossbeam-epoch/src/garbage.rs rename to third_party/rust/crossbeam-epoch-0.3.1/src/garbage.rs diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/guard.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/guard.rs new file mode 100644 index 000000000000..d629d99c2be0 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/guard.rs @@ -0,0 +1,417 @@ +use core::ptr; +use core::mem; + +use garbage::Garbage; +use internal::Local; + +/// A guard that keeps the current thread pinned. +/// +/// # Pinning +/// +/// The current thread is pinned by calling [`pin`], which returns a new guard: +/// +/// ``` +/// use crossbeam_epoch as epoch; +/// +/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference. +/// // This is not really necessary, but makes passing references to the guard a bit easier. +/// let guard = &epoch::pin(); +/// ``` +/// +/// When a guard gets dropped, the current thread is automatically unpinned. +/// +/// # Pointers on the stack +/// +/// Having a guard allows us to create pointers on the stack to heap-allocated objects. +/// For example: +/// +/// ``` +/// use crossbeam_epoch::{self as epoch, Atomic, Owned}; +/// use std::sync::atomic::Ordering::SeqCst; +/// +/// // Create a heap-allocated number. +/// let a = Atomic::new(777); +/// +/// // Pin the current thread. 
+/// let guard = &epoch::pin(); +/// +/// // Load the heap-allocated object and create pointer `p` on the stack. +/// let p = a.load(SeqCst, guard); +/// +/// // Dereference the pointer and print the value: +/// if let Some(num) = unsafe { p.as_ref() } { +/// println!("The number is {}.", num); +/// } +/// ``` +/// +/// # Multiple guards +/// +/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the +/// thread will actually be pinned only when the first guard is created and unpinned when the last +/// one is dropped: +/// +/// ``` +/// use crossbeam_epoch as epoch; +/// +/// let guard1 = epoch::pin(); +/// let guard2 = epoch::pin(); +/// assert!(epoch::is_pinned()); +/// drop(guard1); +/// assert!(epoch::is_pinned()); +/// drop(guard2); +/// assert!(!epoch::is_pinned()); +/// ``` +/// +/// The same can be achieved by cloning guards: +/// +/// ``` +/// use crossbeam_epoch as epoch; +/// +/// let guard1 = epoch::pin(); +/// let guard2 = guard1.clone(); +/// ``` +/// +/// [`pin`]: fn.pin.html +pub struct Guard { + local: *const Local, +} + +impl Guard { + /// Creates a new guard from a pointer to `Local`. + /// + /// # Safety + /// + /// The `local` should be a valid pointer created by `Local::register()`. + #[doc(hidden)] + pub unsafe fn new(local: *const Local) -> Guard { + Guard { local: local } + } + + /// Accesses the internal pointer to `Local`. + #[doc(hidden)] + pub unsafe fn get_local(&self) -> *const Local { + self.local + } + + /// Stores a function so that it can be executed at some point after all currently pinned + /// threads get unpinned. + /// + /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache + /// becomes full, some functions are moved into the global cache. At the same time, some + /// functions from both local and global caches may get executed in order to incrementally + /// clean up the caches as they fill up. 
+ /// + /// There is no guarantee when exactly `f` will be executed. The only guarantee is that won't + /// until all currently pinned threads get unpinned. In theory, `f` might never be deallocated, + /// but the epoch-based garbage collection will make an effort to execute it reasonably soon. + /// + /// If this method is called from an [`unprotected`] guard, the function will simply be + /// executed immediately. + /// + /// # Safety + /// + /// The given function must not hold reference onto the stack. It is highly recommended that + /// the passed function is **always** marked with `move` in order to prevent accidental + /// borrows. + /// + /// ``` + /// use crossbeam_epoch as epoch; + /// + /// let guard = &epoch::pin(); + /// let message = "Hello!"; + /// unsafe { + /// // ALWAYS use `move` when sending a closure into `defef`. + /// guard.defer(move || { + /// println!("{}", message); + /// }); + /// } + /// ``` + /// + /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed + /// by the closure must be `Send`. + /// + /// # Examples + /// + /// When a heap-allocated object in a data structure becomes unreachable, it has to be + /// deallocated. However, the current thread and other threads may be still holding references + /// on the stack to that same object. Therefore it cannot be deallocated before those + /// references get dropped. This method can defer deallocation until all those threads get + /// unpinned and consequently drop all their references on the stack. + /// + /// ```rust + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new("foo"); + /// + /// // Now suppose that `a` is shared among multiple threads and concurrently + /// // accessed and modified... + /// + /// // Pin the current thread. + /// let guard = &epoch::pin(); + /// + /// // Steal the object currently stored in `a` and swap it with another one. 
+ /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
+ ///
+ /// if !p.is_null() {
+ /// // The object `p` is pointing to is now unreachable.
+ /// // Defer its deallocation until all currently pinned threads get unpinned.
+ /// unsafe {
+ /// // ALWAYS use `move` when sending a closure into `defer`.
+ /// guard.defer(move || {
+ /// println!("{} is now being deallocated.", p.deref());
+ /// // Now we have unique access to the object pointed to by `p` and can turn it
+ /// // into an `Owned`. Dropping the `Owned` will deallocate the object.
+ /// drop(p.into_owned());
+ /// });
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub unsafe fn defer<F, R>(&self, f: F)
+ where
+ F: FnOnce() -> R,
+ {
+ let garbage = Garbage::new(|| drop(f()));
+
+ if let Some(local) = self.local.as_ref() {
+ local.defer(garbage, self);
+ }
+ }
+
+ /// Clears up the thread-local cache of deferred functions by executing them or moving into the
+ /// global cache.
+ ///
+ /// Call this method after deferring execution of a function if you want to get it executed as
+ /// soon as possible. Flushing will make sure it is residing in the global cache, so that
+ /// any thread has a chance of taking the function and executing it.
+ ///
+ /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch as epoch;
+ ///
+ /// let guard = &epoch::pin();
+ /// unsafe {
+ /// guard.defer(move || {
+ /// println!("This better be printed as soon as possible!");
+ /// });
+ /// }
+ /// guard.flush();
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub fn flush(&self) {
+ if let Some(local) = unsafe { self.local.as_ref() } {
+ local.flush(self);
+ }
+ }
+
+ /// Unpins and then immediately re-pins the thread.
+ ///
+ /// This method is useful when you don't want to delay the advancement of the global epoch by
+ /// holding an old epoch.
For safety, you should not maintain any guard-based reference across + /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this + /// is the only active guard for the current thread. + /// + /// If this method is called from an [`unprotected`] guard, then the call will be just no-op. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// let a = Atomic::new(777); + /// let mut guard = epoch::pin(); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// guard.repin(); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// ``` + /// + /// [`unprotected`]: fn.unprotected.html + pub fn repin(&mut self) { + if let Some(local) = unsafe { self.local.as_ref() } { + local.repin(); + } + } + + /// Temporarily unpins the thread, executes the given function and then re-pins the thread. + /// + /// This method is useful when you need to perform a long-running operation (e.g. sleeping) + /// and don't need to maintain any guard-based reference across the call (the latter is enforced + /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the + /// current thread. + /// + /// If this method is called from an [`unprotected`] guard, then the passed function is called + /// directly without unpinning the thread. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// let a = Atomic::new(777); + /// let mut guard = epoch::pin(); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// guard.repin_after(|| thread::sleep(Duration::from_millis(50))); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// ``` + /// + /// [`unprotected`]: fn.unprotected.html + pub fn repin_after(&mut self, f: F) -> R + where + F: FnOnce() -> R, + { + if let Some(local) = unsafe { self.local.as_ref() } { + // We need to acquire a handle here to ensure the Local doesn't + // disappear from under us. + local.acquire_handle(); + local.unpin(); + } + + // Ensure the Guard is re-pinned even if the function panics + defer! { + if let Some(local) = unsafe { self.local.as_ref() } { + mem::forget(local.pin()); + local.release_handle(); + } + } + + f() + } +} + +impl Drop for Guard { + #[inline] + fn drop(&mut self) { + if let Some(local) = unsafe { self.local.as_ref() } { + local.unpin(); + } + } +} + +impl Clone for Guard { + #[inline] + fn clone(&self) -> Guard { + match unsafe { self.local.as_ref() } { + None => Guard { local: ptr::null() }, + Some(local) => local.pin(), + } + } +} + +/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s. +/// +/// This guard should be used in special occasions only. Note that it doesn't actually keep any +/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely. +/// +/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just +/// execute the function immediately. +/// +/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`. 
+/// +/// # Safety +/// +/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the +/// [`Atomic`] is not being concurrently modified by other threads. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_epoch::{self as epoch, Atomic}; +/// use std::sync::atomic::Ordering::Relaxed; +/// +/// let a = Atomic::new(7); +/// +/// unsafe { +/// // Load `a` without pinning the current thread. +/// a.load(Relaxed, epoch::unprotected()); +/// +/// // It's possible to create more dummy guards by calling `clone()`. +/// let dummy = &epoch::unprotected().clone(); +/// +/// dummy.defer(move || { +/// println!("This gets executed immediately."); +/// }); +/// +/// // Dropping `dummy` doesn't affect the current thread - it's just a noop. +/// } +/// ``` +/// +/// The most common use of this function is when constructing or destructing a data structure. +/// +/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that +/// point no other thread could concurrently modify the [`Atomic`]s we are accessing. +/// +/// If we were to actually pin the current thread during destruction, that would just unnecessarily +/// delay garbage collection and incur some performance cost, so in cases like these `unprotected` +/// is very helpful. +/// +/// ``` +/// use crossbeam_epoch::{self as epoch, Atomic}; +/// use std::ptr; +/// use std::sync::atomic::Ordering::Relaxed; +/// +/// struct Stack { +/// head: epoch::Atomic, +/// } +/// +/// struct Node { +/// data: u32, +/// next: epoch::Atomic, +/// } +/// +/// impl Drop for Stack { +/// fn drop(&mut self) { +/// unsafe { +/// // Unprotected load. +/// let mut node = self.head.load(Relaxed, epoch::unprotected()); +/// +/// while let Some(n) = node.as_ref() { +/// // Unprotected load. +/// let next = n.next.load(Relaxed, epoch::unprotected()); +/// +/// // Take ownership of the node, then drop it. 
+/// drop(node.into_owned()); +/// +/// node = next; +/// } +/// } +/// } +/// } +/// ``` +/// +/// [`Atomic`]: struct.Atomic.html +/// [`defer`]: struct.Guard.html#method.defer +#[inline] +pub unsafe fn unprotected() -> &'static Guard { + // HACK(stjepang): An unprotected guard is just a `Guard` with its field `local` set to null. + // Since this function returns a `'static` reference to a `Guard`, we must return a reference + // to a global guard. However, it's not possible to create a `static` `Guard` because it does + // not implement `Sync`. To get around the problem, we create a static `usize` initialized to + // zero and then transmute it into a `Guard`. This is safe because `usize` and `Guard` + // (consisting of a single pointer) have the same representation in memory. + static UNPROTECTED: usize = 0; + &*(&UNPROTECTED as *const _ as *const Guard) +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/internal.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/internal.rs new file mode 100644 index 000000000000..9223f5338290 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/internal.rs @@ -0,0 +1,409 @@ +//! The global data and participant for garbage collection. +//! +//! # Registration +//! +//! In order to track all participants in one place, we need some form of participant +//! registration. When a participant is created, it is registered to a global lock-free +//! singly-linked list of registries; and when a participant is leaving, it is unregistered from the +//! list. +//! +//! # Pinning +//! +//! Every participant contains an integer that tells whether the participant is pinned and if so, +//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that +//! aids in periodic global epoch advancement. +//! +//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned. +//! Guards are necessary for performing atomic operations, and for freeing/dropping locations. 
+ +use core::cell::{Cell, UnsafeCell}; +use core::mem; +use core::num::Wrapping; +use core::ptr; +use core::sync::atomic; +use core::sync::atomic::Ordering; +use alloc::boxed::Box; +use alloc::arc::Arc; + +use crossbeam_utils::cache_padded::CachePadded; +use nodrop::NoDrop; + +use atomic::Owned; +use epoch::{AtomicEpoch, Epoch}; +use guard::{unprotected, Guard}; +use garbage::{Bag, Garbage}; +use sync::list::{List, Entry, IterError, IsElement}; +use sync::queue::Queue; + +/// Number of bags to destroy. +const COLLECT_STEPS: usize = 8; + +/// Number of pinnings after which a participant will execute some deferred functions from the +/// global queue. +const PINNINGS_BETWEEN_COLLECT: usize = 128; + +/// The global data for a garbage collector. +pub struct Global { + /// The intrusive linked list of `Local`s. + locals: List, + + /// The global queue of bags of deferred functions. + queue: Queue<(Epoch, Bag)>, + + /// The global epoch. + epoch: CachePadded, +} + +impl Global { + /// Creates a new global data for garbage collection. + #[inline] + pub fn new() -> Global { + Global { + locals: List::new(), + queue: Queue::new(), + epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())), + } + } + + /// Returns the current global epoch. + pub fn load_epoch(&self, ordering: Ordering) -> Epoch { + self.epoch.load(ordering) + } + + /// Pushes the bag into the global queue and replaces the bag with a new empty bag. + pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) { + let bag = mem::replace(bag, Bag::new()); + + atomic::fence(Ordering::SeqCst); + + let epoch = self.epoch.load(Ordering::Relaxed); + self.queue.push((epoch, bag), guard); + } + + /// Collects several bags from the global queue and executes deferred functions in them. + /// + /// Note: This may itself produce garbage and in turn allocate new bags. + /// + /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold + /// path. 
In other words, we want the compiler to optimize branching for the case when + /// `collect()` is not called. + #[cold] + pub fn collect(&self, guard: &Guard) { + let global_epoch = self.try_advance(guard); + + let condition = |item: &(Epoch, Bag)| { + // A pinned participant can witness at most one epoch advancement. Therefore, any bag + // that is within one epoch of the current one cannot be destroyed yet. + global_epoch.wrapping_sub(item.0) >= 2 + }; + + let steps = if cfg!(feature = "sanitize") { + usize::max_value() + } else { + COLLECT_STEPS + }; + + for _ in 0..steps { + match self.queue.try_pop_if(&condition, guard) { + None => break, + Some(bag) => drop(bag), + } + } + } + + /// Attempts to advance the global epoch. + /// + /// The global epoch can advance only if all currently pinned participants have been pinned in + /// the current epoch. + /// + /// Returns the current global epoch. + /// + /// `try_advance()` is annotated `#[cold]` because it is rarely called. + #[cold] + pub fn try_advance(&self, guard: &Guard) -> Epoch { + let global_epoch = self.epoch.load(Ordering::Relaxed); + atomic::fence(Ordering::SeqCst); + + // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly + // easy to implement in a lock-free manner. However, traversal can be slow due to cache + // misses and data dependencies. We should experiment with other data structures as well. + for local in self.locals.iter(&guard) { + match local { + Err(IterError::Stalled) => { + // A concurrent thread stalled this iteration. That thread might also try to + // advance the epoch, in which case we leave the job to it. Otherwise, the + // epoch will not be advanced. + return global_epoch; + } + Ok(local) => { + let local_epoch = local.epoch.load(Ordering::Relaxed); + + // If the participant was pinned in a different epoch, we cannot advance the + // global epoch just yet. 
+ if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch {
+ return global_epoch;
+ }
+ }
+ }
+ }
+ atomic::fence(Ordering::Acquire);
+
+ // All pinned participants were pinned in the current global epoch.
+ // Now let's advance the global epoch...
+ //
+ // Note that if another thread already advanced it before us, this store will simply
+ // overwrite the global epoch with the same value. This is true because `try_advance` was
+ // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be
+ // advanced two steps ahead of it.
+ let new_epoch = global_epoch.successor();
+ self.epoch.store(new_epoch, Ordering::Release);
+ new_epoch
+ }
+}
+
+/// Participant for garbage collection.
+pub struct Local {
+ /// A node in the intrusive linked list of `Local`s.
+ entry: Entry,
+
+ /// The local epoch.
+ epoch: AtomicEpoch,
+
+ /// A reference to the global data.
+ ///
+ /// When all guards and handles get dropped, this reference is destroyed.
+ global: UnsafeCell<NoDrop<Arc<Global>>>,
+
+ /// The local bag of deferred functions.
+ bag: UnsafeCell<Bag>,
+
+ /// The number of guards keeping this participant pinned.
+ guard_count: Cell<usize>,
+
+ /// The number of active handles.
+ handle_count: Cell<usize>,
+
+ /// Total number of pinnings performed.
+ ///
+ /// This is just an auxiliary counter that sometimes kicks off collection.
+ pin_count: Cell<Wrapping<usize>>,
+}
+
+unsafe impl Sync for Local {}
+
+impl Local {
+ /// Registers a new `Local` in the provided `Global`.
+ pub fn register(global: &Arc<Global>) -> *const Local {
+ unsafe {
+ // Since we dereference no pointers in this block, it is safe to use `unprotected`.
+ + let local = Owned::new(Local { + entry: Entry::default(), + epoch: AtomicEpoch::new(Epoch::starting()), + global: UnsafeCell::new(NoDrop::new(global.clone())), + bag: UnsafeCell::new(Bag::new()), + guard_count: Cell::new(0), + handle_count: Cell::new(1), + pin_count: Cell::new(Wrapping(0)), + }).into_shared(&unprotected()); + global.locals.insert(local, &unprotected()); + local.as_raw() + } + } + + /// Returns whether the local garbage bag is empty. + #[inline] + pub fn is_bag_empty(&self) -> bool { + unsafe { (*self.bag.get()).is_empty() } + } + + /// Returns a reference to the `Global` in which this `Local` resides. + #[inline] + pub fn global(&self) -> &Global { + unsafe { &*self.global.get() } + } + + /// Returns `true` if the current participant is pinned. + #[inline] + pub fn is_pinned(&self) -> bool { + self.guard_count.get() > 0 + } + + pub fn defer(&self, mut garbage: Garbage, guard: &Guard) { + let bag = unsafe { &mut *self.bag.get() }; + + while let Err(g) = bag.try_push(garbage) { + self.global().push_bag(bag, guard); + garbage = g; + } + } + + pub fn flush(&self, guard: &Guard) { + let bag = unsafe { &mut *self.bag.get() }; + + if !bag.is_empty() { + self.global().push_bag(bag, guard); + } + + self.global().collect(guard); + } + + /// Pins the `Local`. + #[inline] + pub fn pin(&self) -> Guard { + let guard = unsafe { Guard::new(self) }; + + let guard_count = self.guard_count.get(); + self.guard_count.set(guard_count.checked_add(1).unwrap()); + + if guard_count == 0 { + let global_epoch = self.global().epoch.load(Ordering::Relaxed); + let new_epoch = global_epoch.pinned(); + + // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence. + // The fence makes sure that any future loads from `Atomic`s will not happen before + // this store. + if cfg!(any(target_arch = "x86", target_arch = "x86_64")) { + // HACK(stjepang): On x86 architectures there are two different ways of executing + // a `SeqCst` fence. + // + // 1. 
`atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. + // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg` + // instruction. + // + // Both instructions have the effect of a full barrier, but benchmarks have shown + // that the second one makes pinning faster in this particular case. + let current = Epoch::starting(); + let previous = self.epoch.compare_and_swap(current, new_epoch, Ordering::SeqCst); + debug_assert_eq!(current, previous, "participant was expected to be unpinned"); + } else { + self.epoch.store(new_epoch, Ordering::Relaxed); + atomic::fence(Ordering::SeqCst); + } + + // Increment the pin counter. + let count = self.pin_count.get(); + self.pin_count.set(count + Wrapping(1)); + + // After every `PINNINGS_BETWEEN_COLLECT` try advancing the epoch and collecting + // some garbage. + if count.0 % PINNINGS_BETWEEN_COLLECT == 0 { + self.global().collect(&guard); + } + } + + guard + } + + /// Unpins the `Local`. + #[inline] + pub fn unpin(&self) { + let guard_count = self.guard_count.get(); + self.guard_count.set(guard_count - 1); + + if guard_count == 1 { + self.epoch.store(Epoch::starting(), Ordering::Release); + + if self.handle_count.get() == 0 { + self.finalize(); + } + } + } + + /// Unpins and then pins the `Local`. + #[inline] + pub fn repin(&self) { + let guard_count = self.guard_count.get(); + + // Update the local epoch only if there's only one guard. + if guard_count == 1 { + let epoch = self.epoch.load(Ordering::Relaxed); + let global_epoch = self.global().epoch.load(Ordering::Relaxed); + + // Update the local epoch only if the global epoch is greater than the local epoch. + if epoch != global_epoch { + // We store the new epoch with `Release` because we need to ensure any memory + // accesses from the previous epoch do not leak into the new one. 
+ self.epoch.store(global_epoch, Ordering::Release); + + // However, we don't need a following `SeqCst` fence, because it is safe for memory + // accesses from the new epoch to be executed before updating the local epoch. At + // worse, other threads will see the new epoch late and delay GC slightly. + } + } + } + + /// Increments the handle count. + #[inline] + pub fn acquire_handle(&self) { + let handle_count = self.handle_count.get(); + debug_assert!(handle_count >= 1); + self.handle_count.set(handle_count + 1); + } + + /// Decrements the handle count. + #[inline] + pub fn release_handle(&self) { + let guard_count = self.guard_count.get(); + let handle_count = self.handle_count.get(); + debug_assert!(handle_count >= 1); + self.handle_count.set(handle_count - 1); + + if guard_count == 0 && handle_count == 1 { + self.finalize(); + } + } + + /// Removes the `Local` from the global linked list. + #[cold] + fn finalize(&self) { + debug_assert_eq!(self.guard_count.get(), 0); + debug_assert_eq!(self.handle_count.get(), 0); + + // Temporarily increment handle count. This is required so that the following call to `pin` + // doesn't call `finalize` again. + self.handle_count.set(1); + unsafe { + // Pin and move the local bag into the global queue. It's important that `push_bag` + // doesn't defer destruction on any new garbage. + let guard = &self.pin(); + self.global().push_bag(&mut *self.bag.get(), guard); + } + // Revert the handle count back to zero. + self.handle_count.set(0); + + unsafe { + // Take the reference to the `Global` out of this `Local`. Since we're not protected + // by a guard at this time, it's crucial that the reference is read before marking the + // `Local` as deleted. + let global: Arc = ptr::read(&**self.global.get()); + + // Mark this node in the linked list as deleted. + self.entry.delete(&unprotected()); + + // Finally, drop the reference to the global. Note that this might be the last + // reference to the `Global`. 
If so, the global data will be destroyed and all deferred + // functions in its queue will be executed. + drop(global); + } + } +} + +impl IsElement for Local { + fn entry_of(local: &Local) -> &Entry { + let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry; + unsafe { &*entry_ptr } + } + + unsafe fn element_of(entry: &Entry) -> &Local { + // offset_of! macro uses unsafe, but it's unnecessary in this context. + #[allow(unused_unsafe)] + let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local; + &*local_ptr + } + + unsafe fn finalize(entry: &Entry) { + let local = Self::element_of(entry); + drop(Box::from_raw(local as *const Local as *mut Local)); + } +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/lib.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/lib.rs new file mode 100644 index 000000000000..91ee1c133944 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/lib.rs @@ -0,0 +1,110 @@ +//! Epoch-based memory reclamation. +//! +//! An interesting problem concurrent collections deal with comes from the remove operation. +//! Suppose that a thread removes an element from a lock-free map, while another thread is reading +//! that same element at the same time. The first thread must wait until the second thread stops +//! reading the element. Only then it is safe to destruct it. +//! +//! Programming languages that come with garbage collectors solve this problem trivially. The +//! garbage collector will destruct the removed element when no thread can hold a reference to it +//! anymore. +//! +//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an +//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and +//! marked with the current epoch. Every time a thread accesses a collection, it checks the current +//! 
epoch, attempts to increment it, and destructs some garbage that became so old that no thread +//! can be referencing it anymore. +//! +//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit +//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something +//! users of concurrent collections don't have to worry much about. +//! +//! # Pointers +//! +//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which +//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a +//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely +//! read. +//! +//! # Pinning +//! +//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant +//! we declare that any object that gets removed from now on must not be destructed just +//! yet. Garbage collection of newly removed objects is suspended until the participant gets +//! unpinned. +//! +//! # Garbage +//! +//! Objects that get removed from concurrent collections must be stashed away until all currently +//! pinned participants get unpinned. Such objects can be stored into a [`Garbage`], where they are +//! kept until the right time for their destruction comes. +//! +//! There is a global shared instance of garbage queue. You can [`defer`] the execution of an +//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data +//! structures may defer the deallocation of an object. +//! +//! # APIs +//! +//! For majority of use cases, just use the default garbage collector by invoking [`pin`]. If you +//! want to create your own garbage collector, use the [`Collector`] API. +//! +//! [`Atomic`]: struct.Atomic.html +//! [`Collector`]: struct.Collector.html +//! [`Shared`]: struct.Shared.html +//! [`pin`]: fn.pin.html +//! 
[`defer`]: fn.defer.html + +#![cfg_attr(feature = "nightly", feature(const_fn))] +#![cfg_attr(feature = "nightly", feature(alloc))] +#![cfg_attr(not(test), no_std)] + +#[cfg(all(not(test), feature = "use_std"))] +#[macro_use] +extern crate std; +#[cfg(test)] +extern crate core; + +// Use liballoc on nightly to avoid a dependency on libstd +#[cfg(feature = "nightly")] +extern crate alloc; +#[cfg(not(feature = "nightly"))] +mod alloc { + // Tweak the module layout to match the one in liballoc + extern crate std; + pub use self::std::boxed; + pub use self::std::sync as arc; +} + +#[cfg(feature = "manually_drop")] +mod nodrop { + pub use std::mem::ManuallyDrop as NoDrop; +} +#[cfg(not(feature = "manually_drop"))] +extern crate nodrop; + +extern crate arrayvec; +extern crate crossbeam_utils; +#[cfg(feature = "use_std")] +#[macro_use] +extern crate lazy_static; +#[macro_use] +extern crate memoffset; +#[macro_use] +extern crate scopeguard; + +mod atomic; +mod collector; +#[cfg(feature = "use_std")] +mod default; +mod deferred; +mod epoch; +mod garbage; +mod guard; +mod internal; +mod sync; + +pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared}; +pub use self::guard::{unprotected, Guard}; +#[cfg(feature = "use_std")] +pub use self::default::{default_handle, is_pinned, pin}; +pub use self::collector::{Collector, Handle}; diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/sync/list.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/sync/list.rs new file mode 100644 index 000000000000..3695751c330a --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/sync/list.rs @@ -0,0 +1,473 @@ +//! Lock-free intrusive linked list. +//! +//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA +//! 2002. 
http://dl.acm.org/citation.cfm?id=564870.564881 + +use core::marker::PhantomData; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; + +use {Atomic, Shared, Guard, unprotected}; + +/// An entry in a linked list. +/// +/// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different +/// cache-line than thread-local data in terms of performance. +#[derive(Debug)] +pub struct Entry { + /// The next entry in the linked list. + /// If the tag is 1, this entry is marked as deleted. + next: Atomic, +} + +/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive +/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance +/// of `Entry`. +/// +/// # Example +/// +/// ```ignore +/// struct A { +/// entry: Entry, +/// data: usize, +/// } +/// +/// impl IsElement for A { +/// fn entry_of(a: &A) -> &Entry { +/// let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry; +/// unsafe { &*entry_ptr } +/// } +/// +/// unsafe fn element_of(entry: &Entry) -> &T { +/// let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const T; +/// &*elem_ptr +/// } +/// +/// unsafe fn finalize(entry: &Entry) { +/// let elem = Self::element_of(entry); +/// drop(Box::from_raw(elem as *const A as *mut A)); +/// } +/// } +/// ``` +/// +/// This trait is implemented on a type separate from `T` (although it can be just `T`), because +/// one type might be placeable into multiple lists, in which case it would require multiple +/// implementations of `IsElement`. In such cases, each struct implementing `IsElement` +/// represents a distinct `Entry` in `T`. 
+/// +/// For example, we can insert the following struct into two lists using `entry1` for one +/// and `entry2` for the other: +/// +/// ```ignore +/// struct B { +/// entry1: Entry, +/// entry2: Entry, +/// data: usize, +/// } +/// ``` +/// +pub trait IsElement { + /// Returns a reference to this element's `Entry`. + fn entry_of(&T) -> &Entry; + + /// Given a reference to an element's entry, returns that element. + /// + /// ```ignore + /// let elem = ListElement::new(); + /// assert_eq!(elem.entry_of(), + /// unsafe { ListElement::element_of(elem.entry_of()) } ); + /// ``` + /// + /// # Safety + /// The caller has to guarantee that the `Entry` it + /// is called with was retrieved from an instance of the element type (`T`). + unsafe fn element_of(&Entry) -> &T; + + /// Deallocates the whole element given its `Entry`. This is called when the list + /// is ready to actually free the element. + /// + /// # Safety + /// The caller has to guarantee that the `Entry` it + /// is called with was retrieved from an instance of the element type (`T`). + unsafe fn finalize(&Entry); +} + +/// A lock-free, intrusive linked list of type `T`. +#[derive(Debug)] +pub struct List = T> { + /// The head of the linked list. + head: Atomic, + + /// The phantom data for using `T` and `C`. + _marker: PhantomData<(T, C)>, +} + +/// An iterator used for retrieving values from the list. +pub struct Iter<'g, T: 'g, C: IsElement> { + /// The guard that protects the iteration. + guard: &'g Guard, + + /// Pointer from the predecessor to the current entry. + pred: &'g Atomic, + + /// The current entry. + curr: Shared<'g, Entry>, + + /// The list head, needed for restarting iteration. + head: &'g Atomic, + + /// Logically, we store a borrow of an instance of `T` and + /// use the type information from `C`. + _marker: PhantomData<(&'g T, C)>, +} + +/// An error that occurs during iteration over the list. 
+#[derive(PartialEq, Debug)] +pub enum IterError { + /// A concurrent thread modified the state of the list at the same place that this iterator + /// was inspecting. Subsequent iteration will restart from the beginning of the list. + Stalled, +} + +impl Default for Entry { + /// Returns the empty entry. + fn default() -> Entry { + Entry { next: Atomic::null() } + } +} + +impl Entry { + /// Marks this entry as deleted, deferring the actual deallocation to a later iteration. + /// + /// # Safety + /// + /// The entry should be a member of a linked list, and it should not have been deleted. + /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C` + /// is the associated helper for the linked list. + pub unsafe fn delete(&self, guard: &Guard) { + self.next.fetch_or(1, Release, guard); + } +} + +impl> List { + /// Returns a new, empty linked list. + pub fn new() -> List { + List { + head: Atomic::null(), + _marker: PhantomData, + } + } + + /// Inserts `entry` into the head of the list. + /// + /// # Safety + /// + /// You should guarantee that: + /// + /// - `container` is not null + /// - `container` is immovable, e.g. inside a `Box` + /// - the same `Entry` is not inserted more than once + /// - the inserted object will be removed before the list is dropped + pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) { + // Insert right after head, i.e. at the beginning of the list. + let to = &self.head; + // Get the intrusively stored Entry of the new element to insert. + let entry: &Entry = C::entry_of(container.deref()); + // Make a Shared ptr to that Entry. + let entry_ptr = Shared::from(entry as *const _); + // Read the current successor of where we want to insert. + let mut next = to.load(Relaxed, guard); + + loop { + // Set the Entry of the to-be-inserted element to point to the previous successor of + // `to`. 
+ entry.next.store(next, Relaxed); + match to.compare_and_set_weak(next, entry_ptr, Release, guard) { + Ok(_) => break, + // We lost the race or weak CAS failed spuriously. Update the successor and try + // again. + Err(err) => next = err.current, + } + } + } + + /// Returns an iterator over all objects. + /// + /// # Caveat + /// + /// Every object that is inserted at the moment this function is called and persists at least + /// until the end of iteration will be returned. Since this iterator traverses a lock-free + /// linked list that may be concurrently modified, some additional caveats apply: + /// + /// 1. If a new object is inserted during iteration, it may or may not be returned. + /// 2. If an object is deleted during iteration, it may or may not be returned. + /// 3. The iteration may be aborted when it lost in a race condition. In this case, the winning + /// thread will continue to iterate over the same list. + pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> { + Iter { + guard: guard, + pred: &self.head, + curr: self.head.load(Acquire, guard), + head: &self.head, + _marker: PhantomData, + } + } +} + +impl> Drop for List { + fn drop(&mut self) { + unsafe { + let guard = &unprotected(); + let mut curr = self.head.load(Relaxed, guard); + while let Some(c) = curr.as_ref() { + let succ = c.next.load(Relaxed, guard); + // Verify that all elements have been removed from the list. + assert_eq!(succ.tag(), 1); + + C::finalize(curr.deref()); + curr = succ; + } + } + } +} + +impl<'g, T: 'g, C: IsElement> Iterator for Iter<'g, T, C> { + type Item = Result<&'g T, IterError>; + + fn next(&mut self) -> Option { + while let Some(c) = unsafe { self.curr.as_ref() } { + let succ = c.next.load(Acquire, self.guard); + + if succ.tag() == 1 { + // This entry was removed. Try unlinking it from the list. 
+ let succ = succ.with_tag(0); + + // The tag should never be zero, because removing a node after a logically deleted + // node leaves the list in an invalid state. + debug_assert!(self.curr.tag() == 0); + + match self.pred.compare_and_set( + self.curr, + succ, + Acquire, + self.guard, + ) { + Ok(_) => { + // We succeeded in unlinking this element from the list, so we have to + // schedule deallocation. Deferred drop is okay, because `list.delete()` + // can only be called if `T: 'static`. + unsafe { + let p = self.curr; + self.guard.defer(move || C::finalize(p.deref())); + } + + // Move over the removed by only advancing `curr`, not `pred`. + self.curr = succ; + continue; + } + Err(_) => { + // A concurrent thread modified the predecessor node. Since it might've + // been deleted, we need to restart from `head`. + self.pred = self.head; + self.curr = self.head.load(Acquire, self.guard); + + return Some(Err(IterError::Stalled)); + } + } + } + + // Move one step forward. + self.pred = &c.next; + self.curr = succ; + + return Some(Ok(unsafe { C::element_of(c) })); + } + + // We reached the end of the list. + None + } +} + +#[cfg(test)] +mod tests { + use {Collector, Owned, Guard}; + use crossbeam_utils::scoped; + use std::sync::Barrier; + use super::*; + + impl IsElement for Entry { + fn entry_of(entry: &Entry) -> &Entry { + entry + } + + unsafe fn element_of(entry: &Entry) -> &Entry { + entry + } + + unsafe fn finalize(entry: &Entry) { + drop(Box::from_raw(entry as *const Entry as *mut Entry)); + } + } + + /// Checks whether the list retains inserted elements + /// and returns them in the correct order. 
+ #[test] + fn insert() { + let collector = Collector::new(); + let handle = collector.handle(); + let guard = handle.pin(); + + let l: List = List::new(); + + let e1 = Owned::new(Entry::default()).into_shared(&guard); + let e2 = Owned::new(Entry::default()).into_shared(&guard); + let e3 = Owned::new(Entry::default()).into_shared(&guard); + + unsafe { + l.insert(e1, &guard); + l.insert(e2, &guard); + l.insert(e3, &guard); + } + + let mut iter = l.iter(&guard); + let maybe_e3 = iter.next(); + assert!(maybe_e3.is_some()); + assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); + let maybe_e2 = iter.next(); + assert!(maybe_e2.is_some()); + assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw()); + let maybe_e1 = iter.next(); + assert!(maybe_e1.is_some()); + assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); + assert!(iter.next().is_none()); + + unsafe { + e1.as_ref().unwrap().delete(&guard); + e2.as_ref().unwrap().delete(&guard); + e3.as_ref().unwrap().delete(&guard); + } + } + + /// Checks whether elements can be removed from the list and whether + /// the correct elements are removed. 
+ #[test] + fn delete() { + let collector = Collector::new(); + let handle = collector.handle(); + let guard = handle.pin(); + + let l: List = List::new(); + + let e1 = Owned::new(Entry::default()).into_shared(&guard); + let e2 = Owned::new(Entry::default()).into_shared(&guard); + let e3 = Owned::new(Entry::default()).into_shared(&guard); + unsafe { + l.insert(e1, &guard); + l.insert(e2, &guard); + l.insert(e3, &guard); + e2.as_ref().unwrap().delete(&guard); + } + + let mut iter = l.iter(&guard); + let maybe_e3 = iter.next(); + assert!(maybe_e3.is_some()); + assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); + let maybe_e1 = iter.next(); + assert!(maybe_e1.is_some()); + assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); + assert!(iter.next().is_none()); + + unsafe { + e1.as_ref().unwrap().delete(&guard); + e3.as_ref().unwrap().delete(&guard); + } + + let mut iter = l.iter(&guard); + assert!(iter.next().is_none()); + } + + const THREADS: usize = 8; + const ITERS: usize = 512; + + /// Contends the list on insert and delete operations to make sure they can run concurrently. + #[test] + fn insert_delete_multi() { + let collector = Collector::new(); + + let l: List = List::new(); + let b = Barrier::new(THREADS); + + scoped::scope(|s| for _ in 0..THREADS { + s.spawn(|| { + b.wait(); + + let handle = collector.handle(); + let guard: Guard = handle.pin(); + let mut v = Vec::with_capacity(ITERS); + + for _ in 0..ITERS { + let e = Owned::new(Entry::default()).into_shared(&guard); + v.push(e); + unsafe { + l.insert(e, &guard); + } + } + + for e in v { + unsafe { + e.as_ref().unwrap().delete(&guard); + } + } + }); + }); + + let handle = collector.handle(); + let guard = handle.pin(); + + let mut iter = l.iter(&guard); + assert!(iter.next().is_none()); + } + + /// Contends the list on iteration to make sure that it can be iterated over concurrently. 
+ #[test] + fn iter_multi() { + let collector = Collector::new(); + + let l: List = List::new(); + let b = Barrier::new(THREADS); + + scoped::scope(|s| for _ in 0..THREADS { + s.spawn(|| { + b.wait(); + + let handle = collector.handle(); + let guard: Guard = handle.pin(); + let mut v = Vec::with_capacity(ITERS); + + for _ in 0..ITERS { + let e = Owned::new(Entry::default()).into_shared(&guard); + v.push(e); + unsafe { + l.insert(e, &guard); + } + } + + let mut iter = l.iter(&guard); + for _ in 0..ITERS { + assert!(iter.next().is_some()); + } + + for e in v { + unsafe { + e.as_ref().unwrap().delete(&guard); + } + } + }); + }); + + let handle = collector.handle(); + let guard = handle.pin(); + + let mut iter = l.iter(&guard); + assert!(iter.next().is_none()); + } +} diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/sync/mod.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/sync/mod.rs new file mode 100644 index 000000000000..f8eb25960029 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/sync/mod.rs @@ -0,0 +1,4 @@ +//! Synchronization primitives. + +pub mod list; +pub mod queue; diff --git a/third_party/rust/crossbeam-epoch-0.3.1/src/sync/queue.rs b/third_party/rust/crossbeam-epoch-0.3.1/src/sync/queue.rs new file mode 100644 index 000000000000..77965c57de11 --- /dev/null +++ b/third_party/rust/crossbeam-epoch-0.3.1/src/sync/queue.rs @@ -0,0 +1,435 @@ +//! Michael-Scott lock-free queue. +//! +//! Usable with any number of producers and consumers. +//! +//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue +//! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106 + +use core::fmt; +use core::mem; +use core::ptr; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; + +use crossbeam_utils::cache_padded::CachePadded; +use nodrop::NoDrop; + +use {unprotected, Atomic, Guard, Owned, Shared}; + +// The representation here is a singly-linked list, with a sentinel node at the front. 
In general +// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or +// all `Blocked` (requests for data from blocked threads). +#[derive(Debug)] +pub struct Queue { + head: CachePadded>>, + tail: CachePadded>>, +} + +struct Node { + /// The slot in which a value of type `T` can be stored. + /// + /// The type of `data` is `NoDrop` because a `Node` doesn't always contain a `T`. For + /// example, the sentinel node in a queue never contains a value: its slot is always empty. + /// Other nodes start their life with a push operation and contain a value until it gets popped + /// out. After that such empty nodes get added to the collector for destruction. + data: NoDrop, + + next: Atomic>, +} + +impl fmt::Debug for Node { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "node {{ ... }}") + } +} + +// Any particular `T` should never be accessed concurrently, so no need for `Sync`. +unsafe impl Sync for Queue {} +unsafe impl Send for Queue {} + +impl Queue { + /// Create a new, empty queue. + pub fn new() -> Queue { + let q = Queue { + head: CachePadded::new(Atomic::null()), + tail: CachePadded::new(Atomic::null()), + }; + let sentinel = Owned::new(Node { + data: unsafe { mem::uninitialized() }, + next: Atomic::null(), + }); + unsafe { + let guard = &unprotected(); + let sentinel = sentinel.into_shared(guard); + q.head.store(sentinel, Relaxed); + q.tail.store(sentinel, Relaxed); + q + } + } + + /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on + /// success. The queue's `tail` pointer may be updated. + #[inline(always)] + fn push_internal(&self, onto: Shared>, new: Shared>, guard: &Guard) -> bool { + // is `onto` the actual tail? 
+ let o = unsafe { onto.deref() }; + let next = o.next.load(Acquire, guard); + if unsafe { next.as_ref().is_some() } { + // if not, try to "help" by moving the tail pointer forward + let _ = self.tail.compare_and_set(onto, next, Release, guard); + false + } else { + // looks like the actual tail; attempt to link in `n` + let result = o.next + .compare_and_set(Shared::null(), new, Release, guard) + .is_ok(); + if result { + // try to move the tail pointer forward + let _ = self.tail.compare_and_set(onto, new, Release, guard); + } + result + } + } + + /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`. + pub fn push(&self, t: T, guard: &Guard) { + let new = Owned::new(Node { + data: NoDrop::new(t), + next: Atomic::null(), + }); + let new = Owned::into_shared(new, guard); + + loop { + // We push onto the tail, so we'll start optimistically by looking there first. + let tail = self.tail.load(Acquire, guard); + + // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed. + if self.push_internal(tail, new, guard) { + break; + } + } + } + + /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop. + #[inline(always)] + fn pop_internal(&self, guard: &Guard) -> Result, ()> { + let head = self.head.load(Acquire, guard); + let h = unsafe { head.deref() }; + let next = h.next.load(Acquire, guard); + match unsafe { next.as_ref() } { + Some(n) => unsafe { + self.head + .compare_and_set(head, next, Release, guard) + .map(|_| { + guard.defer(move || drop(head.into_owned())); + Some(NoDrop::into_inner(ptr::read(&n.data))) + }) + .map_err(|_| ()) + }, + None => Ok(None), + } + } + + /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if queue + /// is empty or the data does not satisfy the condition; `Err(())` if lost race to pop. 
+ #[inline(always)] + fn pop_if_internal(&self, condition: F, guard: &Guard) -> Result, ()> + where + T: Sync, + F: Fn(&T) -> bool, + { + let head = self.head.load(Acquire, guard); + let h = unsafe { head.deref() }; + let next = h.next.load(Acquire, guard); + match unsafe { next.as_ref() } { + Some(n) if condition(&n.data) => unsafe { + self.head + .compare_and_set(head, next, Release, guard) + .map(|_| { + guard.defer(move || drop(head.into_owned())); + Some(NoDrop::into_inner(ptr::read(&n.data))) + }) + .map_err(|_| ()) + }, + None | Some(_) => Ok(None), + } + } + + /// Attempts to dequeue from the front. + /// + /// Returns `None` if the queue is observed to be empty. + pub fn try_pop(&self, guard: &Guard) -> Option { + loop { + if let Ok(head) = self.pop_internal(guard) { + return head; + } + } + } + + /// Attempts to dequeue from the front, if the item satisfies the given condition. + /// + /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given + /// condition. + pub fn try_pop_if(&self, condition: F, guard: &Guard) -> Option + where + T: Sync, + F: Fn(&T) -> bool, + { + loop { + if let Ok(head) = self.pop_if_internal(&condition, guard) { + return head; + } + } + } +} + +impl Drop for Queue { + fn drop(&mut self) { + unsafe { + let guard = &unprotected(); + + while let Some(_) = self.try_pop(guard) {} + + // Destroy the remaining sentinel node. 
+ let sentinel = self.head.load(Relaxed, guard); + drop(sentinel.into_owned()); + } + } +} + + +#[cfg(test)] +mod test { + use {pin}; + + use core::sync::atomic::Ordering; + use crossbeam_utils::scoped; + + struct Queue { + queue: super::Queue, + } + + impl Queue { + pub fn new() -> Queue { + Queue { queue: super::Queue::new() } + } + + pub fn push(&self, t: T) { + let guard = &pin(); + self.queue.push(t, guard); + } + + pub fn is_empty(&self) -> bool { + let guard = &pin(); + let head = self.queue.head.load(Ordering::Acquire, guard); + let h = unsafe { head.deref() }; + h.next.load(Ordering::Acquire, guard).is_null() + } + + pub fn try_pop(&self) -> Option { + let guard = &pin(); + self.queue.try_pop(guard) + } + + pub fn pop(&self) -> T { + loop { + match self.try_pop() { + None => continue, + Some(t) => return t, + } + } + } + } + + const CONC_COUNT: i64 = 1000000; + + #[test] + fn push_try_pop_1() { + let q: Queue = Queue::new(); + assert!(q.is_empty()); + q.push(37); + assert!(!q.is_empty()); + assert_eq!(q.try_pop(), Some(37)); + assert!(q.is_empty()); + } + + #[test] + fn push_try_pop_2() { + let q: Queue = Queue::new(); + assert!(q.is_empty()); + q.push(37); + q.push(48); + assert_eq!(q.try_pop(), Some(37)); + assert!(!q.is_empty()); + assert_eq!(q.try_pop(), Some(48)); + assert!(q.is_empty()); + } + + #[test] + fn push_try_pop_many_seq() { + let q: Queue = Queue::new(); + assert!(q.is_empty()); + for i in 0..200 { + q.push(i) + } + assert!(!q.is_empty()); + for i in 0..200 { + assert_eq!(q.try_pop(), Some(i)); + } + assert!(q.is_empty()); + } + + #[test] + fn push_pop_1() { + let q: Queue = Queue::new(); + assert!(q.is_empty()); + q.push(37); + assert!(!q.is_empty()); + assert_eq!(q.pop(), 37); + assert!(q.is_empty()); + } + + #[test] + fn push_pop_2() { + let q: Queue = Queue::new(); + q.push(37); + q.push(48); + assert_eq!(q.pop(), 37); + assert_eq!(q.pop(), 48); + } + + #[test] + fn push_pop_many_seq() { + let q: Queue = Queue::new(); + 
assert!(q.is_empty()); + for i in 0..200 { + q.push(i) + } + assert!(!q.is_empty()); + for i in 0..200 { + assert_eq!(q.pop(), i); + } + assert!(q.is_empty()); + } + + #[test] + fn push_try_pop_many_spsc() { + let q: Queue = Queue::new(); + assert!(q.is_empty()); + + scoped::scope(|scope| { + scope.spawn(|| { + let mut next = 0; + + while next < CONC_COUNT { + if let Some(elem) = q.try_pop() { + assert_eq!(elem, next); + next += 1; + } + } + }); + + for i in 0..CONC_COUNT { + q.push(i) + } + }); + } + + #[test] + fn push_try_pop_many_spmc() { + fn recv(_t: i32, q: &Queue) { + let mut cur = -1; + for _i in 0..CONC_COUNT { + if let Some(elem) = q.try_pop() { + assert!(elem > cur); + cur = elem; + + if cur == CONC_COUNT - 1 { + break; + } + } + } + } + + let q: Queue = Queue::new(); + assert!(q.is_empty()); + let qr = &q; + scoped::scope(|scope| { + for i in 0..3 { + scope.spawn(move || recv(i, qr)); + } + + scope.spawn(|| for i in 0..CONC_COUNT { + q.push(i); + }) + }); + } + + #[test] + fn push_try_pop_many_mpmc() { + enum LR { + Left(i64), + Right(i64), + } + + let q: Queue = Queue::new(); + assert!(q.is_empty()); + + scoped::scope(|scope| for _t in 0..2 { + scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT { + q.push(LR::Left(i)) + }); + scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT { + q.push(LR::Right(i)) + }); + scope.spawn(|| { + let mut vl = vec![]; + let mut vr = vec![]; + for _i in 0..CONC_COUNT { + match q.try_pop() { + Some(LR::Left(x)) => vl.push(x), + Some(LR::Right(x)) => vr.push(x), + _ => {} + } + } + + let mut vl2 = vl.clone(); + let mut vr2 = vr.clone(); + vl2.sort(); + vr2.sort(); + + assert_eq!(vl, vl2); + assert_eq!(vr, vr2); + }); + }); + } + + #[test] + fn push_pop_many_spsc() { + let q: Queue = Queue::new(); + + scoped::scope(|scope| { + scope.spawn(|| { + let mut next = 0; + while next < CONC_COUNT { + assert_eq!(q.pop(), next); + next += 1; + } + }); + + for i in 0..CONC_COUNT { + q.push(i) + } + }); + assert!(q.is_empty()); + } + + 
#[test] + fn is_empty_dont_pop() { + let q: Queue = Queue::new(); + q.push(20); + q.push(20); + assert!(!q.is_empty()); + assert!(!q.is_empty()); + assert!(q.try_pop().is_some()); + } +} diff --git a/third_party/rust/crossbeam-epoch/.cargo-checksum.json b/third_party/rust/crossbeam-epoch/.cargo-checksum.json index 771a5f53e3b1..6ab16c3eda53 100644 --- a/third_party/rust/crossbeam-epoch/.cargo-checksum.json +++ b/third_party/rust/crossbeam-epoch/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"d84605e26d95fabc8172af7a621d3e48117b5180d389c6a166d15acb09c9ed9f","CHANGELOG.md":"5e62172f395348eb92a3fd2532ba5d65a7f13286449a3698b41f3aac7a9a4e57","Cargo.toml":"6bcfcac3b6b20026d1020890fcd8cd5f6ceff33741b92fea001993696e2aed17","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"8728114db9ab19bca8e07b36f1cccd1e6a57db6ea03be08679aef2a982736532","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"41b2d03e2cfd46912a3722295843b841e74e10eae6eb23586d3bc3b6d0a41e32","src/atomic.rs":"469ae38d3e8b37eec79c1c21a29a63cd357e49f34f4b6cdde6817f8e1267bd8d","src/collector.rs":"ebebbf1229a0d5339b938825d0dca9dc8642f9fa5bbceafb4e371477186ed4b4","src/default.rs":"804c217df80e0b6df3c6e90c5d6f5153c153567ac28cc75cc62042ba75d24bf2","src/deferred.rs":"1bd6c66c58f92714088b6f9f811368a123143a5f03cf4afc4b19ab24f3181387","src/epoch.rs":"25b85734a4ec5bedb0384a1fe976ec97056a88910a046a270a3e38558f7dbd4b","src/garbage.rs":"b77a8f87701dca8b63d858bb234137335455b6fc1f223e73c7609542d13daa43","src/guard.rs":"08975d989ba558aba90d64865594b155b2135e628414f77bb8afb9de427a2e0d","src/internal.rs":"a5a6a52999ce99294d544ac7cb82cb820e78f0c41315fc8d7494d21ca6da1135","sr
c/lib.rs":"f3093bc3411f2bd94d662c3cf8719411b62793449b3db1699865f4c08c207af1","src/sync/list.rs":"57c3674c40e30eaf92689ab0e09973d7d161e52a5bdb5b5481b62fd0d10fb4eb","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"868b5bd651e54216fa1827d668ab564c120779113ae7a2a056fee4371db1066c"},"package":"927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150"} \ No newline at end of file +{"files":{".travis.yml":"b096077a6f20d96a6f6d824b98b94f73221ef7330290839ff35ad8c586dbc2e4","CHANGELOG.md":"3f0652c2ad1fc46b10d22cc3a5ad5fd8b737746dd3f3bc20d1e2a90432391892","Cargo.toml":"dc814f5487179536504adc4c77cacd827cd09b20dc81f49d3257553843599fb9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"2721d525d6ea1309b5ce780d7748303ee24eecff074243f086bbf37768141efb","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"25ce494d162c4b730608e865894bda7fee6fdded5544f00b8882e482e39c12df","src/atomic.rs":"e9383337a4754c022a8d3c06372910299cb8318b620f26fe50347b244c4caee4","src/collector.rs":"0a068c19f67b094c52cd9e0e2cf4e6b7630cd6af810769cfebe4274631065e55","src/default.rs":"67c0e52f2ce85bc205e61a4f807848c0aab93dfcc034e8c460f7669694d4d43f","src/deferred.rs":"3e49824277fdc25a68498263a7ada67aca3977edef9545985f911ba42d7a2e61","src/epoch.rs":"47fb45f1cc07700473b25324dcdb00a086c5c145c69bed3eee6547552298fecf","src/guard.rs":"22c9d2a6c9a35e19f8d6da2cc69dc612226a1807e789291668f1ed85410dc351","src/internal.rs":"c2ee6dff11bb9a44afcff441fce04640da1bb070c778cedc9edf86c94b71aaf8","src/lib.rs":"325a7964f690d851006563341423ce69f9277db7e8bf21bb9139cdf22927f471","src/sync/list.rs":"abb9eae31f09d7c3692aed3c7ad
7a3ad6d692992af891037db8eba50d1245f0c","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"0254d182f820c8c880c9a80747501eb2cb9d53aa8cb958c04beceb39abf86aa9"},"package":"2af0e75710d6181e234c8ecc79f14a97907850a541b13b0be1dd10992f2e4620"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-epoch/.travis.yml b/third_party/rust/crossbeam-epoch/.travis.yml index 2cfadc310fcc..5d0752e42cbc 100644 --- a/third_party/rust/crossbeam-epoch/.travis.yml +++ b/third_party/rust/crossbeam-epoch/.travis.yml @@ -4,7 +4,6 @@ rust: - stable - beta - nightly - - 1.13.0 addons: apt: diff --git a/third_party/rust/crossbeam-epoch/CHANGELOG.md b/third_party/rust/crossbeam-epoch/CHANGELOG.md index 8554dd44481b..fa6d6c852184 100644 --- a/third_party/rust/crossbeam-epoch/CHANGELOG.md +++ b/third_party/rust/crossbeam-epoch/CHANGELOG.md @@ -6,6 +6,45 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ## [Unreleased] +## [0.4.3] - 2018-06-12 +## Changed +- Downgrade `crossbeam-utils` to 0.3 because it was a breaking change. + +## [0.4.2] - 2018-06-12 +### Added +- Expose the `Pointer` trait. +- Warn missing docs and missing debug impls. + +## Changed +- Update `crossbeam-utils` to 0.4. + +## [0.4.1] - 2018-03-20 +### Added +- Add `Debug` impls for `Collector`, `Handle`, and `Guard`. +- Add `load_consume` to `Atomic`. + +### Changed +- Rename `Collector::handle` to `Collector::register`. + +### Fixed +- Remove the `Send` implementation for `Handle` (this was a bug). Only + `Collector`s can be shared among multiple threads, while `Handle`s and + `Guard`s must stay within the thread in which they were created. + +## [0.4.0] - 2018-02-10 +### Changed +- Update dependencies. + +### Removed +- Remove support for Rust 1.13. + +## [0.3.0] - 2018-02-10 +### Added +- Add support for Rust 1.13. + +### Changed +- Improve documentation for CAS. 
+ ## [0.2.0] - 2017-11-29 ### Added - Add method `Owned::into_box`. @@ -22,5 +61,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ### Added - First version of the new epoch-based GC. -[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...HEAD +[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.3...HEAD +[0.4.3]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.2...v0.4.3 +[0.4.2]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.1...v0.4.2 +[0.4.1]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.0...v0.4.1 +[0.4.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.3.0...v0.4.0 +[0.3.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...v0.3.0 [0.2.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.1.0...v0.2.0 diff --git a/third_party/rust/crossbeam-epoch/Cargo.toml b/third_party/rust/crossbeam-epoch/Cargo.toml index b679defe5a36..7be00332b93a 100644 --- a/third_party/rust/crossbeam-epoch/Cargo.toml +++ b/third_party/rust/crossbeam-epoch/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "crossbeam-epoch" -version = "0.3.1" +version = "0.4.3" authors = ["The Crossbeam Project Developers"] description = "Epoch-based garbage collection" homepage = "https://github.com/crossbeam-rs/crossbeam-epoch" @@ -30,25 +30,21 @@ default-features = false version = "0.1" [dependencies.crossbeam-utils] -version = "0.2" +version = "0.3" default-features = false [dependencies.lazy_static] -version = "1.0.0" +version = "1" optional = true [dependencies.memoffset] version = "0.2" -[dependencies.nodrop] -version = "0.1.12" -default-features = false - [dependencies.scopeguard] version = "0.3" default-features = false [dev-dependencies.rand] -version = "0.3" +version = "0.4" [features] default = ["use_std"] diff --git a/third_party/rust/crossbeam-epoch/README.md b/third_party/rust/crossbeam-epoch/README.md index 
70ef3956ca77..10307e2ed6c5 100644 --- a/third_party/rust/crossbeam-epoch/README.md +++ b/third_party/rust/crossbeam-epoch/README.md @@ -17,7 +17,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -crossbeam-epoch = "0.2" +crossbeam-epoch = "0.4" ``` Next, add this to your crate: diff --git a/third_party/rust/crossbeam-epoch/examples/sanitize.rs b/third_party/rust/crossbeam-epoch/examples/sanitize.rs index 7635ae881fc2..e67680be87b6 100644 --- a/third_party/rust/crossbeam-epoch/examples/sanitize.rs +++ b/third_party/rust/crossbeam-epoch/examples/sanitize.rs @@ -54,8 +54,8 @@ fn main() { let threads = (0..16) .map(|_| { let a = a.clone(); - let h = collector.handle(); - thread::spawn(move || worker(a, h)) + let c = collector.clone(); + thread::spawn(move || worker(a, c.register())) }) .collect::>(); diff --git a/third_party/rust/crossbeam-epoch/src/atomic.rs b/third_party/rust/crossbeam-epoch/src/atomic.rs index 9023bd7ab37c..e94231e9174c 100644 --- a/third_party/rust/crossbeam-epoch/src/atomic.rs +++ b/third_party/rust/crossbeam-epoch/src/atomic.rs @@ -10,15 +10,17 @@ use core::sync::atomic::Ordering; use alloc::boxed::Box; use guard::Guard; +use crossbeam_utils::consume::AtomicConsume; /// Given ordering for the success case in a compare-exchange operation, returns the strongest /// appropriate ordering for the failure case. #[inline] fn strongest_failure_ordering(ord: Ordering) -> Ordering { + use self::Ordering::*; match ord { - Ordering::Relaxed | Ordering::Release => Ordering::Relaxed, - Ordering::Acquire | Ordering::AcqRel => Ordering::Acquire, - _ => Ordering::SeqCst, + Relaxed | Release => Relaxed, + Acquire | AcqRel => Acquire, + _ => SeqCst, } } @@ -133,8 +135,8 @@ unsafe impl Sync for Atomic {} impl Atomic { /// Returns a new atomic pointer pointing to the tagged pointer `data`. 
- fn from_data(data: usize) -> Atomic { - Atomic { + fn from_usize(data: usize) -> Self { + Self { data: AtomicUsize::new(data), _marker: PhantomData, } @@ -151,7 +153,7 @@ impl Atomic { /// ``` #[cfg(not(feature = "nightly"))] pub fn null() -> Atomic { - Atomic { + Self { data: ATOMIC_USIZE_INIT, _marker: PhantomData, } @@ -205,7 +207,32 @@ impl Atomic { /// let p = a.load(SeqCst, guard); /// ``` pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_data(self.data.load(ord)) } + unsafe { Shared::from_usize(self.data.load(ord)) } + } + + /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. + /// + /// This is similar to the "acquire" ordering, except that an ordering is + /// only guaranteed with operations that "depend on" the result of the load. + /// However consume loads are usually much faster than acquire loads on + /// architectures with a weak memory model since they don't require memory + /// fence instructions. + /// + /// The exact definition of "depend on" is a bit vague, but it works as you + /// would expect in practice since a lot of software, especially the Linux + /// kernel, rely on this behavior. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load_consume(guard); + /// ``` + pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.load_consume()) } } /// Stores a `Shared` or `Owned` pointer into the atomic pointer. 
@@ -226,7 +253,7 @@ impl Atomic { /// a.store(Owned::new(1234), SeqCst); /// ``` pub fn store<'g, P: Pointer>(&self, new: P, ord: Ordering) { - self.data.store(new.into_data(), ord); + self.data.store(new.into_usize(), ord); } /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous @@ -248,7 +275,7 @@ impl Atomic { /// let p = a.swap(Shared::null(), SeqCst, guard); /// ``` pub fn swap<'g, P: Pointer>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_data(self.data.swap(new.into_data(), ord)) } + unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) } } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current @@ -288,14 +315,14 @@ impl Atomic { O: CompareAndSetOrdering, P: Pointer, { - let new = new.into_data(); + let new = new.into_usize(); self.data - .compare_exchange(current.into_data(), new, ord.success(), ord.failure()) - .map(|_| unsafe { Shared::from_data(new) }) + .compare_exchange(current.into_usize(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_usize(new) }) .map_err(|current| unsafe { CompareAndSetError { - current: Shared::from_data(current), - new: P::from_data(new), + current: Shared::from_usize(current), + new: P::from_usize(new), } }) } @@ -358,14 +385,14 @@ impl Atomic { O: CompareAndSetOrdering, P: Pointer, { - let new = new.into_data(); + let new = new.into_usize(); self.data - .compare_exchange_weak(current.into_data(), new, ord.success(), ord.failure()) - .map(|_| unsafe { Shared::from_data(new) }) + .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_usize(new) }) .map_err(|current| unsafe { CompareAndSetError { - current: Shared::from_data(current), - new: P::from_data(new), + current: Shared::from_usize(current), + new: P::from_usize(new), } }) } @@ -392,7 +419,7 @@ impl Atomic { /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` 
pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_data(self.data.fetch_and(val | !low_bits::(), ord)) } + unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::(), ord)) } } /// Bitwise "or" with the current tag. @@ -417,7 +444,7 @@ impl Atomic { /// assert_eq!(a.load(SeqCst, guard).tag(), 3); /// ``` pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_data(self.data.fetch_or(val & low_bits::(), ord)) } + unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::(), ord)) } } /// Bitwise "xor" with the current tag. @@ -442,7 +469,7 @@ impl Atomic { /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_data(self.data.fetch_xor(val & low_bits::(), ord)) } + unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::(), ord)) } } } @@ -473,7 +500,7 @@ impl Clone for Atomic { /// atomics or fences. fn clone(&self) -> Self { let data = self.data.load(Ordering::Relaxed); - Atomic::from_data(data) + Atomic::from_usize(data) } } @@ -496,7 +523,7 @@ impl From> for Atomic { fn from(owned: Owned) -> Self { let data = owned.data; mem::forget(owned); - Self::from_data(data) + Self::from_usize(data) } } @@ -523,7 +550,7 @@ impl<'g, T> From> for Atomic { /// let a = Atomic::::from(Shared::::null()); /// ``` fn from(ptr: Shared<'g, T>) -> Self { - Self::from_data(ptr.data) + Self::from_usize(ptr.data) } } @@ -539,17 +566,17 @@ impl From<*const T> for Atomic { /// let a = Atomic::::from(ptr::null::()); /// ``` fn from(raw: *const T) -> Self { - Self::from_data(raw as usize) + Self::from_usize(raw as usize) } } /// A trait for either `Owned` or `Shared` pointers. pub trait Pointer { /// Returns the machine representation of the pointer. 
- fn into_data(self) -> usize; + fn into_usize(self) -> usize; /// Returns a new pointer pointing to the tagged pointer `data`. - unsafe fn from_data(data: usize) -> Self; + unsafe fn from_usize(data: usize) -> Self; } /// An owned heap-allocated object. @@ -565,7 +592,7 @@ pub struct Owned { impl Pointer for Owned { #[inline] - fn into_data(self) -> usize { + fn into_usize(self) -> usize { let data = self.data; mem::forget(self); data @@ -577,7 +604,7 @@ impl Pointer for Owned { /// /// Panics if the data is zero in debug mode. #[inline] - unsafe fn from_data(data: usize) -> Self { + unsafe fn from_usize(data: usize) -> Self { debug_assert!(data != 0, "converting zero into `Owned`"); Owned { data: data, @@ -619,7 +646,7 @@ impl Owned { /// ``` pub unsafe fn from_raw(raw: *mut T) -> Owned { ensure_aligned(raw); - Self::from_data(raw as usize) + Self::from_usize(raw as usize) } /// Converts the owned pointer into a [`Shared`]. @@ -636,7 +663,7 @@ impl Owned { /// /// [`Shared`]: struct.Shared.html pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_data(self.into_data()) } + unsafe { Shared::from_usize(self.into_usize()) } } /// Converts the owned pointer into a `Box`. 
@@ -680,12 +707,12 @@ impl Owned { /// /// let o = Owned::new(0u64); /// assert_eq!(o.tag(), 0); - /// let o = o.with_tag(5); - /// assert_eq!(o.tag(), 5); + /// let o = o.with_tag(2); + /// assert_eq!(o.tag(), 2); /// ``` pub fn with_tag(self, tag: usize) -> Owned { - let data = self.into_data(); - unsafe { Self::from_data(data_with_tag::(data, tag)) } + let data = self.into_usize(); + unsafe { Self::from_usize(data_with_tag::(data, tag)) } } } @@ -804,12 +831,12 @@ impl<'g, T> Copy for Shared<'g, T> {} impl<'g, T> Pointer for Shared<'g, T> { #[inline] - fn into_data(self) -> usize { + fn into_usize(self) -> usize { self.data } #[inline] - unsafe fn from_data(data: usize) -> Self { + unsafe fn from_usize(data: usize) -> Self { Shared { data: data, _marker: PhantomData, @@ -973,7 +1000,7 @@ impl<'g, T> Shared<'g, T> { self.as_raw() != ptr::null(), "converting a null `Shared` into `Owned`" ); - Owned::from_data(self.data) + Owned::from_usize(self.data) } /// Returns the tag stored within the pointer. 
@@ -984,10 +1011,10 @@ impl<'g, T> Shared<'g, T> { /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// - /// let a = Atomic::::from(Owned::new(0u64).with_tag(5)); + /// let a = Atomic::::from(Owned::new(0u64).with_tag(2)); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); - /// assert_eq!(p.tag(), 5); + /// assert_eq!(p.tag(), 2); /// ``` pub fn tag(&self) -> usize { let (_, tag) = decompose_data::(self.data); @@ -1006,14 +1033,14 @@ impl<'g, T> Shared<'g, T> { /// let a = Atomic::new(0u64); /// let guard = &epoch::pin(); /// let p1 = a.load(SeqCst, guard); - /// let p2 = p1.with_tag(5); + /// let p2 = p1.with_tag(2); /// /// assert_eq!(p1.tag(), 0); - /// assert_eq!(p2.tag(), 5); + /// assert_eq!(p2.tag(), 2); /// assert_eq!(p1.as_raw(), p2.as_raw()); /// ``` pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { - unsafe { Self::from_data(data_with_tag::(self.data, tag)) } + unsafe { Self::from_usize(data_with_tag::(self.data, tag)) } } } @@ -1034,7 +1061,7 @@ impl<'g, T> From<*const T> for Shared<'g, T> { /// ``` fn from(raw: *const T) -> Self { ensure_aligned(raw); - unsafe { Self::from_data(raw as usize) } + unsafe { Self::from_usize(raw as usize) } } } diff --git a/third_party/rust/crossbeam-epoch/src/collector.rs b/third_party/rust/crossbeam-epoch/src/collector.rs index 2dedfc5fc8f9..faf10f899949 100644 --- a/third_party/rust/crossbeam-epoch/src/collector.rs +++ b/third_party/rust/crossbeam-epoch/src/collector.rs @@ -7,20 +7,21 @@ /// /// let collector = Collector::new(); /// -/// let handle = collector.handle(); +/// let handle = collector.register(); /// drop(collector); // `handle` still works after dropping `collector` /// /// handle.pin().flush(); /// ``` use alloc::arc::Arc; +use core::fmt; use internal::{Global, Local}; use guard::Guard; /// An epoch-based garbage collector. 
pub struct Collector { - global: Arc, + pub(crate) global: Arc, } unsafe impl Send for Collector {} @@ -32,9 +33,9 @@ impl Collector { Collector { global: Arc::new(Global::new()) } } - /// Creates a new handle for the collector. - pub fn handle(&self) -> Handle { - Handle { local: Local::register(&self.global) } + /// Registers a new handle for the collector. + pub fn register(&self) -> Handle { + Local::register(self) } } @@ -45,9 +46,23 @@ impl Clone for Collector { } } +impl fmt::Debug for Collector { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Collector").finish() + } +} + +impl PartialEq for Collector { + /// Checks if both handles point to the same collector. + fn eq(&self, rhs: &Collector) -> bool { + Arc::ptr_eq(&self.global, &rhs.global) + } +} +impl Eq for Collector {} + /// A handle to a garbage collector. pub struct Handle { - local: *const Local, + pub(crate) local: *const Local, } impl Handle { @@ -62,9 +77,13 @@ impl Handle { pub fn is_pinned(&self) -> bool { unsafe { (*self.local).is_pinned() } } -} -unsafe impl Send for Handle {} + /// Returns the `Collector` associated with this handle. 
+ #[inline] + pub fn collector(&self) -> &Collector { + unsafe { (*self.local).collector() } + } +} impl Drop for Handle { #[inline] @@ -85,6 +104,12 @@ impl Clone for Handle { } } +impl fmt::Debug for Handle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Handle").finish() + } +} + #[cfg(test)] mod tests { use std::mem; @@ -100,7 +125,7 @@ mod tests { #[test] fn pin_reentrant() { let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); drop(collector); assert!(!handle.is_pinned()); @@ -119,7 +144,7 @@ mod tests { #[test] fn flush_local_bag() { let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); drop(collector); for _ in 0..100 { @@ -128,9 +153,9 @@ mod tests { let a = Owned::new(7).into_shared(guard); guard.defer(move || a.into_owned()); - assert!(!(*guard.get_local()).is_bag_empty()); + assert!(!(*(*guard.local).bag.get()).is_empty()); - while !(*guard.get_local()).is_bag_empty() { + while !(*(*guard.local).bag.get()).is_empty() { guard.flush(); } } @@ -140,7 +165,7 @@ mod tests { #[test] fn garbage_buffering() { let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); drop(collector); let guard = &handle.pin(); @@ -149,7 +174,7 @@ mod tests { let a = Owned::new(7).into_shared(guard); guard.defer(move || a.into_owned()); } - assert!(!(*guard.get_local()).is_bag_empty()); + assert!(!(*(*guard.local).bag.get()).is_empty()); } } @@ -157,29 +182,22 @@ mod tests { fn pin_holds_advance() { let collector = Collector::new(); - let threads = (0..NUM_THREADS) - .map(|_| { - scoped::scope(|scope| { - scope.spawn(|| { - let handle = collector.handle(); - for _ in 0..500_000 { - let guard = &handle.pin(); + scoped::scope(|scope| { + for _ in 0..NUM_THREADS { + scope.spawn(|| { + let handle = collector.register(); + for _ in 0..500_000 { + let guard = &handle.pin(); - let before = 
collector.global.load_epoch(Ordering::Relaxed); - collector.global.collect(guard); - let after = collector.global.load_epoch(Ordering::Relaxed); + let before = collector.global.epoch.load(Ordering::Relaxed); + collector.global.collect(guard); + let after = collector.global.epoch.load(Ordering::Relaxed); - assert!(after.wrapping_sub(before) <= 2); - } - }) - }) - }) - .collect::>(); - drop(collector); - - for t in threads { - t.join(); - } + assert!(after.wrapping_sub(before) <= 2); + } + }); + } + }) } #[test] @@ -188,7 +206,7 @@ mod tests { static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); unsafe { let guard = &handle.pin(); @@ -221,7 +239,7 @@ mod tests { static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); unsafe { let guard = &handle.pin(); @@ -262,7 +280,7 @@ mod tests { } let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); unsafe { let guard = &handle.pin(); @@ -287,7 +305,7 @@ mod tests { static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); unsafe { let guard = &handle.pin(); @@ -323,7 +341,7 @@ mod tests { } let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); let mut guard = handle.pin(); @@ -351,7 +369,7 @@ mod tests { static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); unsafe { let guard = &handle.pin(); @@ -395,28 +413,22 @@ mod tests { let collector = Collector::new(); - let threads = (0..THREADS) - .map(|_| { - scoped::scope(|scope| { - scope.spawn(|| { - let handle = collector.handle(); - for _ in 0..COUNT { - let guard = &handle.pin(); 
- unsafe { - let a = Owned::new(Elem(7i32)).into_shared(guard); - guard.defer(move || a.into_owned()); - } + scoped::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|| { + let handle = collector.register(); + for _ in 0..COUNT { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer(move || a.into_owned()); } - }) - }) - }) - .collect::>(); + } + }); + } + }); - for t in threads { - t.join(); - } - - let handle = collector.handle(); + let handle = collector.register(); while DROPS.load(Ordering::Relaxed) < COUNT * THREADS { let guard = &handle.pin(); collector.global.collect(guard); diff --git a/third_party/rust/crossbeam-epoch/src/default.rs b/third_party/rust/crossbeam-epoch/src/default.rs index e45e253d77ac..55554bcb2ffc 100644 --- a/third_party/rust/crossbeam-epoch/src/default.rs +++ b/third_party/rust/crossbeam-epoch/src/default.rs @@ -14,7 +14,7 @@ lazy_static! { thread_local! { /// The per-thread participant for the default garbage collector. - static HANDLE: Handle = COLLECTOR.handle(); + static HANDLE: Handle = COLLECTOR.register(); } /// Pins the current thread. @@ -38,3 +38,9 @@ pub fn is_pinned() -> bool { pub fn default_handle() -> Handle { HANDLE.with(|handle| handle.clone()) } + +/// Returns the default handle associated with the current thread. 
+#[inline] +pub fn default_collector() -> &'static Collector { + &COLLECTOR +} diff --git a/third_party/rust/crossbeam-epoch/src/deferred.rs b/third_party/rust/crossbeam-epoch/src/deferred.rs index 6634a2ac4bb7..3063f9829c90 100644 --- a/third_party/rust/crossbeam-epoch/src/deferred.rs +++ b/third_party/rust/crossbeam-epoch/src/deferred.rs @@ -1,3 +1,5 @@ +use core::fmt; +use core::marker::PhantomData; use core::mem; use core::ptr; use alloc::boxed::Box; @@ -17,6 +19,13 @@ type Data = [usize; DATA_WORDS]; pub struct Deferred { call: unsafe fn(*mut u8), data: Data, + _marker: PhantomData<*mut ()>, // !Send + !Sync +} + +impl fmt::Debug for Deferred { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "Deferred {{ ... }}") + } } impl Deferred { @@ -37,7 +46,8 @@ impl Deferred { Deferred { call: call::, - data: data, + data, + _marker: PhantomData, } } else { let b: Box = Box::new(f); @@ -51,23 +61,18 @@ impl Deferred { Deferred { call: call::, - data: data, + data, + _marker: PhantomData, } } } } - /// Calls the function or panics if it was already called. + /// Calls the function. 
#[inline] - pub fn call(&mut self) { - unsafe fn fail(_: *mut u8) { - panic!("cannot call `FnOnce` more than once"); - } - - let call = mem::replace(&mut self.call, fail); - unsafe { - call(&mut self.data as *mut Data as *mut u8); - } + pub fn call(mut self) { + let call = self.call; + unsafe { call(&mut self.data as *mut Data as *mut u8) }; } } @@ -81,7 +86,7 @@ mod tests { let fired = &Cell::new(false); let a = [0usize; 1]; - let mut d = Deferred::new(move || { + let d = Deferred::new(move || { drop(a); fired.set(true); }); @@ -96,7 +101,7 @@ mod tests { let fired = &Cell::new(false); let a = [0usize; 10]; - let mut d = Deferred::new(move || { + let d = Deferred::new(move || { drop(a); fired.set(true); }); @@ -106,42 +111,24 @@ mod tests { assert!(fired.get()); } - #[test] - #[should_panic(expected = "cannot call `FnOnce` more than once")] - fn twice_on_stack() { - let a = [0usize; 1]; - let mut d = Deferred::new(move || drop(a)); - d.call(); - d.call(); - } - - #[test] - #[should_panic(expected = "cannot call `FnOnce` more than once")] - fn twice_on_heap() { - let a = [0usize; 10]; - let mut d = Deferred::new(move || drop(a)); - d.call(); - d.call(); - } - #[test] fn string() { let a = "hello".to_string(); - let mut d = Deferred::new(move || assert_eq!(a, "hello")); + let d = Deferred::new(move || assert_eq!(a, "hello")); d.call(); } #[test] fn boxed_slice_i32() { let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); - let mut d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); + let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); d.call(); } #[test] fn long_slice_usize() { let a: [usize; 5] = [2, 3, 5, 7, 11]; - let mut d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); + let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); d.call(); } } diff --git a/third_party/rust/crossbeam-epoch/src/epoch.rs b/third_party/rust/crossbeam-epoch/src/epoch.rs index 1f277cfeef08..51076bbaa157 100644 --- 
a/third_party/rust/crossbeam-epoch/src/epoch.rs +++ b/third_party/rust/crossbeam-epoch/src/epoch.rs @@ -77,7 +77,7 @@ impl AtomicEpoch { #[inline] pub fn new(epoch: Epoch) -> Self { let data = AtomicUsize::new(epoch.data); - AtomicEpoch { data: data } + AtomicEpoch { data } } /// Loads a value from the atomic epoch. @@ -101,6 +101,6 @@ impl AtomicEpoch { #[inline] pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch { let data = self.data.compare_and_swap(current.data, new.data, ord); - Epoch { data: data } + Epoch { data } } } diff --git a/third_party/rust/crossbeam-epoch/src/guard.rs b/third_party/rust/crossbeam-epoch/src/guard.rs index d629d99c2be0..a538f81346cf 100644 --- a/third_party/rust/crossbeam-epoch/src/guard.rs +++ b/third_party/rust/crossbeam-epoch/src/guard.rs @@ -1,8 +1,10 @@ +use core::fmt; use core::ptr; use core::mem; -use garbage::Garbage; +use deferred::Deferred; use internal::Local; +use collector::Collector; /// A guard that keeps the current thread pinned. /// @@ -73,26 +75,10 @@ use internal::Local; /// /// [`pin`]: fn.pin.html pub struct Guard { - local: *const Local, + pub(crate) local: *const Local, } impl Guard { - /// Creates a new guard from a pointer to `Local`. - /// - /// # Safety - /// - /// The `local` should be a valid pointer created by `Local::register()`. - #[doc(hidden)] - pub unsafe fn new(local: *const Local) -> Guard { - Guard { local: local } - } - - /// Accesses the internal pointer to `Local`. - #[doc(hidden)] - pub unsafe fn get_local(&self) -> *const Local { - self.local - } - /// Stores a function so that it can be executed at some point after all currently pinned /// threads get unpinned. /// @@ -127,16 +113,29 @@ impl Guard { /// } /// ``` /// - /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed - /// by the closure must be `Send`. 
+ /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by + /// the closure must be `Send`. + /// + /// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove + /// `F: Send` for typical use cases. For example, consider the following code snippet, which + /// exemplifies the typical use case of deferring the deallocation of a shared reference: + /// + /// ```ignore + /// let shared = Owned::new(7i32).into_shared(guard); + /// guard.defer(Deferred::new(move || shared.into_owned())); // `Shared` is not `Send`! + /// ``` + /// + /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function, + /// because it's called only after the grace period and `shared` is no longer shared with other + /// threads. But we don't expect type systems to prove this. /// /// # Examples /// /// When a heap-allocated object in a data structure becomes unreachable, it has to be /// deallocated. However, the current thread and other threads may be still holding references - /// on the stack to that same object. Therefore it cannot be deallocated before those - /// references get dropped. This method can defer deallocation until all those threads get - /// unpinned and consequently drop all their references on the stack. + /// on the stack to that same object. Therefore it cannot be deallocated before those references + /// get dropped. This method can defer deallocation until all those threads get unpinned and + /// consequently drop all their references on the stack. /// /// ```rust /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; @@ -173,10 +172,8 @@ impl Guard { where F: FnOnce() -> R, { - let garbage = Garbage::new(|| drop(f())); - if let Some(local) = self.local.as_ref() { - local.defer(garbage, self); + local.defer(Deferred::new(move || drop(f())), self); } } @@ -300,6 +297,28 @@ impl Guard { f() } + + /// Returns the `Collector` associated with this guard. 
+ /// + /// This method is useful when you need to ensure that all guards used with + /// a data structure come from the same collector. + /// + /// If this method is called from an [`unprotected`] guard, then `None` is returned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch as epoch; + /// + /// let mut guard1 = epoch::pin(); + /// let mut guard2 = epoch::pin(); + /// assert!(guard1.collector() == guard2.collector()); + /// ``` + /// + /// [`unprotected`]: fn.unprotected.html + pub fn collector(&self) -> Option<&Collector> { + unsafe { self.local.as_ref().map(|local| local.collector()) } + } } impl Drop for Guard { @@ -321,6 +340,12 @@ impl Clone for Guard { } } +impl fmt::Debug for Guard { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Guard").finish() + } +} + /// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s. /// /// This guard should be used in special occasions only. Note that it doesn't actually keep any @@ -370,19 +395,19 @@ impl Clone for Guard { /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; -/// use std::ptr; +/// use std::mem::ManuallyDrop; /// use std::sync::atomic::Ordering::Relaxed; /// -/// struct Stack { -/// head: epoch::Atomic, +/// struct Stack { +/// head: Atomic>, /// } /// -/// struct Node { -/// data: u32, -/// next: epoch::Atomic, +/// struct Node { +/// data: ManuallyDrop, +/// next: Atomic>, /// } /// -/// impl Drop for Stack { +/// impl Drop for Stack { /// fn drop(&mut self) { /// unsafe { /// // Unprotected load. @@ -392,8 +417,10 @@ impl Clone for Guard { /// // Unprotected load. /// let next = n.next.load(Relaxed, epoch::unprotected()); /// -/// // Take ownership of the node, then drop it. -/// drop(node.into_owned()); +/// // Take ownership of the node, then drop its data and deallocate it. 
+/// let mut o = node.into_owned(); +/// ManuallyDrop::drop(&mut o.data); +/// drop(o); /// /// node = next; /// } diff --git a/third_party/rust/crossbeam-epoch/src/internal.rs b/third_party/rust/crossbeam-epoch/src/internal.rs index 9223f5338290..880246dbe0fd 100644 --- a/third_party/rust/crossbeam-epoch/src/internal.rs +++ b/third_party/rust/crossbeam-epoch/src/internal.rs @@ -15,32 +15,117 @@ //! //! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned. //! Guards are necessary for performing atomic operations, and for freeing/dropping locations. +//! +//! # Thread-local bag +//! +//! Objects that get unlinked from concurrent data structures must be stashed away until the global +//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects +//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current +//! global epoch and pushed into the global queue of bags. We store objects in thread-local storages +//! for amortizing the synchronization cost of pushing the garbages to a global queue. +//! +//! # Global queue +//! +//! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and +//! destroyed along the way. This design reduces contention on data structures. The global queue +//! cannot be explicitly accessed: the only way to interact with it is by calling functions +//! `defer()` that adds an object tothe thread-local bag, or `collect()` that manually triggers +//! garbage collection. +//! +//! Ideally each instance of concurrent data structure may have its own queue that gets fully +//! destroyed as soon as the data structure gets dropped. 
use core::cell::{Cell, UnsafeCell}; -use core::mem; +use core::mem::{self, ManuallyDrop}; use core::num::Wrapping; use core::ptr; use core::sync::atomic; use core::sync::atomic::Ordering; use alloc::boxed::Box; -use alloc::arc::Arc; use crossbeam_utils::cache_padded::CachePadded; -use nodrop::NoDrop; +use arrayvec::ArrayVec; use atomic::Owned; +use collector::{Handle, Collector}; use epoch::{AtomicEpoch, Epoch}; use guard::{unprotected, Guard}; -use garbage::{Bag, Garbage}; +use deferred::Deferred; use sync::list::{List, Entry, IterError, IsElement}; use sync::queue::Queue; -/// Number of bags to destroy. -const COLLECT_STEPS: usize = 8; +/// Maximum number of objects a bag can contain. +#[cfg(not(feature = "sanitize"))] +const MAX_OBJECTS: usize = 64; +#[cfg(feature = "sanitize")] +const MAX_OBJECTS: usize = 4; -/// Number of pinnings after which a participant will execute some deferred functions from the -/// global queue. -const PINNINGS_BETWEEN_COLLECT: usize = 128; +/// A bag of deferred functions. +#[derive(Default, Debug)] +pub struct Bag { + /// Stashed objects. + deferreds: ArrayVec<[Deferred; MAX_OBJECTS]>, +} + +/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions. +unsafe impl Send for Bag {} + +impl Bag { + /// Returns a new, empty bag. + pub fn new() -> Self { + Self::default() + } + + /// Returns `true` if the bag is empty. + pub fn is_empty(&self) -> bool { + self.deferreds.is_empty() + } + + /// Attempts to insert a deferred function into the bag. + /// + /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is + /// full. + /// + /// # Safety + /// + /// It should be safe for another thread to execute the given function. + pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> { + self.deferreds.try_push(deferred).map_err(|e| e.element()) + } + + /// Seals the bag with the given epoch. 
+ fn seal(self, epoch: Epoch) -> SealedBag { + SealedBag { epoch, bag: self } + } +} + +impl Drop for Bag { + fn drop(&mut self) { + // Call all deferred functions. + for deferred in self.deferreds.drain(..) { + deferred.call(); + } + } +} + +/// A pair of an epoch and a bag. +#[derive(Default, Debug)] +struct SealedBag { + epoch: Epoch, + bag: Bag, +} + +/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch. +unsafe impl Sync for SealedBag {} + +impl SealedBag { + /// Checks if it is safe to drop the bag w.r.t. the given global epoch. + fn is_expired(&self, global_epoch: Epoch) -> bool { + // A pinned participant can witness at most one epoch advancement. Therefore, any bag that + // is within one epoch of the current one cannot be destroyed yet. + global_epoch.wrapping_sub(self.epoch) >= 2 + } +} /// The global data for a garbage collector. pub struct Global { @@ -48,28 +133,26 @@ pub struct Global { locals: List, /// The global queue of bags of deferred functions. - queue: Queue<(Epoch, Bag)>, + queue: Queue, /// The global epoch. - epoch: CachePadded, + pub(crate) epoch: CachePadded, } impl Global { + /// Number of bags to destroy. + const COLLECT_STEPS: usize = 8; + /// Creates a new global data for garbage collection. #[inline] - pub fn new() -> Global { - Global { + pub fn new() -> Self { + Self { locals: List::new(), queue: Queue::new(), epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())), } } - /// Returns the current global epoch. - pub fn load_epoch(&self, ordering: Ordering) -> Epoch { - self.epoch.load(ordering) - } - /// Pushes the bag into the global queue and replaces the bag with a new empty bag. 
pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) { let bag = mem::replace(bag, Bag::new()); @@ -77,7 +160,7 @@ impl Global { atomic::fence(Ordering::SeqCst); let epoch = self.epoch.load(Ordering::Relaxed); - self.queue.push((epoch, bag), guard); + self.queue.push(bag.seal(epoch), guard); } /// Collects several bags from the global queue and executes deferred functions in them. @@ -91,22 +174,20 @@ impl Global { pub fn collect(&self, guard: &Guard) { let global_epoch = self.try_advance(guard); - let condition = |item: &(Epoch, Bag)| { - // A pinned participant can witness at most one epoch advancement. Therefore, any bag - // that is within one epoch of the current one cannot be destroyed yet. - global_epoch.wrapping_sub(item.0) >= 2 - }; - let steps = if cfg!(feature = "sanitize") { usize::max_value() } else { - COLLECT_STEPS + Self::COLLECT_STEPS }; for _ in 0..steps { - match self.queue.try_pop_if(&condition, guard) { + match self.queue.try_pop_if( + &|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch), + guard, + ) + { None => break, - Some(bag) => drop(bag), + Some(sealed_bag) => drop(sealed_bag), } } } @@ -172,10 +253,10 @@ pub struct Local { /// A reference to the global data. /// /// When all guards and handles get dropped, this reference is destroyed. - global: UnsafeCell>>, + collector: UnsafeCell>, /// The local bag of deferred functions. - bag: UnsafeCell, + pub(crate) bag: UnsafeCell, /// The number of guards keeping this participant pinned. guard_count: Cell, @@ -189,38 +270,40 @@ pub struct Local { pin_count: Cell>, } -unsafe impl Sync for Local {} - impl Local { + /// Number of pinnings after which a participant will execute some deferred functions from the + /// global queue. + const PINNINGS_BETWEEN_COLLECT: usize = 128; + /// Registers a new `Local` in the provided `Global`. 
- pub fn register(global: &Arc) -> *const Local { + pub fn register(collector: &Collector) -> Handle { unsafe { // Since we dereference no pointers in this block, it is safe to use `unprotected`. let local = Owned::new(Local { entry: Entry::default(), epoch: AtomicEpoch::new(Epoch::starting()), - global: UnsafeCell::new(NoDrop::new(global.clone())), + collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())), bag: UnsafeCell::new(Bag::new()), guard_count: Cell::new(0), handle_count: Cell::new(1), pin_count: Cell::new(Wrapping(0)), }).into_shared(&unprotected()); - global.locals.insert(local, &unprotected()); - local.as_raw() + collector.global.locals.insert(local, &unprotected()); + Handle { local: local.as_raw() } } } - /// Returns whether the local garbage bag is empty. - #[inline] - pub fn is_bag_empty(&self) -> bool { - unsafe { (*self.bag.get()).is_empty() } - } - /// Returns a reference to the `Global` in which this `Local` resides. #[inline] pub fn global(&self) -> &Global { - unsafe { &*self.global.get() } + &self.collector().global + } + + /// Returns a reference to the `Collector` in which this `Local` resides. + #[inline] + pub fn collector(&self) -> &Collector { + unsafe { &**self.collector.get() } } /// Returns `true` if the current participant is pinned. @@ -229,12 +312,17 @@ impl Local { self.guard_count.get() > 0 } - pub fn defer(&self, mut garbage: Garbage, guard: &Guard) { - let bag = unsafe { &mut *self.bag.get() }; + /// Adds `deferred` to the thread-local bag. + /// + /// # Safety + /// + /// It should be safe for another thread to execute the given function. + pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) { + let bag = &mut *self.bag.get(); - while let Err(g) = bag.try_push(garbage) { + while let Err(d) = bag.try_push(deferred) { self.global().push_bag(bag, guard); - garbage = g; + deferred = d; } } @@ -251,7 +339,7 @@ impl Local { /// Pins the `Local`. 
#[inline] pub fn pin(&self) -> Guard { - let guard = unsafe { Guard::new(self) }; + let guard = Guard { local: self }; let guard_count = self.guard_count.get(); self.guard_count.set(guard_count.checked_add(1).unwrap()); @@ -287,7 +375,7 @@ impl Local { // After every `PINNINGS_BETWEEN_COLLECT` try advancing the epoch and collecting // some garbage. - if count.0 % PINNINGS_BETWEEN_COLLECT == 0 { + if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 { self.global().collect(&guard); } } @@ -327,7 +415,7 @@ impl Local { self.epoch.store(global_epoch, Ordering::Release); // However, we don't need a following `SeqCst` fence, because it is safe for memory - // accesses from the new epoch to be executed before updating the local epoch. At + // accesses from the new epoch to be executed before updating the local epoch. At // worse, other threads will see the new epoch late and delay GC slightly. } } @@ -376,15 +464,15 @@ impl Local { // Take the reference to the `Global` out of this `Local`. Since we're not protected // by a guard at this time, it's crucial that the reference is read before marking the // `Local` as deleted. - let global: Arc = ptr::read(&**self.global.get()); + let collector: Collector = ptr::read(&*(*self.collector.get())); // Mark this node in the linked list as deleted. self.entry.delete(&unprotected()); - // Finally, drop the reference to the global. Note that this might be the last - // reference to the `Global`. If so, the global data will be destroyed and all deferred - // functions in its queue will be executed. - drop(global); + // Finally, drop the reference to the global. Note that this might be the last reference + // to the `Global`. If so, the global data will be destroyed and all deferred functions + // in its queue will be executed. 
+ drop(collector); } } } @@ -407,3 +495,49 @@ impl IsElement for Local { drop(Box::from_raw(local as *const Local as *mut Local)); } } + +#[cfg(test)] +mod tests { + use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + use std::sync::atomic::Ordering; + + use super::*; + + #[test] + fn check_defer() { + static FLAG: AtomicUsize = ATOMIC_USIZE_INIT; + fn set() { + FLAG.store(42, Ordering::Relaxed); + } + + let d = Deferred::new(set); + assert_eq!(FLAG.load(Ordering::Relaxed), 0); + d.call(); + assert_eq!(FLAG.load(Ordering::Relaxed), 42); + } + + #[test] + fn check_bag() { + static FLAG: AtomicUsize = ATOMIC_USIZE_INIT; + fn incr() { + FLAG.fetch_add(1, Ordering::Relaxed); + } + + let mut bag = Bag::new(); + assert!(bag.is_empty()); + + for _ in 0..MAX_OBJECTS { + assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() }); + assert!(!bag.is_empty()); + assert_eq!(FLAG.load(Ordering::Relaxed), 0); + } + + let result = unsafe { bag.try_push(Deferred::new(incr)) }; + assert!(result.is_err()); + assert!(!bag.is_empty()); + assert_eq!(FLAG.load(Ordering::Relaxed), 0); + + drop(bag); + assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/lib.rs b/third_party/rust/crossbeam-epoch/src/lib.rs index 91ee1c133944..8ba99d5be1da 100644 --- a/third_party/rust/crossbeam-epoch/src/lib.rs +++ b/third_party/rust/crossbeam-epoch/src/lib.rs @@ -36,8 +36,8 @@ //! # Garbage //! //! Objects that get removed from concurrent collections must be stashed away until all currently -//! pinned participants get unpinned. Such objects can be stored into a [`Garbage`], where they are -//! kept until the right time for their destruction comes. +//! pinned participants get unpinned. Such objects can be stored into a thread-local or global +//! storage, where they are kept until the right time for their destruction comes. //! //! There is a global shared instance of garbage queue. You can [`defer`] the execution of an //! 
arbitrary function until the global epoch is advanced enough. Most notably, concurrent data @@ -58,11 +58,13 @@ #![cfg_attr(feature = "nightly", feature(alloc))] #![cfg_attr(not(test), no_std)] +#![warn(missing_docs, missing_debug_implementations)] + +#[cfg(test)] +extern crate core; #[cfg(all(not(test), feature = "use_std"))] #[macro_use] extern crate std; -#[cfg(test)] -extern crate core; // Use liballoc on nightly to avoid a dependency on libstd #[cfg(feature = "nightly")] @@ -75,13 +77,6 @@ mod alloc { pub use self::std::sync as arc; } -#[cfg(feature = "manually_drop")] -mod nodrop { - pub use std::mem::ManuallyDrop as NoDrop; -} -#[cfg(not(feature = "manually_drop"))] -extern crate nodrop; - extern crate arrayvec; extern crate crossbeam_utils; #[cfg(feature = "use_std")] @@ -98,13 +93,12 @@ mod collector; mod default; mod deferred; mod epoch; -mod garbage; mod guard; mod internal; mod sync; -pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared}; +pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared, Pointer}; pub use self::guard::{unprotected, Guard}; #[cfg(feature = "use_std")] -pub use self::default::{default_handle, is_pinned, pin}; +pub use self::default::{default_collector, default_handle, is_pinned, pin}; pub use self::collector::{Collector, Handle}; diff --git a/third_party/rust/crossbeam-epoch/src/sync/list.rs b/third_party/rust/crossbeam-epoch/src/sync/list.rs index 3695751c330a..f932d4f36fd3 100644 --- a/third_party/rust/crossbeam-epoch/src/sync/list.rs +++ b/third_party/rust/crossbeam-epoch/src/sync/list.rs @@ -130,8 +130,8 @@ pub enum IterError { impl Default for Entry { /// Returns the empty entry. - fn default() -> Entry { - Entry { next: Atomic::null() } + fn default() -> Self { + Self { next: Atomic::null() } } } @@ -150,8 +150,8 @@ impl Entry { impl> List { /// Returns a new, empty linked list. 
- pub fn new() -> List { - List { + pub fn new() -> Self { + Self { head: Atomic::null(), _marker: PhantomData, } @@ -204,7 +204,7 @@ impl> List { /// thread will continue to iterate over the same list. pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> { Iter { - guard: guard, + guard, pred: &self.head, curr: self.head.load(Acquire, guard), head: &self.head, @@ -289,7 +289,7 @@ impl<'g, T: 'g, C: IsElement> Iterator for Iter<'g, T, C> { #[cfg(test)] mod tests { - use {Collector, Owned, Guard}; + use {Collector, Owned}; use crossbeam_utils::scoped; use std::sync::Barrier; use super::*; @@ -313,7 +313,7 @@ mod tests { #[test] fn insert() { let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); let guard = handle.pin(); let l: List = List::new(); @@ -352,7 +352,7 @@ mod tests { #[test] fn delete() { let collector = Collector::new(); - let handle = collector.handle(); + let handle = collector.register(); let guard = handle.pin(); let l: List = List::new(); @@ -400,7 +400,7 @@ mod tests { s.spawn(|| { b.wait(); - let handle = collector.handle(); + let handle = collector.register(); let guard: Guard = handle.pin(); let mut v = Vec::with_capacity(ITERS); @@ -420,7 +420,7 @@ mod tests { }); }); - let handle = collector.handle(); + let handle = collector.register(); let guard = handle.pin(); let mut iter = l.iter(&guard); @@ -439,7 +439,7 @@ mod tests { s.spawn(|| { b.wait(); - let handle = collector.handle(); + let handle = collector.register(); let guard: Guard = handle.pin(); let mut v = Vec::with_capacity(ITERS); @@ -464,7 +464,7 @@ mod tests { }); }); - let handle = collector.handle(); + let handle = collector.register(); let guard = handle.pin(); let mut iter = l.iter(&guard); diff --git a/third_party/rust/crossbeam-epoch/src/sync/queue.rs b/third_party/rust/crossbeam-epoch/src/sync/queue.rs index 77965c57de11..8f5d9af1579f 100644 --- a/third_party/rust/crossbeam-epoch/src/sync/queue.rs +++ 
b/third_party/rust/crossbeam-epoch/src/sync/queue.rs @@ -5,13 +5,11 @@ //! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue //! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106 -use core::fmt; -use core::mem; +use core::mem::{self, ManuallyDrop}; use core::ptr; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; use crossbeam_utils::cache_padded::CachePadded; -use nodrop::NoDrop; use {unprotected, Atomic, Guard, Owned, Shared}; @@ -24,28 +22,24 @@ pub struct Queue { tail: CachePadded>>, } +#[derive(Debug)] struct Node { /// The slot in which a value of type `T` can be stored. /// - /// The type of `data` is `NoDrop` because a `Node` doesn't always contain a `T`. For - /// example, the sentinel node in a queue never contains a value: its slot is always empty. + /// The type of `data` is `ManuallyDrop` because a `Node` doesn't always contain a `T`. + /// For example, the sentinel node in a queue never contains a value: its slot is always empty. /// Other nodes start their life with a push operation and contain a value until it gets popped /// out. After that such empty nodes get added to the collector for destruction. - data: NoDrop, + data: ManuallyDrop, next: Atomic>, } -impl fmt::Debug for Node { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "node {{ ... }}") - } -} - // Any particular `T` should never be accessed concurrently, so no need for `Sync`. unsafe impl Sync for Queue {} unsafe impl Send for Queue {} + impl Queue { /// Create a new, empty queue. pub fn new() -> Queue { @@ -93,7 +87,7 @@ impl Queue { /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`. 
pub fn push(&self, t: T, guard: &Guard) { let new = Owned::new(Node { - data: NoDrop::new(t), + data: ManuallyDrop::new(t), next: Atomic::null(), }); let new = Owned::into_shared(new, guard); @@ -121,7 +115,7 @@ impl Queue { .compare_and_set(head, next, Release, guard) .map(|_| { guard.defer(move || drop(head.into_owned())); - Some(NoDrop::into_inner(ptr::read(&n.data))) + Some(ManuallyDrop::into_inner(ptr::read(&n.data))) }) .map_err(|_| ()) }, @@ -146,7 +140,7 @@ impl Queue { .compare_and_set(head, next, Release, guard) .map(|_| { guard.defer(move || drop(head.into_owned())); - Some(NoDrop::into_inner(ptr::read(&n.data))) + Some(ManuallyDrop::into_inner(ptr::read(&n.data))) }) .map_err(|_| ()) }, @@ -199,10 +193,9 @@ impl Drop for Queue { #[cfg(test)] mod test { - use {pin}; - - use core::sync::atomic::Ordering; + use super::*; use crossbeam_utils::scoped; + use pin; struct Queue { queue: super::Queue, @@ -220,9 +213,9 @@ mod test { pub fn is_empty(&self) -> bool { let guard = &pin(); - let head = self.queue.head.load(Ordering::Acquire, guard); + let head = self.queue.head.load(Acquire, guard); let h = unsafe { head.deref() }; - h.next.load(Ordering::Acquire, guard).is_null() + h.next.load(Acquire, guard).is_null() } pub fn try_pop(&self) -> Option { diff --git a/third_party/rust/crossbeam-utils-0.2.2/.cargo-checksum.json b/third_party/rust/crossbeam-utils-0.2.2/.cargo-checksum.json new file mode 100644 index 000000000000..875a79ebb315 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"945485d3f79a1912bfa6944ed7b07a9c60915fae992f7abcbb1de44ec147953e","Cargo.toml":"2c8f106920b27ebe60616933c4bf04cf2a6515d65f87fafa216febc4d6e1164b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/atomic_option.rs":"0ed05d26d8980c761c4972a0f37f5b507462ed6dff5d688ef92444560e7b9c69","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/lib.rs":"ea79e01d2c2f55d27d365e8cd45e377b313f53f27c705d4e4f6a4f19d7e11a98","src/scoped.rs":"5af1b54ca167c634e4c206aeab53e6ca78682633ad0009af220b17de385b3080"},"package":"2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-utils-0.2.2/.travis.yml b/third_party/rust/crossbeam-utils-0.2.2/.travis.yml new file mode 100644 index 000000000000..35aa6c0641f7 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/.travis.yml @@ -0,0 +1,20 @@ +language: rust + +rust: + - stable + - beta + - nightly + - 1.12.1 + +script: + - cargo build + - cargo build --release + - cargo build --no-default-features + - cargo build --release --no-default-features + - cargo test + - cargo test --release + - | + if [ $TRAVIS_RUST_VERSION == nightly ]; then + cargo test --features nightly + cargo test --features nightly --release + fi diff --git a/third_party/rust/crossbeam-utils-0.2.2/CHANGELOG.md b/third_party/rust/crossbeam-utils-0.2.2/CHANGELOG.md new file mode 100644 index 000000000000..d2c55a8773ff --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] +### Added +- Support for Rust 1.12.1. + +### Fixed +- Call `T::clone` when cloning a `CachePadded`. + +## [0.2.1] - 2017-11-26 +### Added +- Add `use_std` feature. + +## [0.2.0] - 2017-11-17 +### Added +- Add `nightly` feature. +- Use `repr(align(64))` on `CachePadded` with the `nightly` feature. +- Implement `Drop` for `CachePadded`. +- Implement `Clone` for `CachePadded`. +- Implement `From` for `CachePadded`. +- Implement better `Debug` for `CachePadded`. +- Write more tests. +- Add this changelog. + +### Changed +- Change cache line length to 64 bytes. + +### Removed +- Remove `ZerosValid`. + +## 0.1.0 - 2017-08-27 +### Added +- Old implementation of `CachePadded` from `crossbeam` version 0.3.0 + +[Unreleased]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.1...HEAD +[0.2.1]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.1.0...v0.2.0 diff --git a/third_party/rust/mime/Cargo.toml b/third_party/rust/crossbeam-utils-0.2.2/Cargo.toml similarity index 50% rename from third_party/rust/mime/Cargo.toml rename to third_party/rust/crossbeam-utils-0.2.2/Cargo.toml index 083231380e25..581fabff37c5 100644 --- a/third_party/rust/mime/Cargo.toml +++ b/third_party/rust/crossbeam-utils-0.2.2/Cargo.toml @@ -11,27 +11,21 @@ # will likely look very different (and much more reasonable) [package] -name = "mime" -version = "0.2.6" -authors = ["Sean McArthur "] -description = "Strongly Typed Mimes" -documentation = "http://hyperium.github.io/mime.rs" -keywords = ["mime", "media-extensions", "media-types"] -license = "MIT" -repository = "https://github.com/hyperium/mime.rs" -[dependencies.heapsize] -version = ">=0.2.0, <0.4" -optional = true - -[dependencies.log] -version = "0.3" - 
-[dependencies.serde] -version = ">=0.7, <0.9" -optional = true -[dev-dependencies.serde_json] -version = ">=0.7, <0.9" +name = "crossbeam-utils" +version = "0.2.2" +authors = ["The Crossbeam Project Developers"] +description = "Utilities for concurrent programming" +homepage = "https://github.com/crossbeam-rs/crossbeam-utils" +documentation = "https://docs.rs/crossbeam-utils" +readme = "README.md" +keywords = ["scoped", "thread", "atomic", "cache"] +categories = ["algorithms", "concurrency", "data-structures"] +license = "MIT/Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam-utils" +[dependencies.cfg-if] +version = "0.1" [features] -heap_size = ["heapsize"] +default = ["use_std"] nightly = [] +use_std = [] diff --git a/third_party/rust/crossbeam-utils-0.2.2/LICENSE-APACHE b/third_party/rust/crossbeam-utils-0.2.2/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/crossbeam-utils-0.2.2/LICENSE-MIT b/third_party/rust/crossbeam-utils-0.2.2/LICENSE-MIT new file mode 100644 index 000000000000..25597d5838fa --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2010 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/rust/crossbeam-utils-0.2.2/README.md b/third_party/rust/crossbeam-utils-0.2.2/README.md new file mode 100644 index 000000000000..860519a0388a --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/README.md @@ -0,0 +1,29 @@ +# Utilities for concurrent programming + +[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-utils.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-utils) +[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-utils) +[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)](https://crates.io/crates/crossbeam-utils) +[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)](https://docs.rs/crossbeam-utils) + +This crate provides utilities for concurrent programming. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-utils = "0.2" +``` + +Next, add this to your crate: + +```rust +extern crate crossbeam_utils; +``` + +## License + +Licensed under the terms of MIT license and the Apache License (Version 2.0). + +See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. diff --git a/third_party/rust/crossbeam-utils/src/atomic_option.rs b/third_party/rust/crossbeam-utils-0.2.2/src/atomic_option.rs similarity index 100% rename from third_party/rust/crossbeam-utils/src/atomic_option.rs rename to third_party/rust/crossbeam-utils-0.2.2/src/atomic_option.rs diff --git a/third_party/rust/crossbeam-utils-0.2.2/src/cache_padded.rs b/third_party/rust/crossbeam-utils-0.2.2/src/cache_padded.rs new file mode 100644 index 000000000000..32482b133b14 --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/src/cache_padded.rs @@ -0,0 +1,290 @@ +use core::fmt; +use core::mem; +use core::ops::{Deref, DerefMut}; +use core::ptr; + + +cfg_if! 
{ + if #[cfg(feature = "nightly")] { + // This trick allows use to support rustc 1.12.1, which does not support the + // #[repr(align(n))] syntax. Using the attribute makes the parser fail over. + // It is, however, okay to use it within a macro, since it would be parsed + // in a later stage, but that never occurs due to the cfg_if. + // TODO(Vtec234): remove this crap when we drop support for 1.12. + macro_rules! nightly_inner { + () => ( + #[derive(Clone)] + #[repr(align(64))] + pub(crate) struct Inner { + value: T, + } + ) + } + nightly_inner!(); + + impl Inner { + pub(crate) fn new(t: T) -> Inner { + Self { + value: t + } + } + } + + impl Deref for Inner { + type Target = T; + + fn deref(&self) -> &T { + &self.value + } + } + + impl DerefMut for Inner { + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } + } + } else { + use core::marker::PhantomData; + + struct Inner { + bytes: [u8; 64], + + /// `[T; 0]` ensures alignment is at least that of `T`. + /// `PhantomData` signals that `CachePadded` contains a `T`. + _marker: ([T; 0], PhantomData), + } + + impl Inner { + fn new(t: T) -> Inner { + assert!(mem::size_of::() <= mem::size_of::()); + assert!(mem::align_of::() <= mem::align_of::()); + + unsafe { + let mut inner: Self = mem::uninitialized(); + let p: *mut T = &mut *inner; + ptr::write(p, t); + inner + } + } + } + + impl Deref for Inner { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*(self.bytes.as_ptr() as *const T) } + } + } + + impl DerefMut for Inner { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *(self.bytes.as_ptr() as *mut T) } + } + } + + impl Drop for CachePadded { + fn drop(&mut self) { + let p: *mut T = self.deref_mut(); + unsafe { + ptr::drop_in_place(p); + } + } + } + + impl Clone for Inner { + fn clone(&self) -> Inner { + let val = self.deref().clone(); + Self::new(val) + } + } + } +} + +/// Pads `T` to the length of a cache line. 
+/// +/// Sometimes concurrent programming requires a piece of data to be padded out to the size of a +/// cacheline to avoid "false sharing": cache lines being invalidated due to unrelated concurrent +/// activity. Use this type when you want to *avoid* cache locality. +/// +/// At the moment, cache lines are assumed to be 64 bytes on all architectures. +/// +/// # Size and alignment +/// +/// By default, the size of `CachePadded` is 64 bytes. If `T` is larger than that, then +/// `CachePadded::::new` will panic. Alignment of `CachePadded` is the same as that of `T`. +/// +/// However, if the `nightly` feature is enabled, arbitrarily large types `T` can be stored inside +/// a `CachePadded`. The size will then be a multiple of 64 at least the size of `T`, and the +/// alignment will be the maximum of 64 and the alignment of `T`. +pub struct CachePadded { + inner: Inner, +} + +unsafe impl Send for CachePadded {} +unsafe impl Sync for CachePadded {} + +impl CachePadded { + /// Pads a value to the length of a cache line. + /// + /// # Panics + /// + /// If `nightly` is not enabled and `T` is larger than 64 bytes, this function will panic. 
+ pub fn new(t: T) -> CachePadded { + CachePadded:: { inner: Inner::new(t) } + } +} + +impl Deref for CachePadded { + type Target = T; + + fn deref(&self) -> &T { + self.inner.deref() + } +} + +impl DerefMut for CachePadded { + fn deref_mut(&mut self) -> &mut T { + self.inner.deref_mut() + } +} + +impl Default for CachePadded { + fn default() -> Self { + Self::new(Default::default()) + } +} + +impl Clone for CachePadded { + fn clone(&self) -> Self { + CachePadded { inner: self.inner.clone() } + } +} + +impl fmt::Debug for CachePadded { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let inner: &T = &*self; + write!(f, "CachePadded {{ {:?} }}", inner) + } +} + +impl From for CachePadded { + fn from(t: T) -> Self { + CachePadded::new(t) + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::cell::Cell; + + #[test] + fn store_u64() { + let x: CachePadded = CachePadded::new(17); + assert_eq!(*x, 17); + } + + #[test] + fn store_pair() { + let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37)); + assert_eq!(x.0, 17); + assert_eq!(x.1, 37); + } + + #[test] + fn distance() { + let arr = [CachePadded::new(17u8), CachePadded::new(37u8)]; + let a = &*arr[0] as *const u8; + let b = &*arr[1] as *const u8; + assert!(unsafe { a.offset(64) } <= b); + } + + #[test] + fn different_sizes() { + CachePadded::new(17u8); + CachePadded::new(17u16); + CachePadded::new(17u32); + CachePadded::new([17u64; 0]); + CachePadded::new([17u64; 1]); + CachePadded::new([17u64; 2]); + CachePadded::new([17u64; 3]); + CachePadded::new([17u64; 4]); + CachePadded::new([17u64; 5]); + CachePadded::new([17u64; 6]); + CachePadded::new([17u64; 7]); + CachePadded::new([17u64; 8]); + } + + cfg_if! 
{ + if #[cfg(feature = "nightly")] { + #[test] + fn large() { + let a = [17u64; 9]; + let b = CachePadded::new(a); + assert!(mem::size_of_val(&a) <= mem::size_of_val(&b)); + } + } else { + #[test] + #[should_panic] + fn large() { + CachePadded::new([17u64; 9]); + } + } + } + + #[test] + fn debug() { + assert_eq!( + format!("{:?}", CachePadded::new(17u64)), + "CachePadded { 17 }" + ); + } + + #[test] + fn drops() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Drop for Foo<'a> { + fn drop(&mut self) { + self.0.set(self.0.get() + 1); + } + } + + let a = CachePadded::new(Foo(&count)); + let b = CachePadded::new(Foo(&count)); + + assert_eq!(count.get(), 0); + drop(a); + assert_eq!(count.get(), 1); + drop(b); + assert_eq!(count.get(), 2); + } + + #[test] + fn clone() { + let a = CachePadded::new(17); + let b = a.clone(); + assert_eq!(*a, *b); + } + + #[test] + fn runs_custom_clone() { + let count = Cell::new(0); + + struct Foo<'a>(&'a Cell); + + impl<'a> Clone for Foo<'a> { + fn clone(&self) -> Foo<'a> { + self.0.set(self.0.get() + 1); + Foo::<'a>(self.0) + } + } + + let a = CachePadded::new(Foo(&count)); + a.clone(); + + assert_eq!(count.get(), 1); + } +} diff --git a/third_party/rust/crossbeam-utils-0.2.2/src/lib.rs b/third_party/rust/crossbeam-utils-0.2.2/src/lib.rs new file mode 100644 index 000000000000..50b3a56beeab --- /dev/null +++ b/third_party/rust/crossbeam-utils-0.2.2/src/lib.rs @@ -0,0 +1,14 @@ +#![cfg_attr(feature = "nightly", feature(attr_literals, repr_align))] +#![cfg_attr(not(feature = "use_std"), no_std)] + +#[cfg(feature = "use_std")] +extern crate core; + +#[macro_use] +extern crate cfg_if; + +pub mod cache_padded; +#[cfg(feature = "use_std")] +pub mod atomic_option; +#[cfg(feature = "use_std")] +pub mod scoped; diff --git a/third_party/rust/crossbeam-utils-0.2.2/src/scoped.rs b/third_party/rust/crossbeam-utils-0.2.2/src/scoped.rs new file mode 100644 index 000000000000..a571a9006312 --- /dev/null +++ 
b/third_party/rust/crossbeam-utils-0.2.2/src/scoped.rs @@ -0,0 +1,364 @@ +/// Scoped thread. +/// +/// # Examples +/// +/// A basic scoped thread: +/// +/// ``` +/// crossbeam_utils::scoped::scope(|scope| { +/// scope.spawn(|| { +/// println!("Hello from a scoped thread!"); +/// }); +/// }); +/// ``` +/// +/// When writing concurrent Rust programs, you'll sometimes see a pattern like this, using +/// [`std::thread::spawn`][spawn]: +/// +/// ```ignore +/// let array = [1, 2, 3]; +/// let mut guards = vec![]; +/// +/// for i in &array { +/// let guard = std::thread::spawn(move || { +/// println!("element: {}", i); +/// }); +/// +/// guards.push(guard); +/// } +/// +/// for guard in guards { +/// guard.join().unwrap(); +/// } +/// ``` +/// +/// The basic pattern is: +/// +/// 1. Iterate over some collection. +/// 2. Spin up a thread to operate on each part of the collection. +/// 3. Join all the threads. +/// +/// However, this code actually gives an error: +/// +/// ```text +/// error: `array` does not live long enough +/// for i in &array { +/// ^~~~~ +/// in expansion of for loop expansion +/// note: expansion site +/// note: reference must be valid for the static lifetime... +/// note: ...but borrowed value is only valid for the block suffix following statement 0 at ... +/// let array = [1, 2, 3]; +/// let mut guards = vec![]; +/// +/// for i in &array { +/// let guard = std::thread::spawn(move || { +/// println!("element: {}", i); +/// ... +/// error: aborting due to previous error +/// ``` +/// +/// Because [`std::thread::spawn`][spawn] doesn't know about this scope, it requires a +/// `'static` lifetime. 
One way of giving it a proper lifetime is to use an [`Arc`][arc]: +/// +/// [arc]: http://doc.rust-lang.org/stable/std/sync/struct.Arc.html +/// +/// ``` +/// use std::sync::Arc; +/// +/// let array = Arc::new([1, 2, 3]); +/// let mut guards = vec![]; +/// +/// for i in 0..array.len() { +/// let a = array.clone(); +/// +/// let guard = std::thread::spawn(move || { +/// println!("element: {}", a[i]); +/// }); +/// +/// guards.push(guard); +/// } +/// +/// for guard in guards { +/// guard.join().unwrap(); +/// } +/// ``` +/// +/// But this introduces unnecessary allocation, as `Arc` puts its data on the heap, and we +/// also end up dealing with reference counts. We know that we're joining the threads before +/// our function returns, so just taking a reference _should_ be safe. Rust can't know that, +/// though. +/// +/// Enter scoped threads. Here's our original example, using `spawn` from crossbeam rather +/// than from `std::thread`: +/// +/// ``` +/// let array = [1, 2, 3]; +/// +/// crossbeam_utils::scoped::scope(|scope| { +/// for i in &array { +/// scope.spawn(move || { +/// println!("element: {}", i); +/// }); +/// } +/// }); +/// ``` +/// +/// Much more straightforward. +// FIXME(jeehoonkang): maybe we should create a new crate for scoped threads. + +use std::cell::RefCell; +use std::fmt; +use std::mem; +use std::rc::Rc; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::thread; +use std::io; + +use atomic_option::AtomicOption; + +#[doc(hidden)] +trait FnBox { + fn call_box(self: Box); +} + +impl FnBox for F { + fn call_box(self: Box) { + (*self)() + } +} + +/// Like `std::thread::spawn`, but without the closure bounds. +pub unsafe fn spawn_unsafe<'a, F>(f: F) -> thread::JoinHandle<()> +where + F: FnOnce() + Send + 'a, +{ + let builder = thread::Builder::new(); + builder_spawn_unsafe(builder, f).unwrap() +} + +/// Like `std::thread::Builder::spawn`, but without the closure bounds. 
+pub unsafe fn builder_spawn_unsafe<'a, F>( + builder: thread::Builder, + f: F, +) -> io::Result> +where + F: FnOnce() + Send + 'a, +{ + use std::mem; + + let closure: Box = Box::new(f); + let closure: Box = mem::transmute(closure); + builder.spawn(move || closure.call_box()) +} + + +pub struct Scope<'a> { + dtors: RefCell>>, +} + +struct DtorChain<'a> { + dtor: Box, + next: Option>>, +} + +enum JoinState { + Running(thread::JoinHandle<()>), + Joined, +} + +impl JoinState { + fn join(&mut self) { + let mut state = JoinState::Joined; + mem::swap(self, &mut state); + if let JoinState::Running(handle) = state { + let res = handle.join(); + + if !thread::panicking() { + res.unwrap(); + } + } + } +} + +/// A handle to a scoped thread +pub struct ScopedJoinHandle { + inner: Rc>, + packet: Arc>, + thread: thread::Thread, +} + +/// Create a new `scope`, for deferred destructors. +/// +/// Scopes, in particular, support [*scoped thread spawning*](struct.Scope.html#method.spawn). +/// +/// # Examples +/// +/// Creating and using a scope: +/// +/// ``` +/// crossbeam_utils::scoped::scope(|scope| { +/// scope.defer(|| println!("Exiting scope")); +/// scope.spawn(|| println!("Running child thread in scope")) +/// }); +/// // Prints messages in the reverse order written +/// ``` +pub fn scope<'a, F, R>(f: F) -> R +where + F: FnOnce(&Scope<'a>) -> R, +{ + let mut scope = Scope { dtors: RefCell::new(None) }; + let ret = f(&scope); + scope.drop_all(); + ret +} + +impl<'a> fmt::Debug for Scope<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Scope {{ ... }}") + } +} + +impl fmt::Debug for ScopedJoinHandle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ScopedJoinHandle {{ ... }}") + } +} + +impl<'a> Scope<'a> { + // This method is carefully written in a transactional style, so + // that it can be called directly and, if any dtor panics, can be + // resumed in the unwinding this causes. 
By initially running the + // method outside of any destructor, we avoid any leakage problems + // due to @rust-lang/rust#14875. + fn drop_all(&mut self) { + loop { + // use a separate scope to ensure that the RefCell borrow + // is relinquished before running `dtor` + let dtor = { + let mut dtors = self.dtors.borrow_mut(); + if let Some(mut node) = dtors.take() { + *dtors = node.next.take().map(|b| *b); + node.dtor + } else { + return; + } + }; + dtor.call_box() + } + } + + /// Schedule code to be executed when exiting the scope. + /// + /// This is akin to having a destructor on the stack, except that it is + /// *guaranteed* to be run. + pub fn defer(&self, f: F) + where + F: FnOnce() + 'a, + { + let mut dtors = self.dtors.borrow_mut(); + *dtors = Some(DtorChain { + dtor: Box::new(f), + next: dtors.take().map(Box::new), + }); + } + + /// Create a scoped thread. + /// + /// `spawn` is similar to the [`spawn`][spawn] function in Rust's standard library. The + /// difference is that this thread is scoped, meaning that it's guaranteed to terminate + /// before the current stack frame goes away, allowing you to reference the parent stack frame + /// directly. This is ensured by having the parent thread join on the child thread before the + /// scope exits. + /// + /// [spawn]: http://doc.rust-lang.org/std/thread/fn.spawn.html + pub fn spawn(&self, f: F) -> ScopedJoinHandle + where + F: FnOnce() -> T + Send + 'a, + T: Send + 'a, + { + self.builder().spawn(f).unwrap() + } + + /// Generates the base configuration for spawning a scoped thread, from which configuration + /// methods can be chained. + pub fn builder<'s>(&'s self) -> ScopedThreadBuilder<'s, 'a> { + ScopedThreadBuilder { + scope: self, + builder: thread::Builder::new(), + } + } +} + +/// Scoped thread configuration. Provides detailed control over the properties and behavior of new +/// scoped threads. 
+pub struct ScopedThreadBuilder<'s, 'a: 's> { + scope: &'s Scope<'a>, + builder: thread::Builder, +} + +impl<'s, 'a: 's> ScopedThreadBuilder<'s, 'a> { + /// Names the thread-to-be. Currently the name is used for identification only in panic + /// messages. + pub fn name(mut self, name: String) -> ScopedThreadBuilder<'s, 'a> { + self.builder = self.builder.name(name); + self + } + + /// Sets the size of the stack for the new thread. + pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'s, 'a> { + self.builder = self.builder.stack_size(size); + self + } + + /// Spawns a new thread, and returns a join handle for it. + pub fn spawn(self, f: F) -> io::Result> + where + F: FnOnce() -> T + Send + 'a, + T: Send + 'a, + { + let their_packet = Arc::new(AtomicOption::new()); + let my_packet = their_packet.clone(); + + let join_handle = try!(unsafe { + builder_spawn_unsafe(self.builder, move || { + their_packet.swap(f(), Ordering::Relaxed); + }) + }); + + let thread = join_handle.thread().clone(); + let deferred_handle = Rc::new(RefCell::new(JoinState::Running(join_handle))); + let my_handle = deferred_handle.clone(); + + self.scope.defer(move || { + let mut state = deferred_handle.borrow_mut(); + state.join(); + }); + + Ok(ScopedJoinHandle { + inner: my_handle, + packet: my_packet, + thread: thread, + }) + } +} + +impl ScopedJoinHandle { + /// Join the scoped thread, returning the result it produced. + pub fn join(self) -> T { + self.inner.borrow_mut().join(); + self.packet.take(Ordering::Relaxed).unwrap() + } + + /// Get the underlying thread handle. 
+ pub fn thread(&self) -> &thread::Thread { + &self.thread + } +} + +impl<'a> Drop for Scope<'a> { + fn drop(&mut self) { + self.drop_all() + } +} diff --git a/third_party/rust/crossbeam-utils/.cargo-checksum.json b/third_party/rust/crossbeam-utils/.cargo-checksum.json index 875a79ebb315..d2f36227e6de 100644 --- a/third_party/rust/crossbeam-utils/.cargo-checksum.json +++ b/third_party/rust/crossbeam-utils/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"945485d3f79a1912bfa6944ed7b07a9c60915fae992f7abcbb1de44ec147953e","Cargo.toml":"2c8f106920b27ebe60616933c4bf04cf2a6515d65f87fafa216febc4d6e1164b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/atomic_option.rs":"0ed05d26d8980c761c4972a0f37f5b507462ed6dff5d688ef92444560e7b9c69","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/lib.rs":"ea79e01d2c2f55d27d365e8cd45e377b313f53f27c705d4e4f6a4f19d7e11a98","src/scoped.rs":"5af1b54ca167c634e4c206aeab53e6ca78682633ad0009af220b17de385b3080"},"package":"2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"} \ No newline at end of file 
+{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"6b764c44d2f0ddb3a10101f738673685992bbd894152c0fc354d571f5115f85a","Cargo.toml":"48f3a37f7267b76120aa309e4e2d4e13df6e2994b5b2b402177640957dbcb18b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/consume.rs":"422c6006dca162a80d39f1abcf1fe26dae6d69772111b3e8824c7f9b335c3ec2","src/lib.rs":"81273b19bd30f6f20084ff01af1acedadcf9ac88db89137d59cb7ee24c226588","src/scoped.rs":"1b7eaaf1fd6033875e4e368e4318a93430bedeb6f68a11c10221ace0243cd83b"},"package":"d636a8b3bcc1b409d7ffd3facef8f21dcb4009626adbd0c5e6c4305c07253c7b"} \ No newline at end of file diff --git a/third_party/rust/crossbeam-utils/CHANGELOG.md b/third_party/rust/crossbeam-utils/CHANGELOG.md index d2c55a8773ff..0633ecea60f0 100644 --- a/third_party/rust/crossbeam-utils/CHANGELOG.md +++ b/third_party/rust/crossbeam-utils/CHANGELOG.md @@ -5,6 +5,24 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). ## [Unreleased] + +## [0.3.2] - 2018-03-12 +### Fixed +- Mark `load_consume` with `#[inline]`. + +## [0.3.1] - 2018-03-12 +### Fixed +- `load_consume` on ARM and AArch64. + +## [0.3.0] - 2018-03-11 +### Added +- `join` for scoped thread API. +- `load_consume` for atomic load-consume memory ordering. + +### Removed +- `AtomicOption`. + +## [0.2.2] - 2018-01-14 ### Added - Support for Rust 1.12.1. 
diff --git a/third_party/rust/crossbeam-utils/Cargo.toml b/third_party/rust/crossbeam-utils/Cargo.toml index 581fabff37c5..c22036761d24 100644 --- a/third_party/rust/crossbeam-utils/Cargo.toml +++ b/third_party/rust/crossbeam-utils/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "crossbeam-utils" -version = "0.2.2" +version = "0.3.2" authors = ["The Crossbeam Project Developers"] description = "Utilities for concurrent programming" homepage = "https://github.com/crossbeam-rs/crossbeam-utils" diff --git a/third_party/rust/crossbeam-utils/src/consume.rs b/third_party/rust/crossbeam-utils/src/consume.rs new file mode 100644 index 000000000000..7501040d2ef8 --- /dev/null +++ b/third_party/rust/crossbeam-utils/src/consume.rs @@ -0,0 +1,82 @@ +use core::sync::atomic::Ordering; +#[cfg(any(target_arch = "arm", target_arch = "aarch64"))] +use core::sync::atomic::compiler_fence; + +/// Trait which allows reading from an atomic type with "consume" ordering. +pub trait AtomicConsume { + /// Type returned by `load_consume`. + type Val; + + /// Loads a value from the atomic using a "consume" memory ordering. + /// + /// This is similar to the "acquire" ordering, except that an ordering is + /// only guaranteed with operations that "depend on" the result of the load. + /// However consume loads are usually much faster than acquire loads on + /// architectures with a weak memory model since they don't require memory + /// fence instructions. + /// + /// The exact definition of "depend on" is a bit vague, but it works as you + /// would expect in practice since a lot of software, especially the Linux + /// kernel, rely on this behavior. + /// + /// This is currently only implemented on ARM and AArch64, where a fence + /// can be avoided. On other architectures this will fall back to a simple + /// `load(Ordering::Acquire)`. + fn load_consume(&self) -> Self::Val; +} + +#[cfg(any(target_arch = "arm", target_arch = "aarch64"))] +macro_rules! 
impl_consume { + () => { + #[inline] + fn load_consume(&self) -> Self::Val { + let result = self.load(Ordering::Relaxed); + compiler_fence(Ordering::Acquire); + result + } + }; +} + +#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] +macro_rules! impl_consume { + () => { + #[inline] + fn load_consume(&self) -> Self::Val { + self.load(Ordering::Acquire) + } + }; +} + +macro_rules! impl_atomic { + ($atomic:ident, $val:ty) => { + impl AtomicConsume for ::core::sync::atomic::$atomic { + type Val = $val; + impl_consume!(); + } + }; +} + +impl_atomic!(AtomicBool, bool); +impl_atomic!(AtomicUsize, usize); +impl_atomic!(AtomicIsize, isize); +#[cfg(all(feature = "nightly", target_has_atomic = "8"))] +impl_atomic!(AtomicU8, u8); +#[cfg(all(feature = "nightly", target_has_atomic = "8"))] +impl_atomic!(AtomicI8, i8); +#[cfg(all(feature = "nightly", target_has_atomic = "16"))] +impl_atomic!(AtomicU16, u16); +#[cfg(all(feature = "nightly", target_has_atomic = "16"))] +impl_atomic!(AtomicI16, i16); +#[cfg(all(feature = "nightly", target_has_atomic = "32"))] +impl_atomic!(AtomicU32, u32); +#[cfg(all(feature = "nightly", target_has_atomic = "32"))] +impl_atomic!(AtomicI32, i32); +#[cfg(all(feature = "nightly", target_has_atomic = "64"))] +impl_atomic!(AtomicU64, u64); +#[cfg(all(feature = "nightly", target_has_atomic = "64"))] +impl_atomic!(AtomicI64, i64); + +impl AtomicConsume for ::core::sync::atomic::AtomicPtr { + type Val = *mut T; + impl_consume!(); +} diff --git a/third_party/rust/crossbeam-utils/src/lib.rs b/third_party/rust/crossbeam-utils/src/lib.rs index 50b3a56beeab..fa0827bdc3f9 100644 --- a/third_party/rust/crossbeam-utils/src/lib.rs +++ b/third_party/rust/crossbeam-utils/src/lib.rs @@ -1,4 +1,5 @@ -#![cfg_attr(feature = "nightly", feature(attr_literals, repr_align))] +#![cfg_attr(feature = "nightly", + feature(attr_literals, repr_align, cfg_target_has_atomic, integer_atomics))] #![cfg_attr(not(feature = "use_std"), no_std)] #[cfg(feature = "use_std")] @@ 
-9,6 +10,5 @@ extern crate cfg_if; pub mod cache_padded; #[cfg(feature = "use_std")] -pub mod atomic_option; -#[cfg(feature = "use_std")] pub mod scoped; +pub mod consume; diff --git a/third_party/rust/crossbeam-utils/src/scoped.rs b/third_party/rust/crossbeam-utils/src/scoped.rs index a571a9006312..8b0e61b1cea5 100644 --- a/third_party/rust/crossbeam-utils/src/scoped.rs +++ b/third_party/rust/crossbeam-utils/src/scoped.rs @@ -109,22 +109,20 @@ use std::cell::RefCell; use std::fmt; +use std::marker::PhantomData; use std::mem; +use std::ops::DerefMut; use std::rc::Rc; -use std::sync::atomic::Ordering; -use std::sync::Arc; use std::thread; use std::io; -use atomic_option::AtomicOption; - #[doc(hidden)] -trait FnBox { - fn call_box(self: Box); +trait FnBox { + fn call_box(self: Box) -> T; } -impl FnBox for F { - fn call_box(self: Box) { +impl T> FnBox for F { + fn call_box(self: Box) -> T { (*self)() } } @@ -146,47 +144,61 @@ pub unsafe fn builder_spawn_unsafe<'a, F>( where F: FnOnce() + Send + 'a, { - use std::mem; - - let closure: Box = Box::new(f); - let closure: Box = mem::transmute(closure); + let closure: Box + 'a> = Box::new(f); + let closure: Box + Send> = mem::transmute(closure); builder.spawn(move || closure.call_box()) } - pub struct Scope<'a> { - dtors: RefCell>>, + /// The list of the deferred functions and thread join jobs. 
+ dtors: RefCell>>, + // !Send + !Sync + _marker: PhantomData<*const ()>, } -struct DtorChain<'a> { - dtor: Box, - next: Option>>, +struct DtorChain<'a, T> { + dtor: Box + 'a>, + next: Option>>, } -enum JoinState { - Running(thread::JoinHandle<()>), - Joined, +impl<'a, T> DtorChain<'a, T> { + pub fn pop(chain: &mut Option>) -> Option + 'a>> { + chain.take().map(|mut node| { + *chain = node.next.take().map(|b| *b); + node.dtor + }) + } } -impl JoinState { - fn join(&mut self) { - let mut state = JoinState::Joined; - mem::swap(self, &mut state); - if let JoinState::Running(handle) = state { - let res = handle.join(); +struct JoinState { + join_handle: thread::JoinHandle<()>, + result: usize, + _marker: PhantomData, +} - if !thread::panicking() { - res.unwrap(); - } +impl JoinState { + fn new(join_handle: thread::JoinHandle<()>, result: usize) -> JoinState { + JoinState { + join_handle: join_handle, + result: result, + _marker: PhantomData, } } + + fn join(self) -> thread::Result { + let result = self.result; + self.join_handle.join().map(|_| { + unsafe { *Box::from_raw(result as *mut T) } + }) + } } /// A handle to a scoped thread -pub struct ScopedJoinHandle { - inner: Rc>, - packet: Arc>, +pub struct ScopedJoinHandle<'a, T: 'a> { + // !Send + !Sync + inner: Rc>>>, thread: thread::Thread, + _marker: PhantomData<&'a T>, } /// Create a new `scope`, for deferred destructors. @@ -204,11 +216,18 @@ pub struct ScopedJoinHandle { /// }); /// // Prints messages in the reverse order written /// ``` +/// +/// # Panics +/// +/// `scoped::scope()` panics if a spawned thread panics but it is not joined inside the scope. 
pub fn scope<'a, F, R>(f: F) -> R where F: FnOnce(&Scope<'a>) -> R, { - let mut scope = Scope { dtors: RefCell::new(None) }; + let mut scope = Scope { + dtors: RefCell::new(None), + _marker: PhantomData, + }; let ret = f(&scope); scope.drop_all(); ret @@ -220,7 +239,7 @@ impl<'a> fmt::Debug for Scope<'a> { } } -impl fmt::Debug for ScopedJoinHandle { +impl<'a, T> fmt::Debug for ScopedJoinHandle<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ScopedJoinHandle {{ ... }}") } @@ -233,26 +252,16 @@ impl<'a> Scope<'a> { // method outside of any destructor, we avoid any leakage problems // due to @rust-lang/rust#14875. fn drop_all(&mut self) { - loop { - // use a separate scope to ensure that the RefCell borrow - // is relinquished before running `dtor` - let dtor = { - let mut dtors = self.dtors.borrow_mut(); - if let Some(mut node) = dtors.take() { - *dtors = node.next.take().map(|b| *b); - node.dtor - } else { - return; - } - }; - dtor.call_box() + while let Some(dtor) = DtorChain::pop(&mut self.dtors.borrow_mut()) { + dtor.call_box(); } } /// Schedule code to be executed when exiting the scope. /// /// This is akin to having a destructor on the stack, except that it is - /// *guaranteed* to be run. + /// *guaranteed* to be run. It is guaranteed that the function is called + /// after all the spawned threads are joined. pub fn defer(&self, f: F) where F: FnOnce() + 'a, @@ -273,8 +282,9 @@ impl<'a> Scope<'a> { /// scope exits. /// /// [spawn]: http://doc.rust-lang.org/std/thread/fn.spawn.html - pub fn spawn(&self, f: F) -> ScopedJoinHandle + pub fn spawn<'s, F, T>(&'s self, f: F) -> ScopedJoinHandle<'a, T> where + 'a: 's, F: FnOnce() -> T + Send + 'a, T: Send + 'a, { @@ -313,42 +323,49 @@ impl<'s, 'a: 's> ScopedThreadBuilder<'s, 'a> { } /// Spawns a new thread, and returns a join handle for it. 
- pub fn spawn(self, f: F) -> io::Result> + pub fn spawn(self, f: F) -> io::Result> where F: FnOnce() -> T + Send + 'a, T: Send + 'a, { - let their_packet = Arc::new(AtomicOption::new()); - let my_packet = their_packet.clone(); + // The `Box` constructed below is written only by the spawned thread, + // and read by the current thread only after the spawned thread is + // joined (`JoinState::join()`). Thus there are no data races. + let result = Box::into_raw(Box::::new(unsafe { mem::uninitialized() })) as usize; let join_handle = try!(unsafe { builder_spawn_unsafe(self.builder, move || { - their_packet.swap(f(), Ordering::Relaxed); + let mut result = Box::from_raw(result as *mut T); + *result = f(); + mem::forget(result); }) }); - let thread = join_handle.thread().clone(); - let deferred_handle = Rc::new(RefCell::new(JoinState::Running(join_handle))); + + let join_state = JoinState::::new(join_handle, result); + let deferred_handle = Rc::new(RefCell::new(Some(join_state))); let my_handle = deferred_handle.clone(); self.scope.defer(move || { - let mut state = deferred_handle.borrow_mut(); - state.join(); + let state = mem::replace(deferred_handle.borrow_mut().deref_mut(), None); + if let Some(state) = state { + state.join().unwrap(); + } }); Ok(ScopedJoinHandle { inner: my_handle, - packet: my_packet, thread: thread, + _marker: PhantomData, }) } } -impl ScopedJoinHandle { +impl<'a, T: Send + 'a> ScopedJoinHandle<'a, T> { /// Join the scoped thread, returning the result it produced. - pub fn join(self) -> T { - self.inner.borrow_mut().join(); - self.packet.take(Ordering::Relaxed).unwrap() + pub fn join(self) -> thread::Result { + let state = mem::replace(self.inner.borrow_mut().deref_mut(), None); + state.unwrap().join() } /// Get the underlying thread handle. 
diff --git a/third_party/rust/fuchsia-zircon-sys/.cargo-checksum.json b/third_party/rust/fuchsia-zircon-sys/.cargo-checksum.json index 99dbe2225703..3a0bedd26455 100644 --- a/third_party/rust/fuchsia-zircon-sys/.cargo-checksum.json +++ b/third_party/rust/fuchsia-zircon-sys/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"BUILD.gn":"862eccf06f706db5af1e0041b10b6b637dbc77257b6c4f536a72fc7d21d3dfb9","Cargo.toml":"d05607413136805977463d1c2b979e7692ea1ac06e051d598de440c64603e886","examples/hello.rs":"66c6a147b98b913fef8e7a7da6387fb735f7f1e2c00abc8794a32a8cf0320851","src/definitions.rs":"0b13b741cad9ba42c1da5b654c6d60f03183a7c79a5843e7734a95b4f934d81f","src/lib.rs":"83c8b96c64b442d72a7b87455f182e6ffef6bf2cd7aa2c0c88db992ac9060bda"},"package":"43f3795b4bae048dc6123a6b972cadde2e676f9ded08aef6bb77f5f157684a82"} \ No newline at end of file +{"files":{"BUILD.gn":"0a1529e038693c7d136f277d0987ccc35ebeaa46e5a8238ebce22da927e51bb2","Cargo.toml":"a952982a5ffbbb97d0e095c07903436dcc8fce0d4bf6c925877e751a98904967","examples/hello.rs":"66c6a147b98b913fef8e7a7da6387fb735f7f1e2c00abc8794a32a8cf0320851","src/definitions.rs":"557c48c3ae148158901abbc479cfefe4852141bafb0b926faa8669440bf6123b","src/lib.rs":"a3d9eb3d2243fea31ee2cfd2bf1bbf8ed77e15accf0542f133b83a97c87f6ca7"},"package":"3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"} \ No newline at end of file diff --git a/third_party/rust/fuchsia-zircon-sys/BUILD.gn b/third_party/rust/fuchsia-zircon-sys/BUILD.gn index 008dee6d8b6d..dc4701d3f9e7 100644 --- a/third_party/rust/fuchsia-zircon-sys/BUILD.gn +++ b/third_party/rust/fuchsia-zircon-sys/BUILD.gn @@ -5,7 +5,5 @@ import("//build/rust/rust_library.gni") rust_library("fuchsia-zircon-sys") { - deps = [ - "//third_party/rust-crates:bitflags-0.7.0", - ] + deps = [] } diff --git a/third_party/rust/fuchsia-zircon-sys/Cargo.toml b/third_party/rust/fuchsia-zircon-sys/Cargo.toml index 4fd808e6aefc..274be529bd24 100644 --- a/third_party/rust/fuchsia-zircon-sys/Cargo.toml +++ 
b/third_party/rust/fuchsia-zircon-sys/Cargo.toml @@ -12,10 +12,8 @@ [package] name = "fuchsia-zircon-sys" -version = "0.2.0" +version = "0.3.3" authors = ["Raph Levien "] description = "Low-level Rust bindings for the Zircon kernel" license = "BSD-3-Clause" repository = "https://fuchsia.googlesource.com/garnet/" -[dependencies.bitflags] -version = "0.7.0" diff --git a/third_party/rust/fuchsia-zircon-sys/src/definitions.rs b/third_party/rust/fuchsia-zircon-sys/src/definitions.rs index 13586f4b6129..956313dc86e3 100644 --- a/third_party/rust/fuchsia-zircon-sys/src/definitions.rs +++ b/third_party/rust/fuchsia-zircon-sys/src/definitions.rs @@ -359,14 +359,14 @@ extern { pub fn zx_port_queue( handle: zx_handle_t, - packet: *const u8, + packet: *const zx_port_packet_t, size: usize ) -> zx_status_t; pub fn zx_port_wait( handle: zx_handle_t, deadline: zx_time_t, - packet: *mut u8, + packet: *mut zx_port_packet_t, size: usize ) -> zx_status_t; diff --git a/third_party/rust/fuchsia-zircon-sys/src/lib.rs b/third_party/rust/fuchsia-zircon-sys/src/lib.rs index a6e7c857843f..e9d51f8ad393 100644 --- a/third_party/rust/fuchsia-zircon-sys/src/lib.rs +++ b/third_party/rust/fuchsia-zircon-sys/src/lib.rs @@ -3,184 +3,227 @@ // found in the LICENSE file. 
#![allow(non_camel_case_types)] - -#[macro_use] -extern crate bitflags; +#![deny(warnings)] use std::{cmp, fmt}; -pub type zx_handle_t = i32; - -pub type zx_status_t = i32; - -pub type zx_futex_t = isize; pub type zx_addr_t = usize; -pub type zx_paddr_t = usize; -pub type zx_vaddr_t = usize; -pub type zx_off_t = u64; - -// Auto-generated using tools/gen_status.py -pub const ZX_OK : zx_status_t = 0; -pub const ZX_ERR_INTERNAL : zx_status_t = -1; -pub const ZX_ERR_NOT_SUPPORTED : zx_status_t = -2; -pub const ZX_ERR_NO_RESOURCES : zx_status_t = -3; -pub const ZX_ERR_NO_MEMORY : zx_status_t = -4; -pub const ZX_ERR_CALL_FAILED : zx_status_t = -5; -pub const ZX_ERR_INTERRUPTED_RETRY : zx_status_t = -6; -pub const ZX_ERR_INVALID_ARGS : zx_status_t = -10; -pub const ZX_ERR_BAD_HANDLE : zx_status_t = -11; -pub const ZX_ERR_WRONG_TYPE : zx_status_t = -12; -pub const ZX_ERR_BAD_SYSCALL : zx_status_t = -13; -pub const ZX_ERR_OUT_OF_RANGE : zx_status_t = -14; -pub const ZX_ERR_BUFFER_TOO_SMALL : zx_status_t = -15; -pub const ZX_ERR_BAD_STATE : zx_status_t = -20; -pub const ZX_ERR_TIMED_OUT : zx_status_t = -21; -pub const ZX_ERR_SHOULD_WAIT : zx_status_t = -22; -pub const ZX_ERR_CANCELED : zx_status_t = -23; -pub const ZX_ERR_PEER_CLOSED : zx_status_t = -24; -pub const ZX_ERR_NOT_FOUND : zx_status_t = -25; -pub const ZX_ERR_ALREADY_EXISTS : zx_status_t = -26; -pub const ZX_ERR_ALREADY_BOUND : zx_status_t = -27; -pub const ZX_ERR_UNAVAILABLE : zx_status_t = -28; -pub const ZX_ERR_ACCESS_DENIED : zx_status_t = -30; -pub const ZX_ERR_IO : zx_status_t = -40; -pub const ZX_ERR_IO_REFUSED : zx_status_t = -41; -pub const ZX_ERR_IO_DATA_INTEGRITY : zx_status_t = -42; -pub const ZX_ERR_IO_DATA_LOSS : zx_status_t = -43; -pub const ZX_ERR_BAD_PATH : zx_status_t = -50; -pub const ZX_ERR_NOT_DIR : zx_status_t = -51; -pub const ZX_ERR_NOT_FILE : zx_status_t = -52; -pub const ZX_ERR_FILE_BIG : zx_status_t = -53; -pub const ZX_ERR_NO_SPACE : zx_status_t = -54; -pub const ZX_ERR_STOP : 
zx_status_t = -60; -pub const ZX_ERR_NEXT : zx_status_t = -61; - -pub type zx_time_t = u64; pub type zx_duration_t = u64; -pub const ZX_TIME_INFINITE : zx_time_t = ::std::u64::MAX; - -bitflags! { - #[repr(C)] - pub flags zx_signals_t: u32 { - const ZX_SIGNAL_NONE = 0, - const ZX_OBJECT_SIGNAL_ALL = 0x00ffffff, - const ZX_USER_SIGNAL_ALL = 0xff000000, - const ZX_OBJECT_SIGNAL_0 = 1 << 0, - const ZX_OBJECT_SIGNAL_1 = 1 << 1, - const ZX_OBJECT_SIGNAL_2 = 1 << 2, - const ZX_OBJECT_SIGNAL_3 = 1 << 3, - const ZX_OBJECT_SIGNAL_4 = 1 << 4, - const ZX_OBJECT_SIGNAL_5 = 1 << 5, - const ZX_OBJECT_SIGNAL_6 = 1 << 6, - const ZX_OBJECT_SIGNAL_7 = 1 << 7, - const ZX_OBJECT_SIGNAL_8 = 1 << 8, - const ZX_OBJECT_SIGNAL_9 = 1 << 9, - const ZX_OBJECT_SIGNAL_10 = 1 << 10, - const ZX_OBJECT_SIGNAL_11 = 1 << 11, - const ZX_OBJECT_SIGNAL_12 = 1 << 12, - const ZX_OBJECT_SIGNAL_13 = 1 << 13, - const ZX_OBJECT_SIGNAL_14 = 1 << 14, - const ZX_OBJECT_SIGNAL_15 = 1 << 15, - const ZX_OBJECT_SIGNAL_16 = 1 << 16, - const ZX_OBJECT_SIGNAL_17 = 1 << 17, - const ZX_OBJECT_SIGNAL_18 = 1 << 18, - const ZX_OBJECT_SIGNAL_19 = 1 << 19, - const ZX_OBJECT_SIGNAL_20 = 1 << 20, - const ZX_OBJECT_SIGNAL_21 = 1 << 21, - const ZX_OBJECT_LAST_HANDLE = 1 << 22, - const ZX_OBJECT_HANDLE_CLOSED = 1 << 23, - const ZX_USER_SIGNAL_0 = 1 << 24, - const ZX_USER_SIGNAL_1 = 1 << 25, - const ZX_USER_SIGNAL_2 = 1 << 26, - const ZX_USER_SIGNAL_3 = 1 << 27, - const ZX_USER_SIGNAL_4 = 1 << 28, - const ZX_USER_SIGNAL_5 = 1 << 29, - const ZX_USER_SIGNAL_6 = 1 << 30, - const ZX_USER_SIGNAL_7 = 1 << 31, - - const ZX_OBJECT_READABLE = ZX_OBJECT_SIGNAL_0.bits, - const ZX_OBJECT_WRITABLE = ZX_OBJECT_SIGNAL_1.bits, - const ZX_OBJECT_PEER_CLOSED = ZX_OBJECT_SIGNAL_2.bits, - - // Cancelation (handle was closed while waiting with it) - const ZX_SIGNAL_HANDLE_CLOSED = ZX_OBJECT_HANDLE_CLOSED.bits, - - // Only one user-more reference (handle) to the object exists. 
- const ZX_SIGNAL_LAST_HANDLE = ZX_OBJECT_LAST_HANDLE.bits, - - // Event - const ZX_EVENT_SIGNALED = ZX_OBJECT_SIGNAL_3.bits, - - // EventPair - const ZX_EPAIR_SIGNALED = ZX_OBJECT_SIGNAL_3.bits, - const ZX_EPAIR_CLOSED = ZX_OBJECT_SIGNAL_2.bits, - - // Task signals (process, thread, job) - const ZX_TASK_TERMINATED = ZX_OBJECT_SIGNAL_3.bits, - - // Channel - const ZX_CHANNEL_READABLE = ZX_OBJECT_SIGNAL_0.bits, - const ZX_CHANNEL_WRITABLE = ZX_OBJECT_SIGNAL_1.bits, - const ZX_CHANNEL_PEER_CLOSED = ZX_OBJECT_SIGNAL_2.bits, - - // Socket - const ZX_SOCKET_READABLE = ZX_OBJECT_SIGNAL_0.bits, - const ZX_SOCKET_WRITABLE = ZX_OBJECT_SIGNAL_1.bits, - const ZX_SOCKET_PEER_CLOSED = ZX_OBJECT_SIGNAL_2.bits, - - // Port - const ZX_PORT_READABLE = ZX_OBJECT_READABLE.bits, - - // Resource - const ZX_RESOURCE_DESTROYED = ZX_OBJECT_SIGNAL_3.bits, - const ZX_RESOURCE_READABLE = ZX_OBJECT_READABLE.bits, - const ZX_RESOURCE_WRITABLE = ZX_OBJECT_WRITABLE.bits, - const ZX_RESOURCE_CHILD_ADDED = ZX_OBJECT_SIGNAL_4.bits, - - // Fifo - const ZX_FIFO_READABLE = ZX_OBJECT_READABLE.bits, - const ZX_FIFO_WRITABLE = ZX_OBJECT_WRITABLE.bits, - const ZX_FIFO_PEER_CLOSED = ZX_OBJECT_PEER_CLOSED.bits, - - // Job - const ZX_JOB_NO_PROCESSES = ZX_OBJECT_SIGNAL_3.bits, - const ZX_JOB_NO_JOBS = ZX_OBJECT_SIGNAL_4.bits, - - // Process - const ZX_PROCESS_TERMINATED = ZX_OBJECT_SIGNAL_3.bits, - - // Thread - const ZX_THREAD_TERMINATED = ZX_OBJECT_SIGNAL_3.bits, - - // Log - const ZX_LOG_READABLE = ZX_OBJECT_READABLE.bits, - const ZX_LOG_WRITABLE = ZX_OBJECT_WRITABLE.bits, - - // Timer - const ZX_TIMER_SIGNALED = ZX_OBJECT_SIGNAL_3.bits, - } -} - +pub type zx_futex_t = i32; +pub type zx_handle_t = u32; +pub type zx_off_t = u64; +pub type zx_paddr_t = usize; +pub type zx_rights_t = u32; +pub type zx_signals_t = u32; pub type zx_size_t = usize; pub type zx_ssize_t = isize; +pub type zx_status_t = i32; +pub type zx_time_t = u64; +pub type zx_vaddr_t = usize; -bitflags! 
{ - #[repr(C)] - pub flags zx_rights_t: u32 { - const ZX_RIGHT_NONE = 0, - const ZX_RIGHT_DUPLICATE = 1 << 0, - const ZX_RIGHT_TRANSFER = 1 << 1, - const ZX_RIGHT_READ = 1 << 2, - const ZX_RIGHT_WRITE = 1 << 3, - const ZX_RIGHT_EXECUTE = 1 << 4, - const ZX_RIGHT_MAP = 1 << 5, - const ZX_RIGHT_GET_PROPERTY = 1 << 6, - const ZX_RIGHT_SET_PROPERTY = 1 << 7, - const ZX_RIGHT_DEBUG = 1 << 8, - const ZX_RIGHT_SAME_RIGHTS = 1 << 31, +// TODO: combine these macros with the bitflags and assoc consts macros below +// so that we only have to do one macro invocation. +// The result would look something like: +// multiconst!(bitflags, zx_rights_t, Rights, [RIGHT_NONE => ZX_RIGHT_NONE = 0; ...]); +// multiconst!(assoc_consts, zx_status_t, Status, [OK => ZX_OK = 0; ...]); +// Note that the actual name of the inner macro (e.g. `bitflags`) can't be a variable. +// It'll just have to be matched on manually +macro_rules! multiconst { + ($typename:ident, [$($rawname:ident = $value:expr;)*]) => { + $( + pub const $rawname: $typename = $value; + )* } } +multiconst!(zx_handle_t, [ + ZX_HANDLE_INVALID = 0; +]); + +multiconst!(zx_time_t, [ + ZX_TIME_INFINITE = ::std::u64::MAX; +]); + +multiconst!(zx_rights_t, [ + ZX_RIGHT_NONE = 0; + ZX_RIGHT_DUPLICATE = 1 << 0; + ZX_RIGHT_TRANSFER = 1 << 1; + ZX_RIGHT_READ = 1 << 2; + ZX_RIGHT_WRITE = 1 << 3; + ZX_RIGHT_EXECUTE = 1 << 4; + ZX_RIGHT_MAP = 1 << 5; + ZX_RIGHT_GET_PROPERTY = 1 << 6; + ZX_RIGHT_SET_PROPERTY = 1 << 7; + ZX_RIGHT_ENUMERATE = 1 << 8; + ZX_RIGHT_DESTROY = 1 << 9; + ZX_RIGHT_SET_POLICY = 1 << 10; + ZX_RIGHT_GET_POLICY = 1 << 11; + ZX_RIGHT_SIGNAL = 1 << 12; + ZX_RIGHT_SIGNAL_PEER = 1 << 13; + ZX_RIGHT_WAIT = 0 << 14; // Coming Soon! + ZX_RIGHT_SAME_RIGHTS = 1 << 31; +]); + +// TODO: add an alias for this type in the C headers. 
+multiconst!(u32, [ + ZX_VMO_OP_COMMIT = 1; + ZX_VMO_OP_DECOMMIT = 2; + ZX_VMO_OP_LOCK = 3; + ZX_VMO_OP_UNLOCK = 4; + ZX_VMO_OP_LOOKUP = 5; + ZX_VMO_OP_CACHE_SYNC = 6; + ZX_VMO_OP_CACHE_INVALIDATE = 7; + ZX_VMO_OP_CACHE_CLEAN = 8; + ZX_VMO_OP_CACHE_CLEAN_INVALIDATE = 9; +]); + +// TODO: add an alias for this type in the C headers. +multiconst!(u32, [ + ZX_VM_FLAG_PERM_READ = 1 << 0; + ZX_VM_FLAG_PERM_WRITE = 1 << 1; + ZX_VM_FLAG_PERM_EXECUTE = 1 << 2; + ZX_VM_FLAG_COMPACT = 1 << 3; + ZX_VM_FLAG_SPECIFIC = 1 << 4; + ZX_VM_FLAG_SPECIFIC_OVERWRITE = 1 << 5; + ZX_VM_FLAG_CAN_MAP_SPECIFIC = 1 << 6; + ZX_VM_FLAG_CAN_MAP_READ = 1 << 7; + ZX_VM_FLAG_CAN_MAP_WRITE = 1 << 8; + ZX_VM_FLAG_CAN_MAP_EXECUTE = 1 << 9; +]); + +multiconst!(zx_status_t, [ + ZX_OK = 0; + ZX_ERR_INTERNAL = -1; + ZX_ERR_NOT_SUPPORTED = -2; + ZX_ERR_NO_RESOURCES = -3; + ZX_ERR_NO_MEMORY = -4; + ZX_ERR_CALL_FAILED = -5; + ZX_ERR_INTERRUPTED_RETRY = -6; + ZX_ERR_INVALID_ARGS = -10; + ZX_ERR_BAD_HANDLE = -11; + ZX_ERR_WRONG_TYPE = -12; + ZX_ERR_BAD_SYSCALL = -13; + ZX_ERR_OUT_OF_RANGE = -14; + ZX_ERR_BUFFER_TOO_SMALL = -15; + ZX_ERR_BAD_STATE = -20; + ZX_ERR_TIMED_OUT = -21; + ZX_ERR_SHOULD_WAIT = -22; + ZX_ERR_CANCELED = -23; + ZX_ERR_PEER_CLOSED = -24; + ZX_ERR_NOT_FOUND = -25; + ZX_ERR_ALREADY_EXISTS = -26; + ZX_ERR_ALREADY_BOUND = -27; + ZX_ERR_UNAVAILABLE = -28; + ZX_ERR_ACCESS_DENIED = -30; + ZX_ERR_IO = -40; + ZX_ERR_IO_REFUSED = -41; + ZX_ERR_IO_DATA_INTEGRITY = -42; + ZX_ERR_IO_DATA_LOSS = -43; + ZX_ERR_BAD_PATH = -50; + ZX_ERR_NOT_DIR = -51; + ZX_ERR_NOT_FILE = -52; + ZX_ERR_FILE_BIG = -53; + ZX_ERR_NO_SPACE = -54; + ZX_ERR_STOP = -60; + ZX_ERR_NEXT = -61; +]); + +multiconst!(zx_signals_t, [ + ZX_SIGNAL_NONE = 0; + ZX_OBJECT_SIGNAL_ALL = 0x00ffffff; + ZX_USER_SIGNAL_ALL = 0xff000000; + ZX_OBJECT_SIGNAL_0 = 1 << 0; + ZX_OBJECT_SIGNAL_1 = 1 << 1; + ZX_OBJECT_SIGNAL_2 = 1 << 2; + ZX_OBJECT_SIGNAL_3 = 1 << 3; + ZX_OBJECT_SIGNAL_4 = 1 << 4; + ZX_OBJECT_SIGNAL_5 = 1 << 5; + ZX_OBJECT_SIGNAL_6 = 1 << 6; 
+ ZX_OBJECT_SIGNAL_7 = 1 << 7; + ZX_OBJECT_SIGNAL_8 = 1 << 8; + ZX_OBJECT_SIGNAL_9 = 1 << 9; + ZX_OBJECT_SIGNAL_10 = 1 << 10; + ZX_OBJECT_SIGNAL_11 = 1 << 11; + ZX_OBJECT_SIGNAL_12 = 1 << 12; + ZX_OBJECT_SIGNAL_13 = 1 << 13; + ZX_OBJECT_SIGNAL_14 = 1 << 14; + ZX_OBJECT_SIGNAL_15 = 1 << 15; + ZX_OBJECT_SIGNAL_16 = 1 << 16; + ZX_OBJECT_SIGNAL_17 = 1 << 17; + ZX_OBJECT_SIGNAL_18 = 1 << 18; + ZX_OBJECT_SIGNAL_19 = 1 << 19; + ZX_OBJECT_SIGNAL_20 = 1 << 20; + ZX_OBJECT_SIGNAL_21 = 1 << 21; + ZX_OBJECT_SIGNAL_22 = 1 << 22; + ZX_OBJECT_HANDLE_CLOSED = 1 << 23; + ZX_USER_SIGNAL_0 = 1 << 24; + ZX_USER_SIGNAL_1 = 1 << 25; + ZX_USER_SIGNAL_2 = 1 << 26; + ZX_USER_SIGNAL_3 = 1 << 27; + ZX_USER_SIGNAL_4 = 1 << 28; + ZX_USER_SIGNAL_5 = 1 << 29; + ZX_USER_SIGNAL_6 = 1 << 30; + ZX_USER_SIGNAL_7 = 1 << 31; + + ZX_OBJECT_READABLE = ZX_OBJECT_SIGNAL_0; + ZX_OBJECT_WRITABLE = ZX_OBJECT_SIGNAL_1; + ZX_OBJECT_PEER_CLOSED = ZX_OBJECT_SIGNAL_2; + + // Cancelation (handle was closed while waiting with it) + ZX_SIGNAL_HANDLE_CLOSED = ZX_OBJECT_HANDLE_CLOSED; + + // Event + ZX_EVENT_SIGNALED = ZX_OBJECT_SIGNAL_3; + + // EventPair + ZX_EPAIR_SIGNALED = ZX_OBJECT_SIGNAL_3; + ZX_EPAIR_CLOSED = ZX_OBJECT_SIGNAL_2; + + // Task signals (process, thread, job) + ZX_TASK_TERMINATED = ZX_OBJECT_SIGNAL_3; + + // Channel + ZX_CHANNEL_READABLE = ZX_OBJECT_SIGNAL_0; + ZX_CHANNEL_WRITABLE = ZX_OBJECT_SIGNAL_1; + ZX_CHANNEL_PEER_CLOSED = ZX_OBJECT_SIGNAL_2; + + // Socket + ZX_SOCKET_READABLE = ZX_OBJECT_SIGNAL_0; + ZX_SOCKET_WRITABLE = ZX_OBJECT_SIGNAL_1; + ZX_SOCKET_PEER_CLOSED = ZX_OBJECT_SIGNAL_2; + + // Port + ZX_PORT_READABLE = ZX_OBJECT_READABLE; + + // Resource + ZX_RESOURCE_DESTROYED = ZX_OBJECT_SIGNAL_3; + ZX_RESOURCE_READABLE = ZX_OBJECT_READABLE; + ZX_RESOURCE_WRITABLE = ZX_OBJECT_WRITABLE; + ZX_RESOURCE_CHILD_ADDED = ZX_OBJECT_SIGNAL_4; + + // Fifo + ZX_FIFO_READABLE = ZX_OBJECT_READABLE; + ZX_FIFO_WRITABLE = ZX_OBJECT_WRITABLE; + ZX_FIFO_PEER_CLOSED = ZX_OBJECT_PEER_CLOSED; + + // Job + 
ZX_JOB_NO_PROCESSES = ZX_OBJECT_SIGNAL_3; + ZX_JOB_NO_JOBS = ZX_OBJECT_SIGNAL_4; + + // Process + ZX_PROCESS_TERMINATED = ZX_OBJECT_SIGNAL_3; + + // Thread + ZX_THREAD_TERMINATED = ZX_OBJECT_SIGNAL_3; + + // Log + ZX_LOG_READABLE = ZX_OBJECT_READABLE; + ZX_LOG_WRITABLE = ZX_OBJECT_WRITABLE; + + // Timer + ZX_TIMER_SIGNALED = ZX_OBJECT_SIGNAL_3; +]); + // clock ids pub const ZX_CLOCK_MONOTONIC: u32 = 0; @@ -191,38 +234,9 @@ pub const ZX_CPRNG_ADD_ENTROPY_MAX_LEN: usize = 256; // Socket flags and limits. pub const ZX_SOCKET_HALF_CLOSE: u32 = 1; -// VM Object opcodes -pub const ZX_VMO_OP_COMMIT: u32 = 1; -pub const ZX_VMO_OP_DECOMMIT: u32 = 2; -pub const ZX_VMO_OP_LOCK: u32 = 3; -pub const ZX_VMO_OP_UNLOCK: u32 = 4; -pub const ZX_VMO_OP_LOOKUP: u32 = 5; -pub const ZX_VMO_OP_CACHE_SYNC: u32 = 6; -pub const ZX_VMO_OP_CACHE_INVALIDATE: u32 = 7; -pub const ZX_VMO_OP_CACHE_CLEAN: u32 = 8; -pub const ZX_VMO_OP_CACHE_CLEAN_INVALIDATE: u32 = 9; - // VM Object clone flags pub const ZX_VMO_CLONE_COPY_ON_WRITE: u32 = 1; -// Mapping flags to vmar routines -bitflags! 
{ - #[repr(C)] - pub flags zx_vmar_flags_t: u32 { - // flags to vmar routines - const ZX_VM_FLAG_PERM_READ = 1 << 0, - const ZX_VM_FLAG_PERM_WRITE = 1 << 1, - const ZX_VM_FLAG_PERM_EXECUTE = 1 << 2, - const ZX_VM_FLAG_COMPACT = 1 << 3, - const ZX_VM_FLAG_SPECIFIC = 1 << 4, - const ZX_VM_FLAG_SPECIFIC_OVERWRITE = 1 << 5, - const ZX_VM_FLAG_CAN_MAP_SPECIFIC = 1 << 6, - const ZX_VM_FLAG_CAN_MAP_READ = 1 << 7, - const ZX_VM_FLAG_CAN_MAP_WRITE = 1 << 8, - const ZX_VM_FLAG_CAN_MAP_EXECUTE = 1 << 9, - } -} - #[repr(C)] #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum zx_cache_policy_t { @@ -457,4 +471,4 @@ pub struct zx_vcpu_create_args_t { pub ip: zx_vaddr_t, } -include!("definitions.rs"); \ No newline at end of file +include!("definitions.rs"); diff --git a/third_party/rust/fuchsia-zircon/.cargo-checksum.json b/third_party/rust/fuchsia-zircon/.cargo-checksum.json index d297545f6409..482d8e4f3b21 100644 --- a/third_party/rust/fuchsia-zircon/.cargo-checksum.json +++ b/third_party/rust/fuchsia-zircon/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"BUILD.gn":"1d49d75a432f5b4587b59a50a9b86a21e2a3faf1fff60876b4e486e43cffca35","Cargo.toml":"263dce41c44c34a70fb9803acbfb77c1801ff2b23f0fa46bb2c90f14f33f0ac9","LICENSE":"f82f9062a6dff28145c185f90f94c485eebdc2bb4c6f1c840513709e6d228453","README.md":"f4bff6efc5d888460e2d1eaf62fa0eaca8afe7b51df528ad7829340ec397b32c","examples/BUILD.gn":"fb7a491a26e5f3d48b8796db80d475be2d361bada7311213363dcce4efa9d4fc","src/channel.rs":"0b3c3761a831c9211e327f5332e58becc287cf2444b44164a4f680dc5bdded50","src/event.rs":"9b11c6c0c9fcdbe4e45c03f4a671ef66c31a1be540d3f50a5d0602314fdc1294","src/eventpair.rs":"aca1c6a15450badbfe71c33e287bab8aa05d6fff5968513b191436b853929ca8","src/fifo.rs":"ecc49463cc28713c1375ecafc8008b806767417da72fcc8685b42078ec0308df","src/job.rs":"827db2e4ea1bbf5ecabec9fb279f2721792032e4223b6bd15b56922d80c7ac01","src/lib.rs":"5bc01f9c7d1f3316826bf86cc5c8488523ca7f2ad2a58a8b539c3b49aed3f1a2","src/port.rs":"695aa7114d88d476954fde689180502f22ea6b70339855ebff89dce6694abb9e","src/process.rs":"0b5e42c4eb79b2a7fff9c70f0d99c8b805cefab99285e94fabf2542290b4b990","src/socket.rs":"6e8b799a8f605d42681660129989c9c6427b9039b83de9954c61aa351596218f","src/thread.rs":"d703414c440b5fa597dbafe7b8be925a94d1fe0cf8b47366c786b45eaaec4c60","src/timer.rs":"8fc50736e6a928cabccf78b18aec3e57ac7e5a57c1c519a1cd34158f59e1ff65","src/vmo.rs":"0f219777d5abffcfbc49a43f7eff3ef92b854d1d964579dc9a01d33ba57341c1","tools/BUILD.gn":"f4ce07b2906e6cde15a9d2ec253c58fbfe88ea1819083f864c154a0f1c50c14f","tools/README.md":"0217d58913c32b7e8aa231da852d96307d8387f99e2352a026196150cb180d07","tools/clang_wrapper.cc":"c62dcc1f71cab03f7e215c8800d798bd05af56fa7510ea8d59d6b15dce2b6a6f","tools/gen_status.py":"a2330db86564e12412af2dce60d4c605c0ab203fcdea8039d5c6a8e7f218a3c3"},"package":"f6c0581a4e363262e52b87f59ee2afe3415361c6ec35e665924eb08afe8ff159"} \ No newline at end of file 
+{"files":{"BUILD.gn":"651b841730c01aa677f22906fd5eee96234a01d13139e4be12943698dd486a17","Cargo.toml":"0f744f8a2ee868ebcb30356bebc5dcf9be09927f9b72fd7c0625d4c4a35b1803","LICENSE":"f82f9062a6dff28145c185f90f94c485eebdc2bb4c6f1c840513709e6d228453","README.md":"f4bff6efc5d888460e2d1eaf62fa0eaca8afe7b51df528ad7829340ec397b32c","examples/BUILD.gn":"51c9291631e59d368709d9959a88bc8be1fe7375c65f2e1fc6e26184e8971137","src/channel.rs":"15c2a404f760d1594b9b8cfbaf8a828f5c506c6cb09a3408b4d5e894360eb1fc","src/cprng.rs":"cd8a163f7e5e75e536ee898a00c1c377b10c748c7dc574c7c3582bb86fdc86c5","src/event.rs":"bbf0c91d154f01ec1618182efbaaa8c71580f05a807cac1d12208b5cbe8f6b74","src/eventpair.rs":"0bf0b1137c2fb08398edb4d09b89ded5990d07f94ffe55b6ec917c7bc9059ebe","src/fifo.rs":"9ac29d11330fdea847902b8dba6b2004ad44878d8ef65d26a197780cd443ebb8","src/handle.rs":"8741c4b5056a200cfb4237af2d2d2c1db083075b112df68166e351e6d81eb3f3","src/job.rs":"827db2e4ea1bbf5ecabec9fb279f2721792032e4223b6bd15b56922d80c7ac01","src/lib.rs":"9f65dd6ba52e56190825542d6d2dfeca2a5610257513f373fa2cdb4d45d9fc6b","src/port.rs":"32501f17218ec9ad4d97741f4ff2d5ca8e89b7da7226473c82441fd06adbecc4","src/process.rs":"0b5e42c4eb79b2a7fff9c70f0d99c8b805cefab99285e94fabf2542290b4b990","src/rights.rs":"679422da3c0ff9f4e4928b8f41098ef0f5ec09af098496e088e2bac82fc9568d","src/signals.rs":"c07501df58b3c6264e37ebc0f5fd28f44ced040273a5ab5e839e3a204d351ea7","src/socket.rs":"cfb2f6ecb5ba9d9e354c18088061d6e5330dd420a20d0ced4a11f05d3332e3b8","src/status.rs":"4831adf17c1fbd4d52a0aacc63eebd98e49f6c0e28c407e8a0e40f380b3f3b2b","src/thread.rs":"d703414c440b5fa597dbafe7b8be925a94d1fe0cf8b47366c786b45eaaec4c60","src/time.rs":"33d9662c58b921ebe701d927d30ebc01d95594d081c38824355093206f29fba0","src/vmar.rs":"e69a51287e3cb016fa39bb8d884c88ffba4799a452c17e881af9d63a507b37f7","src/vmo.rs":"377968eec57b79a7f4b117dff2f59f26a57ba97ca7f2f0334bd27b99fe87b299","tools/gen_status.py":"a2330db86564e12412af2dce60d4c605c0ab203fcdea8039d5c6a8e7f218a3c3"},"package":"2e9763
c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"} \ No newline at end of file diff --git a/third_party/rust/fuchsia-zircon/BUILD.gn b/third_party/rust/fuchsia-zircon/BUILD.gn index f09705660ed5..1edb419d8f20 100644 --- a/third_party/rust/fuchsia-zircon/BUILD.gn +++ b/third_party/rust/fuchsia-zircon/BUILD.gn @@ -7,6 +7,7 @@ import("//build/rust/rust_library.gni") rust_library("fuchsia-zircon") { deps = [ "//garnet/public/rust/crates/fuchsia-zircon/fuchsia-zircon-sys", + "//third_party/rust-crates:bitflags-0.7.0", ] with_tests = true diff --git a/third_party/rust/fuchsia-zircon/Cargo.toml b/third_party/rust/fuchsia-zircon/Cargo.toml index d8f9b47a7fee..4f8cd92b0ff6 100644 --- a/third_party/rust/fuchsia-zircon/Cargo.toml +++ b/third_party/rust/fuchsia-zircon/Cargo.toml @@ -12,10 +12,13 @@ [package] name = "fuchsia-zircon" -version = "0.2.1" +version = "0.3.3" authors = ["Raph Levien "] description = "Rust bindings for the Zircon kernel" license = "BSD-3-Clause" repository = "https://fuchsia.googlesource.com/garnet/" +[dependencies.bitflags] +version = "1.0.0" + [dependencies.fuchsia-zircon-sys] -version = "0.2.0" +version = "0.3.3" diff --git a/third_party/rust/fuchsia-zircon/examples/BUILD.gn b/third_party/rust/fuchsia-zircon/examples/BUILD.gn index d2f0125c8ef8..dee61f5829bb 100644 --- a/third_party/rust/fuchsia-zircon/examples/BUILD.gn +++ b/third_party/rust/fuchsia-zircon/examples/BUILD.gn @@ -2,8 +2,16 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
-group("examples") { +import("//build/package.gni") + +package("zircon_rust_examples") { + system_image = true + deps = [ - "//garnet/public/rust/crates/fuchsia-zircon/examples/zx_toy", + "zx_toy", ] + + binaries = [ { + name = "example_zx_toy" + } ] } diff --git a/third_party/rust/fuchsia-zircon/src/channel.rs b/third_party/rust/fuchsia-zircon/src/channel.rs index 7f558309cc06..44ffc6cd9cf1 100644 --- a/third_party/rust/fuchsia-zircon/src/channel.rs +++ b/third_party/rust/fuchsia-zircon/src/channel.rs @@ -4,15 +4,15 @@ //! Type-safe bindings for Zircon channel objects. -use {AsHandleRef, HandleBased, Handle, HandleRef, INVALID_HANDLE, Peered, Status, Time, usize_into_u32, size_to_u32_sat}; -use {sys, handle_drop, into_result}; +use {AsHandleRef, HandleBased, Handle, HandleRef, Peered, Status, Time, usize_into_u32, size_to_u32_sat}; +use {sys, ok}; use std::mem; /// An object representing a Zircon /// [channel](https://fuchsia.googlesource.com/zircon/+/master/docs/objects/channel.md). /// /// As essentially a subtype of `Handle`, it can be freely interconverted. -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq, Hash)] pub struct Channel(Handle); impl_handle_based!(Channel); impl Peered for Channel {} @@ -24,14 +24,16 @@ impl Channel { /// Wraps the /// [zx_channel_create](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/channel_create.md) /// syscall. 
- pub fn create(opts: ChannelOpts) -> Result<(Channel, Channel), Status> { + pub fn create() -> Result<(Channel, Channel), Status> { unsafe { let mut handle0 = 0; let mut handle1 = 0; - let status = sys::zx_channel_create(opts as u32, &mut handle0, &mut handle1); - into_result(status, || - (Self::from(Handle(handle0)), - Self::from(Handle(handle1)))) + let opts = 0; + ok(sys::zx_channel_create(opts, &mut handle0, &mut handle1))?; + Ok(( + Self::from(Handle::from_raw(handle0)), + Self::from(Handle::from_raw(handle1)) + )) } } @@ -42,21 +44,22 @@ impl Channel { /// If the `MessageBuf` lacks the capacity to hold the pending message, /// returns an `Err` with the number of bytes and number of handles needed. /// Otherwise returns an `Ok` with the result as usual. - pub fn read_raw(&self, opts: u32, buf: &mut MessageBuf) + pub fn read_raw(&self, buf: &mut MessageBuf) -> Result, (usize, usize)> { + let opts = 0; unsafe { - buf.reset_handles(); + buf.clear(); let raw_handle = self.raw_handle(); let mut num_bytes: u32 = size_to_u32_sat(buf.bytes.capacity()); let mut num_handles: u32 = size_to_u32_sat(buf.handles.capacity()); - let status = sys::zx_channel_read(raw_handle, opts, - buf.bytes.as_mut_ptr(), buf.handles.as_mut_ptr(), - num_bytes, num_handles, &mut num_bytes, &mut num_handles); - if status == sys::ZX_ERR_BUFFER_TOO_SMALL { + let status = ok(sys::zx_channel_read(raw_handle, opts, + buf.bytes.as_mut_ptr(), buf.handles.as_mut_ptr() as *mut _, + num_bytes, num_handles, &mut num_bytes, &mut num_handles)); + if status == Err(Status::BUFFER_TOO_SMALL) { Err((num_bytes as usize, num_handles as usize)) } else { - Ok(into_result(status, || { + Ok(status.map(|()| { buf.bytes.set_len(num_bytes as usize); buf.handles.set_len(num_handles as usize); })) @@ -69,9 +72,9 @@ impl Channel { /// Note that this method can cause internal reallocations in the `MessageBuf` /// if it is lacks capacity to hold the full message. 
If such reallocations /// are not desirable, use `read_raw` instead. - pub fn read(&self, opts: u32, buf: &mut MessageBuf) -> Result<(), Status> { + pub fn read(&self, buf: &mut MessageBuf) -> Result<(), Status> { loop { - match self.read_raw(opts, buf) { + match self.read_raw(buf) { Ok(result) => return result, Err((num_bytes, num_handles)) => { buf.ensure_capacity_bytes(num_bytes); @@ -84,18 +87,19 @@ impl Channel { /// Write a message to a channel. Wraps the /// [zx_channel_write](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/channel_write.md) /// syscall. - pub fn write(&self, bytes: &[u8], handles: &mut Vec, opts: u32) + pub fn write(&self, bytes: &[u8], handles: &mut Vec) -> Result<(), Status> { - let n_bytes = try!(usize_into_u32(bytes.len()).map_err(|_| Status::ErrOutOfRange)); - let n_handles = try!(usize_into_u32(handles.len()).map_err(|_| Status::ErrOutOfRange)); + let opts = 0; + let n_bytes = try!(usize_into_u32(bytes.len()).map_err(|_| Status::OUT_OF_RANGE)); + let n_handles = try!(usize_into_u32(handles.len()).map_err(|_| Status::OUT_OF_RANGE)); unsafe { let status = sys::zx_channel_write(self.raw_handle(), opts, bytes.as_ptr(), n_bytes, handles.as_ptr() as *const sys::zx_handle_t, n_handles); - into_result(status, || { - // Handles were successfully transferred, forget them on sender side - handles.set_len(0); - }) + ok(status)?; + // Handles were successfully transferred, forget them on sender side + handles.set_len(0); + Ok(()) } } @@ -113,21 +117,21 @@ impl Channel { /// On failure returns the both the main and read status. 
/// /// [read]: struct.Channel.html#method.read - pub fn call(&self, options: u32, timeout: Time, bytes: &[u8], handles: &mut Vec, + pub fn call(&self, timeout: Time, bytes: &[u8], handles: &mut Vec, buf: &mut MessageBuf) -> Result<(), (Status, Status)> { let write_num_bytes = try!(usize_into_u32(bytes.len()).map_err( - |_| (Status::ErrOutOfRange, Status::NoError))); + |_| (Status::OUT_OF_RANGE, Status::OK))); let write_num_handles = try!(usize_into_u32(handles.len()).map_err( - |_| (Status::ErrOutOfRange, Status::NoError))); - buf.reset_handles(); + |_| (Status::OUT_OF_RANGE, Status::OK))); + buf.clear(); let read_num_bytes: u32 = size_to_u32_sat(buf.bytes.capacity()); let read_num_handles: u32 = size_to_u32_sat(buf.handles.capacity()); let args = sys::zx_channel_call_args_t { wr_bytes: bytes.as_ptr(), wr_handles: handles.as_ptr() as *const sys::zx_handle_t, rd_bytes: buf.bytes.as_mut_ptr(), - rd_handles: buf.handles.as_mut_ptr(), + rd_handles: buf.handles.as_mut_ptr() as *mut _, wr_num_bytes: write_num_bytes, wr_num_handles: write_num_handles, rd_num_bytes: read_num_bytes, @@ -135,40 +139,64 @@ impl Channel { }; let mut actual_read_bytes: u32 = 0; let mut actual_read_handles: u32 = 0; - let mut read_status = sys::ZX_OK; + let mut read_status = Status::OK.into_raw(); + let options = 0; let status = unsafe { - sys::zx_channel_call(self.raw_handle(), options, timeout, &args, &mut actual_read_bytes, - &mut actual_read_handles, &mut read_status) + Status::from_raw( + sys::zx_channel_call( + self.raw_handle(), options, timeout.nanos(), &args, &mut actual_read_bytes, + &mut actual_read_handles, &mut read_status)) }; - if status == sys::ZX_OK || status == sys::ZX_ERR_TIMED_OUT || status == sys::ZX_ERR_CALL_FAILED - { - // Handles were successfully transferred, even if we didn't get a response, so forget - // them on the sender side. 
- unsafe { handles.set_len(0); } + + match status { + Status::OK | + Status::TIMED_OUT | + Status::CALL_FAILED => { + // Handles were successfully transferred, + // even if we didn't get a response, so forget + // them on the sender side. + unsafe { handles.set_len(0); } + } + _ => {} } + unsafe { buf.bytes.set_len(actual_read_bytes as usize); buf.handles.set_len(actual_read_handles as usize); } - if status == sys::ZX_OK { + if Status::OK == status { Ok(()) } else { - Err((Status::from_raw(status), Status::from_raw(read_status))) + Err((status, Status::from_raw(read_status))) } } } -/// Options for creating a channel. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum ChannelOpts { - /// A normal channel. - Normal = 0, +#[test] +pub fn test_handle_repr() { + assert_eq!(::std::mem::size_of::(), 4); + assert_eq!(::std::mem::size_of::(), 4); + assert_eq!(::std::mem::align_of::(), ::std::mem::align_of::()); + + // This test asserts that repr(transparent) still works for Handle -> zx_handle_t + + let n: Vec = vec![0, 100, 2<<32-1]; + let v: Vec = n.iter().map(|h| unsafe { Handle::from_raw(*h) } ).collect(); + + for (handle, raw) in v.iter().zip(n.iter()) { + unsafe { + assert_eq!(*(handle as *const _ as *const [u8; 4]), *(raw as *const _ as *const [u8; 4])); + } + } + + for h in v.into_iter() { + ::std::mem::forget(h); + } } -impl Default for ChannelOpts { - fn default() -> Self { - ChannelOpts::Normal +impl AsRef for Channel { + fn as_ref(&self) -> &Self { + &self } } @@ -183,7 +211,7 @@ impl Default for ChannelOpts { #[derive(Debug)] pub struct MessageBuf { bytes: Vec, - handles: Vec, + handles: Vec, } impl MessageBuf { @@ -192,6 +220,14 @@ impl MessageBuf { Default::default() } + /// Create a new non-empty message buffer. + pub fn new_with(v: Vec, h: Vec) -> Self { + Self{ + bytes: v, + handles: h, + } + } + /// Ensure that the buffer has the capacity to hold at least `n_bytes` bytes. 
pub fn ensure_capacity_bytes(&mut self, n_bytes: usize) { ensure_capacity(&mut self.bytes, n_bytes); @@ -202,6 +238,14 @@ impl MessageBuf { ensure_capacity(&mut self.handles, n_handles); } + /// Ensure that at least n_bytes bytes are initialized (0 fill). + pub fn ensure_initialized_bytes(&mut self, n_bytes: usize) { + if n_bytes <= self.bytes.len() { + return; + } + self.bytes.resize(n_bytes, 0); + } + /// Get a reference to the bytes of the message buffer, as a `&[u8]` slice. pub fn bytes(&self) -> &[u8] { self.bytes.as_slice() @@ -218,35 +262,23 @@ impl MessageBuf { /// method is called again with the same index, it will return `None`, as /// will happen if the index exceeds the number of handles available. pub fn take_handle(&mut self, index: usize) -> Option { - self.handles.get_mut(index).and_then(|handleref| - if *handleref == INVALID_HANDLE { + self.handles.get_mut(index).and_then(|handle| + if handle.is_invalid() { None } else { - Some(Handle(mem::replace(handleref, INVALID_HANDLE))) + Some(mem::replace(handle, Handle::invalid())) } ) } - fn drop_handles(&mut self) { - for &handle in &self.handles { - if handle != 0 { - handle_drop(handle); - } - } - } - - fn reset_handles(&mut self) { - self.drop_handles(); + /// Clear the bytes and handles contained in the buf. This will drop any + /// contained handles, resulting in their resources being freed. 
+ pub fn clear(&mut self) { + self.bytes.clear(); self.handles.clear(); } } -impl Drop for MessageBuf { - fn drop(&mut self) { - self.drop_handles(); - } -} - fn ensure_capacity(vec: &mut Vec, size: usize) { let len = vec.len(); if size > len { @@ -257,31 +289,30 @@ fn ensure_capacity(vec: &mut Vec, size: usize) { #[cfg(test)] mod tests { use super::*; - use {Duration, ZX_CHANNEL_READABLE, ZX_CHANNEL_WRITABLE, ZX_RIGHT_SAME_RIGHTS, ZX_SIGNAL_LAST_HANDLE, Vmo, VmoOpts}; - use deadline_after; + use {DurationNum, Rights, Signals, Vmo}; use std::thread; #[test] fn channel_basic() { - let (p1, p2) = Channel::create(ChannelOpts::Normal).unwrap(); + let (p1, p2) = Channel::create().unwrap(); let mut empty = vec![]; - assert!(p1.write(b"hello", &mut empty, 0).is_ok()); + assert!(p1.write(b"hello", &mut empty).is_ok()); let mut buf = MessageBuf::new(); - assert!(p2.read(0, &mut buf).is_ok()); + assert!(p2.read(&mut buf).is_ok()); assert_eq!(buf.bytes(), b"hello"); } #[test] fn channel_read_raw_too_small() { - let (p1, p2) = Channel::create(ChannelOpts::Normal).unwrap(); + let (p1, p2) = Channel::create().unwrap(); let mut empty = vec![]; - assert!(p1.write(b"hello", &mut empty, 0).is_ok()); + assert!(p1.write(b"hello", &mut empty).is_ok()); let mut buf = MessageBuf::new(); - let result = p2.read_raw(0, &mut buf); + let result = p2.read_raw(&mut buf); assert_eq!(result, Err((5, 0))); assert_eq!(buf.bytes(), b""); } @@ -291,19 +322,19 @@ mod tests { let hello_length: usize = 5; // Create a pair of channels and a virtual memory object. - let (p1, p2) = Channel::create(ChannelOpts::Normal).unwrap(); - let vmo = Vmo::create(hello_length as u64, VmoOpts::Default).unwrap(); + let (p1, p2) = Channel::create().unwrap(); + let vmo = Vmo::create(hello_length as u64).unwrap(); // Duplicate VMO handle and send it down the channel. 
- let duplicate_vmo_handle = vmo.duplicate_handle(ZX_RIGHT_SAME_RIGHTS).unwrap().into(); + let duplicate_vmo_handle = vmo.duplicate_handle(Rights::SAME_RIGHTS).unwrap().into(); let mut handles_to_send: Vec = vec![duplicate_vmo_handle]; - assert!(p1.write(b"", &mut handles_to_send, 0).is_ok()); + assert!(p1.write(b"", &mut handles_to_send).is_ok()); // Handle should be removed from vector. assert!(handles_to_send.is_empty()); // Read the handle from the receiving channel. let mut buf = MessageBuf::new(); - assert!(p2.read(0, &mut buf).is_ok()); + assert!(p2.read(&mut buf).is_ok()); assert_eq!(buf.n_handles(), 1); // Take the handle from the buffer. let received_handle = buf.take_handle(0).unwrap(); @@ -324,57 +355,64 @@ mod tests { #[test] fn channel_call_timeout() { - let ten_ms: Duration = 10_000_000; + let ten_ms = 10.millis(); // Create a pair of channels and a virtual memory object. - let (p1, p2) = Channel::create(ChannelOpts::Normal).unwrap(); - let vmo = Vmo::create(0 as u64, VmoOpts::Default).unwrap(); + let (p1, p2) = Channel::create().unwrap(); + let vmo = Vmo::create(0 as u64).unwrap(); // Duplicate VMO handle and send it along with the call. - let duplicate_vmo_handle = vmo.duplicate_handle(ZX_RIGHT_SAME_RIGHTS).unwrap().into(); + let duplicate_vmo_handle = vmo.duplicate_handle(Rights::SAME_RIGHTS).unwrap().into(); let mut handles_to_send: Vec = vec![duplicate_vmo_handle]; let mut buf = MessageBuf::new(); - assert_eq!(p1.call(0, deadline_after(ten_ms), b"call", &mut handles_to_send, &mut buf), - Err((Status::ErrTimedOut, Status::NoError))); + assert_eq!(p1.call(ten_ms.after_now(), b"call", &mut handles_to_send, &mut buf), + Err((Status::TIMED_OUT, Status::OK))); // Handle should be removed from vector even though we didn't get a response, as it was // still sent over the channel. assert!(handles_to_send.is_empty()); // Should be able to read call even though it timed out waiting for a response. 
let mut buf = MessageBuf::new(); - assert!(p2.read(0, &mut buf).is_ok()); + assert!(p2.read(&mut buf).is_ok()); assert_eq!(buf.bytes(), b"call"); assert_eq!(buf.n_handles(), 1); } #[test] fn channel_call() { - let hundred_ms: Duration = 100_000_000; - // Create a pair of channels - let (p1, p2) = Channel::create(ChannelOpts::Normal).unwrap(); + let (p1, p2) = Channel::create().unwrap(); + + // create an mpsc channel for communicating the call data for later assertion + let (tx, rx) = ::std::sync::mpsc::channel(); // Start a new thread to respond to the call. - let server = thread::spawn(move || { - assert_eq!(p2.wait_handle(ZX_CHANNEL_READABLE, deadline_after(hundred_ms)), - Ok(ZX_CHANNEL_READABLE | ZX_CHANNEL_WRITABLE | ZX_SIGNAL_LAST_HANDLE)); + thread::spawn(move || { let mut buf = MessageBuf::new(); - assert_eq!(p2.read(0, &mut buf), Ok(())); - assert_eq!(buf.bytes(), b"txidcall"); - assert_eq!(buf.n_handles(), 0); - let mut empty = vec![]; - assert_eq!(p2.write(b"txidresponse", &mut empty, 0), Ok(())); + // if either the read or the write fail, this thread will panic, + // resulting in tx being dropped, which will be noticed by the rx. + p2.wait_handle(Signals::CHANNEL_READABLE, 1.seconds().after_now()).expect("callee wait error"); + p2.read(&mut buf).expect("callee read error"); + p2.write(b"txidresponse", &mut vec![]).expect("callee write error"); + tx.send(buf).expect("callee mpsc send error"); }); // Make the call. - let mut empty = vec![]; let mut buf = MessageBuf::new(); buf.ensure_capacity_bytes(12); - assert_eq!(p1.call(0, deadline_after(hundred_ms), b"txidcall", &mut empty, &mut buf), - Ok(())); + // NOTE(raggi): CQ has been seeing some long stalls from channel call, + // and it's as yet unclear why. The timeout here has been made much + // larger in order to avoid that, as the issues are not issues with this + // crate's concerns. 
The timeout is here just to prevent the tests from + // stalling forever if a developer makes a mistake locally in this + // crate. Tests of Zircon behavior or virtualization behavior should be + // covered elsewhere. See ZX-1324. + p1.call(30.seconds().after_now(), b"txidcall", &mut vec![], &mut buf).expect("channel call error"); assert_eq!(buf.bytes(), b"txidresponse"); assert_eq!(buf.n_handles(), 0); - assert!(server.join().is_ok()); + let sbuf = rx.recv().expect("mpsc channel recv error"); + assert_eq!(sbuf.bytes(), b"txidcall"); + assert_eq!(sbuf.n_handles(), 0); } } diff --git a/third_party/rust/fuchsia-zircon/src/cprng.rs b/third_party/rust/fuchsia-zircon/src/cprng.rs new file mode 100644 index 000000000000..433ed26d9e6f --- /dev/null +++ b/third_party/rust/fuchsia-zircon/src/cprng.rs @@ -0,0 +1,68 @@ +use {Status, ok, sys}; + +/// Draw random bytes from the kernel's CPRNG to fill the given buffer. Returns the actual number of +/// bytes drawn, which may sometimes be less than the size of the buffer provided. +/// +/// The buffer must have length less than `ZX_CPRNG_DRAW_MAX_LEN`. +/// +/// Wraps the +/// [zx_cprng_draw](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/cprng_draw.md) +/// syscall. +pub fn cprng_draw(buffer: &mut [u8]) -> Result { + let mut actual = 0; + let status = unsafe { sys::zx_cprng_draw(buffer.as_mut_ptr(), buffer.len(), &mut actual) }; + ok(status).map(|()| actual) +} + +/// Mix the given entropy into the kernel CPRNG. +/// +/// The buffer must have length less than `ZX_CPRNG_ADD_ENTROPY_MAX_LEN`. +/// +/// Wraps the +/// [zx_cprng_add_entropy](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/cprng_add_entropy.md) +/// syscall. 
+pub fn cprng_add_entropy(buffer: &[u8]) -> Result<(), Status> { + let status = unsafe { sys::zx_cprng_add_entropy(buffer.as_ptr(), buffer.len()) }; + ok(status) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn cprng() { + let mut buffer = [0; 20]; + assert_eq!(cprng_draw(&mut buffer), Ok(20)); + let mut first_zero = 0; + let mut last_zero = 0; + for _ in 0..30 { + let mut buffer = [0; 20]; + assert_eq!(cprng_draw(&mut buffer), Ok(20)); + if buffer[0] == 0 { + first_zero += 1; + } + if buffer[19] == 0 { + last_zero += 1; + } + } + assert_ne!(first_zero, 30); + assert_ne!(last_zero, 30); + } + + #[test] + fn cprng_too_large() { + let mut buffer = [0; sys::ZX_CPRNG_DRAW_MAX_LEN + 1]; + assert_eq!(cprng_draw(&mut buffer), Err(Status::INVALID_ARGS)); + + for mut s in buffer.chunks_mut(sys::ZX_CPRNG_DRAW_MAX_LEN) { + assert_eq!(cprng_draw(&mut s), Ok(s.len())); + } + } + + #[test] + fn cprng_add() { + let buffer = [0, 1, 2]; + assert_eq!(cprng_add_entropy(&buffer), Ok(())); + } +} \ No newline at end of file diff --git a/third_party/rust/fuchsia-zircon/src/event.rs b/third_party/rust/fuchsia-zircon/src/event.rs index ae476ba40eef..533a8aafccc4 100644 --- a/third_party/rust/fuchsia-zircon/src/event.rs +++ b/third_party/rust/fuchsia-zircon/src/event.rs @@ -5,7 +5,7 @@ //! Type-safe bindings for Zircon event objects. use {AsHandleRef, Cookied, HandleBased, Handle, HandleRef, Status}; -use {sys, into_result}; +use {sys, ok}; /// An object representing a Zircon /// [event object](https://fuchsia.googlesource.com/zircon/+/master/docs/objects/event.md). @@ -20,23 +20,13 @@ impl Event { /// Create an event object, an object which is signalable but nothing else. Wraps the /// [zx_event_create](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/event_create.md) /// syscall. 
- pub fn create(options: EventOpts) -> Result { + pub fn create() -> Result { let mut out = 0; - let status = unsafe { sys::zx_event_create(options as u32, &mut out) }; - into_result(status, || Self::from(Handle(out))) - } -} - -/// Options for creating an event object. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum EventOpts { - /// Default options. - Default = 0, -} - -impl Default for EventOpts { - fn default() -> Self { - EventOpts::Default + let opts = 0; + let status = unsafe { sys::zx_event_create(opts, &mut out) }; + ok(status)?; + unsafe { + Ok(Self::from(Handle::from_raw(out))) + } } } diff --git a/third_party/rust/fuchsia-zircon/src/eventpair.rs b/third_party/rust/fuchsia-zircon/src/eventpair.rs index 4e12c108b1ca..6f2d29806d8b 100644 --- a/third_party/rust/fuchsia-zircon/src/eventpair.rs +++ b/third_party/rust/fuchsia-zircon/src/eventpair.rs @@ -5,7 +5,7 @@ //! Type-safe bindings for Zircon event pairs. use {AsHandleRef, Cookied, HandleBased, Handle, HandleRef, Peered, Status}; -use {sys, into_result}; +use {sys, ok}; /// An object representing a Zircon /// [event pair](https://fuchsia.googlesource.com/zircon/+/master/docs/concepts.md#Other-IPC_Events_Event-Pairs_and-User-Signals). @@ -21,55 +21,45 @@ impl EventPair { /// Create an event pair, a pair of objects which can signal each other. Wraps the /// [zx_eventpair_create](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/eventpair_create.md) /// syscall. - pub fn create(options: EventPairOpts) -> Result<(EventPair, EventPair), Status> { + pub fn create() -> Result<(EventPair, EventPair), Status> { let mut out0 = 0; let mut out1 = 0; - let status = unsafe { sys::zx_eventpair_create(options as u32, &mut out0, &mut out1) }; - into_result(status, || - (Self::from(Handle(out0)), - Self::from(Handle(out1)))) - } -} - -/// Options for creating an event pair. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum EventPairOpts { - /// Default options. 
- Default = 0, -} - -impl Default for EventPairOpts { - fn default() -> Self { - EventPairOpts::Default + let options = 0; + let status = unsafe { sys::zx_eventpair_create(options, &mut out0, &mut out1) }; + ok(status)?; + unsafe { + Ok(( + Self::from(Handle::from_raw(out0)), + Self::from(Handle::from_raw(out1)) + )) + } } } #[cfg(test)] mod tests { use super::*; - use {Duration, ZX_SIGNAL_LAST_HANDLE, ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0}; - use deadline_after; + use {DurationNum, Signals}; #[test] fn wait_and_signal_peer() { - let (p1, p2) = EventPair::create(EventPairOpts::Default).unwrap(); - let eighty_ms: Duration = 80_000_000; + let (p1, p2) = EventPair::create().unwrap(); + let eighty_ms = 80.millis(); // Waiting on one without setting any signal should time out. - assert_eq!(p2.wait_handle(ZX_USER_SIGNAL_0, deadline_after(eighty_ms)), Err(Status::ErrTimedOut)); + assert_eq!(p2.wait_handle(Signals::USER_0, eighty_ms.after_now()), Err(Status::TIMED_OUT)); // If we set a signal, we should be able to wait for it. - assert!(p1.signal_peer(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - assert_eq!(p2.wait_handle(ZX_USER_SIGNAL_0, deadline_after(eighty_ms)).unwrap(), - ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert!(p1.signal_peer(Signals::NONE, Signals::USER_0).is_ok()); + assert_eq!(p2.wait_handle(Signals::USER_0, eighty_ms.after_now()).unwrap(), + Signals::USER_0); // Should still work, signals aren't automatically cleared. - assert_eq!(p2.wait_handle(ZX_USER_SIGNAL_0, deadline_after(eighty_ms)).unwrap(), - ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(p2.wait_handle(Signals::USER_0, eighty_ms.after_now()).unwrap(), + Signals::USER_0); // Now clear it, and waiting should time out again. 
- assert!(p1.signal_peer(ZX_USER_SIGNAL_0, ZX_SIGNAL_NONE).is_ok()); - assert_eq!(p2.wait_handle(ZX_USER_SIGNAL_0, deadline_after(eighty_ms)), Err(Status::ErrTimedOut)); + assert!(p1.signal_peer(Signals::USER_0, Signals::NONE).is_ok()); + assert_eq!(p2.wait_handle(Signals::USER_0, eighty_ms.after_now()), Err(Status::TIMED_OUT)); } } diff --git a/third_party/rust/fuchsia-zircon/src/fifo.rs b/third_party/rust/fuchsia-zircon/src/fifo.rs index c948e99a7b65..20af6f523673 100644 --- a/third_party/rust/fuchsia-zircon/src/fifo.rs +++ b/third_party/rust/fuchsia-zircon/src/fifo.rs @@ -5,7 +5,7 @@ //! Type-safe bindings for Zircon fifo objects. use {AsHandleRef, HandleBased, Handle, HandleRef, Status}; -use {sys, into_result}; +use {sys, ok}; /// An object representing a Zircon fifo. /// @@ -19,15 +19,20 @@ impl Fifo { /// element into the fifo from which the opposing endpoint reads. Wraps the /// [zx_fifo_create](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/fifo_create.md) /// syscall. - pub fn create(elem_count: u32, elem_size: u32, options: FifoOpts) + pub fn create(elem_count: u32, elem_size: u32) -> Result<(Fifo, Fifo), Status> { let mut out0 = 0; let mut out1 = 0; + let options = 0; let status = unsafe { - sys::zx_fifo_create(elem_count, elem_size, options as u32, &mut out0, &mut out1) + sys::zx_fifo_create(elem_count, elem_size, options, &mut out0, &mut out1) }; - into_result(status, || (Self::from(Handle(out0)), Self::from(Handle(out1)))) + ok(status)?; + unsafe { Ok(( + Self::from(Handle::from_raw(out0)), + Self::from(Handle::from_raw(out1)) + ))} } /// Attempts to write some number of elements into the fifo. The number of bytes written will be @@ -42,7 +47,7 @@ impl Fifo { sys::zx_fifo_write(self.raw_handle(), bytes.as_ptr(), bytes.len(), &mut num_entries_written) }; - into_result(status, || num_entries_written) + ok(status).map(|()| num_entries_written) } /// Attempts to read some number of elements out of the fifo. 
The number of bytes read will @@ -57,21 +62,7 @@ impl Fifo { sys::zx_fifo_read(self.raw_handle(), bytes.as_mut_ptr(), bytes.len(), &mut num_entries_read) }; - into_result(status, || num_entries_read) - } -} - -/// Options for creating a fifo pair. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum FifoOpts { - /// Default options. - Default = 0, -} - -impl Default for FifoOpts { - fn default() -> Self { - FifoOpts::Default + ok(status).map(|()| num_entries_read) } } @@ -81,11 +72,11 @@ mod tests { #[test] fn fifo_basic() { - let (fifo1, fifo2) = Fifo::create(4, 2, FifoOpts::Default).unwrap(); + let (fifo1, fifo2) = Fifo::create(4, 2).unwrap(); // Trying to write less than one element should fail. - assert_eq!(fifo1.write(b""), Err(Status::ErrOutOfRange)); - assert_eq!(fifo1.write(b"h"), Err(Status::ErrOutOfRange)); + assert_eq!(fifo1.write(b""), Err(Status::OUT_OF_RANGE)); + assert_eq!(fifo1.write(b"h"), Err(Status::OUT_OF_RANGE)); // Should write one element "he" and ignore the last half-element as it rounds down. assert_eq!(fifo1.write(b"hex").unwrap(), 1); @@ -94,7 +85,7 @@ mod tests { assert_eq!(fifo1.write(b"llo worlds").unwrap(), 3); // Now that the fifo is full any further attempts to write should fail. - assert_eq!(fifo1.write(b"blah blah"), Err(Status::ErrShouldWait)); + assert_eq!(fifo1.write(b"blah blah"), Err(Status::SHOULD_WAIT)); // Read all 4 entries from the other end. let mut read_vec = vec![0; 8]; @@ -102,6 +93,6 @@ mod tests { assert_eq!(read_vec, b"hello wo"); // Reading again should fail as the fifo is empty. 
- assert_eq!(fifo2.read(&mut read_vec), Err(Status::ErrShouldWait)); + assert_eq!(fifo2.read(&mut read_vec), Err(Status::SHOULD_WAIT)); } } diff --git a/third_party/rust/fuchsia-zircon/src/handle.rs b/third_party/rust/fuchsia-zircon/src/handle.rs new file mode 100644 index 000000000000..5c50f29f0e47 --- /dev/null +++ b/third_party/rust/fuchsia-zircon/src/handle.rs @@ -0,0 +1,243 @@ +use {Port, Rights, Signals, Status, Time, WaitAsyncOpts, ok, sys}; +use std::marker::PhantomData; +use std::mem; + +/// An object representing a Zircon +/// [handle](https://fuchsia.googlesource.com/zircon/+/master/docs/handles.md). +/// +/// Internally, it is represented as a 32-bit integer, but this wrapper enforces +/// strict ownership semantics. The `Drop` implementation closes the handle. +/// +/// This type represents the most general reference to a kernel object, and can +/// be interconverted to and from more specific types. Those conversions are not +/// enforced in the type system; attempting to use them will result in errors +/// returned by the kernel. These conversions don't change the underlying +/// representation, but do change the type and thus what operations are available. +#[derive(Debug, Eq, PartialEq, Hash)] +pub struct Handle(sys::zx_handle_t); + +impl AsHandleRef for Handle { + fn as_handle_ref(&self) -> HandleRef { + HandleRef { handle: self.0, phantom: Default::default() } + } +} + +impl HandleBased for Handle {} + +impl Drop for Handle { + fn drop(&mut self) { + if self.0 != sys::ZX_HANDLE_INVALID { + unsafe { sys::zx_handle_close(self.0) }; + } + } +} + +impl Handle { + /// Initialize a handle backed by ZX_HANDLE_INVALID, the only safe non-handle. + pub fn invalid() -> Handle { + Handle(sys::ZX_HANDLE_INVALID) + } + + /// If a raw handle is obtained from some other source, this method converts + /// it into a type-safe owned handle. 
+ pub unsafe fn from_raw(raw: sys::zx_handle_t) -> Handle { + Handle(raw) + } + + pub fn is_invalid(&self) -> bool { + self.0 == sys::ZX_HANDLE_INVALID + } + + pub fn replace(self, rights: Rights) -> Result { + let handle = self.0; + let mut out = 0; + let status = unsafe { sys::zx_handle_replace(handle, rights.bits(), &mut out) }; + ok(status).map(|()| Handle(out)) + } +} + +/// A borrowed reference to a `Handle`. +/// +/// Mostly useful as part of a `WaitItem`. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] +pub struct HandleRef<'a> { + handle: sys::zx_handle_t, + phantom: PhantomData<&'a sys::zx_handle_t>, +} + +impl<'a> HandleRef<'a> { + pub fn raw_handle(&self) -> sys::zx_handle_t { + self.handle + } + + pub fn duplicate(&self, rights: Rights) -> Result { + let handle = self.handle; + let mut out = 0; + let status = unsafe { sys::zx_handle_duplicate(handle, rights.bits(), &mut out) }; + ok(status).map(|()| Handle(out)) + } + + pub fn signal(&self, clear_mask: Signals, set_mask: Signals) -> Result<(), Status> { + let handle = self.handle; + let status = unsafe { sys::zx_object_signal(handle, clear_mask.bits(), set_mask.bits()) }; + ok(status) + } + + pub fn wait(&self, signals: Signals, deadline: Time) -> Result { + let handle = self.handle; + let mut pending = Signals::empty().bits(); + let status = unsafe { + sys::zx_object_wait_one(handle, signals.bits(), deadline.nanos(), &mut pending) + }; + ok(status).map(|()| Signals::from_bits_truncate(pending)) + } + + pub fn wait_async(&self, port: &Port, key: u64, signals: Signals, options: WaitAsyncOpts) + -> Result<(), Status> + { + let handle = self.handle; + let status = unsafe { + sys::zx_object_wait_async( + handle, port.raw_handle(), key, signals.bits(), options as u32) + }; + ok(status) + } +} + +/// A trait to get a reference to the underlying handle of an object. +pub trait AsHandleRef { + /// Get a reference to the handle. One important use of such a reference is + /// for `object_wait_many`. 
+ fn as_handle_ref(&self) -> HandleRef; + + /// Interpret the reference as a raw handle (an integer type). Two distinct + /// handles will have different raw values (so it can perhaps be used as a + /// key in a data structure). + fn raw_handle(&self) -> sys::zx_handle_t { + self.as_handle_ref().raw_handle() + } + + /// Set and clear userspace-accessible signal bits on an object. Wraps the + /// [zx_object_signal](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_signal.md) + /// syscall. + fn signal_handle(&self, clear_mask: Signals, set_mask: Signals) -> Result<(), Status> { + self.as_handle_ref().signal(clear_mask, set_mask) + } + + /// Waits on a handle. Wraps the + /// [zx_object_wait_one](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_wait_one.md) + /// syscall. + fn wait_handle(&self, signals: Signals, deadline: Time) -> Result { + self.as_handle_ref().wait(signals, deadline) + } + + /// Causes packet delivery on the given port when the object changes state and matches signals. + /// [zx_object_wait_async](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_wait_async.md) + /// syscall. + fn wait_async_handle(&self, port: &Port, key: u64, signals: Signals, options: WaitAsyncOpts) + -> Result<(), Status> + { + self.as_handle_ref().wait_async(port, key, signals, options) + } +} + +impl<'a> AsHandleRef for HandleRef<'a> { + fn as_handle_ref(&self) -> HandleRef { *self } +} + +/// A trait implemented by all handle-based types. +/// +/// Note: it is reasonable for user-defined objects wrapping a handle to implement +/// this trait. For example, a specific interface in some protocol might be +/// represented as a newtype of `Channel`, and implement the `as_handle_ref` +/// method and the `From` trait to facilitate conversion from and to the +/// interface. +pub trait HandleBased: AsHandleRef + From + Into { + /// Duplicate a handle, possibly reducing the rights available. 
Wraps the + /// [zx_handle_duplicate](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/handle_duplicate.md) + /// syscall. + fn duplicate_handle(&self, rights: Rights) -> Result { + self.as_handle_ref().duplicate(rights).map(|handle| Self::from(handle)) + } + + /// Create a replacement for a handle, possibly reducing the rights available. This invalidates + /// the original handle. Wraps the + /// [zx_handle_replace](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/handle_replace.md) + /// syscall. + fn replace_handle(self, rights: Rights) -> Result { + >::into(self) + .replace(rights).map(|handle| Self::from(handle)) + } + + /// Converts the value into its inner handle. + /// + /// This is a convenience function which simply forwards to the `Into` trait. + fn into_handle(self) -> Handle { + self.into() + } + + /// Converts the handle into it's raw representation. + /// + /// The caller takes ownership over the raw handle, and must close or transfer it to avoid a handle leak. + fn into_raw(self) -> sys::zx_handle_t { + let h = self.into_handle(); + let r = h.0; + mem::forget(h); + r + } + + /// Creates an instance of this type from a handle. + /// + /// This is a convenience function which simply forwards to the `From` trait. + fn from_handle(handle: Handle) -> Self { + Self::from(handle) + } + + /// Creates an instance of another handle-based type from this value's inner handle. + fn into_handle_based(self) -> H { + H::from_handle(self.into_handle()) + } + + /// Creates an instance of this type from the inner handle of another + /// handle-based type. + fn from_handle_based(h: H) -> Self { + Self::from_handle(h.into_handle()) + } +} + +/// A trait implemented by all handles for objects which have a peer. +pub trait Peered: HandleBased { + /// Set and clear userspace-accessible signal bits on the object's peer. 
Wraps the + /// [zx_object_signal_peer](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_signal.md) + /// syscall. + fn signal_peer(&self, clear_mask: Signals, set_mask: Signals) -> Result<(), Status> { + let handle = self.as_handle_ref().handle; + let status = unsafe { + sys::zx_object_signal_peer(handle, clear_mask.bits(), set_mask.bits()) + }; + ok(status) + } +} + +/// A trait implemented by all handles for objects which can have a cookie attached. +pub trait Cookied: HandleBased { + /// Get the cookie attached to this object, if any. Wraps the + /// [zx_object_get_cookie](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/object_get_cookie.md) + /// syscall. + fn get_cookie(&self, scope: &HandleRef) -> Result { + let handle = self.as_handle_ref().handle; + let mut cookie = 0; + let status = unsafe { sys::zx_object_get_cookie(handle, scope.handle, &mut cookie) }; + ok(status).map(|()| cookie) + } + + /// Attach an opaque cookie to this object with the given scope. The cookie may be read or + /// changed in future only with the same scope. Wraps the + /// [zx_object_set_cookie](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/object_set_cookie.md) + /// syscall. + fn set_cookie(&self, scope: &HandleRef, cookie: u64) -> Result<(), Status> { + let handle = self.as_handle_ref().handle; + let status = unsafe { sys::zx_object_set_cookie(handle, scope.handle, cookie) }; + ok(status) + } +} diff --git a/third_party/rust/fuchsia-zircon/src/lib.rs b/third_party/rust/fuchsia-zircon/src/lib.rs index b226c87d5960..26444402ccf0 100644 --- a/third_party/rust/fuchsia-zircon/src/lib.rs +++ b/third_party/rust/fuchsia-zircon/src/lib.rs @@ -5,11 +5,18 @@ //! Type-safe bindings for Zircon kernel //! [syscalls](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls.md). 
-extern crate fuchsia_zircon_sys as zircon_sys; +#![deny(warnings)] -use std::io; -use std::marker::PhantomData; +#[macro_use] +extern crate bitflags; +pub extern crate fuchsia_zircon_sys as sys; + +#[deprecated(note="use fuchsia_zircon::sys::ZX_CPRNG_DRAW_MAX_LEN instead")] +#[doc(hidden)] +pub use sys::ZX_CPRNG_DRAW_MAX_LEN; + +// Implements the HandleBased traits for a Handle newtype struct macro_rules! impl_handle_based { ($type_name:path) => { impl AsHandleRef for $type_name { @@ -34,296 +41,72 @@ macro_rules! impl_handle_based { } } +// Creates associated constants of TypeName of the form +// `pub const NAME: TypeName = TypeName(value);` +macro_rules! assoc_consts { + ($typename:ident, [$($name:ident = $num:expr;)*]) => { + #[allow(non_upper_case_globals)] + impl $typename { + $( + pub const $name: $typename = $typename($num); + )* + } + } +} + mod channel; +mod cprng; mod event; mod eventpair; mod fifo; +mod handle; mod job; mod port; mod process; +mod rights; mod socket; -mod timer; +mod signals; +mod status; +mod time; mod thread; +mod vmar; mod vmo; -pub use channel::{Channel, ChannelOpts, MessageBuf}; -pub use event::{Event, EventOpts}; -pub use eventpair::{EventPair, EventPairOpts}; -pub use fifo::{Fifo, FifoOpts}; -pub use job::Job; -pub use port::{Packet, PacketContents, Port, PortOpts, SignalPacket, UserPacket, WaitAsyncOpts}; -pub use process::Process; -pub use socket::{Socket, SocketOpts, SocketReadOpts, SocketWriteOpts}; -pub use timer::{Timer, TimerOpts}; -pub use thread::Thread; -pub use vmo::{Vmo, VmoCloneOpts, VmoOp, VmoOpts}; +pub use channel::*; +pub use cprng::*; +pub use event::*; +pub use eventpair::*; +pub use fifo::*; +pub use handle::*; +pub use job::*; +pub use port::*; +pub use process::*; +pub use rights::*; +pub use socket::*; +pub use signals::*; +pub use status::*; +pub use thread::*; +pub use time::*; +pub use vmar::*; +pub use vmo::*; -use zircon_sys as sys; - -type Duration = sys::zx_duration_t; -type Time = sys::zx_time_t; 
-pub use zircon_sys::ZX_TIME_INFINITE; - -// A placeholder value used for handles that have been taken from the message buf. -// We rely on the kernel never to produce any actual handles with this value. -const INVALID_HANDLE: sys::zx_handle_t = 0; - -/// A status code returned from the Zircon kernel. -/// -/// See -/// [errors.md](https://fuchsia.googlesource.com/zircon/+/master/docs/errors.md) -/// in the Zircon documentation for more information about the meaning of these -/// codes. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -#[repr(i32)] -// Auto-generated using tools/gen_status.py -pub enum Status { - NoError = 0, - ErrInternal = -1, - ErrNotSupported = -2, - ErrNoResources = -3, - ErrNoMemory = -4, - ErrCallFailed = -5, - ErrInterruptedRetry = -6, - ErrInvalidArgs = -10, - ErrBadHandle = -11, - ErrWrongType = -12, - ErrBadSyscall = -13, - ErrOutOfRange = -14, - ErrBufferTooSmall = -15, - ErrBadState = -20, - ErrTimedOut = -21, - ErrShouldWait = -22, - ErrCanceled = -23, - ErrPeerClosed = -24, - ErrNotFound = -25, - ErrAlreadyExists = -26, - ErrAlreadyBound = -27, - ErrUnavailable = -28, - ErrAccessDenied = -30, - ErrIo = -40, - ErrIoRefused = -41, - ErrIoDataIntegrity = -42, - ErrIoDataLoss = -43, - ErrBadPath = -50, - ErrNotDir = -51, - ErrNotFile = -52, - ErrFileBig = -53, - ErrNoSpace = -54, - ErrStop = -60, - ErrNext = -61, - - /// Any zx_status_t not in the set above will map to the following: - UnknownOther = -32768, - - // used to prevent exhaustive matching - #[doc(hidden)] - __Nonexhaustive = -32787, +/// Prelude containing common utility traits. 
+/// Designed for use like `use fuchsia_zircon::prelude::*;` +pub mod prelude { + pub use { + AsHandleRef, + Cookied, + DurationNum, + HandleBased, + Peered, + }; } -impl Status { - pub fn from_raw(raw: sys::zx_status_t) -> Self { - match raw { - // Auto-generated using tools/gen_status.py - sys::ZX_OK => Status::NoError, - sys::ZX_ERR_INTERNAL => Status::ErrInternal, - sys::ZX_ERR_NOT_SUPPORTED => Status::ErrNotSupported, - sys::ZX_ERR_NO_RESOURCES => Status::ErrNoResources, - sys::ZX_ERR_NO_MEMORY => Status::ErrNoMemory, - sys::ZX_ERR_CALL_FAILED => Status::ErrCallFailed, - sys::ZX_ERR_INTERRUPTED_RETRY => Status::ErrInterruptedRetry, - sys::ZX_ERR_INVALID_ARGS => Status::ErrInvalidArgs, - sys::ZX_ERR_BAD_HANDLE => Status::ErrBadHandle, - sys::ZX_ERR_WRONG_TYPE => Status::ErrWrongType, - sys::ZX_ERR_BAD_SYSCALL => Status::ErrBadSyscall, - sys::ZX_ERR_OUT_OF_RANGE => Status::ErrOutOfRange, - sys::ZX_ERR_BUFFER_TOO_SMALL => Status::ErrBufferTooSmall, - sys::ZX_ERR_BAD_STATE => Status::ErrBadState, - sys::ZX_ERR_TIMED_OUT => Status::ErrTimedOut, - sys::ZX_ERR_SHOULD_WAIT => Status::ErrShouldWait, - sys::ZX_ERR_CANCELED => Status::ErrCanceled, - sys::ZX_ERR_PEER_CLOSED => Status::ErrPeerClosed, - sys::ZX_ERR_NOT_FOUND => Status::ErrNotFound, - sys::ZX_ERR_ALREADY_EXISTS => Status::ErrAlreadyExists, - sys::ZX_ERR_ALREADY_BOUND => Status::ErrAlreadyBound, - sys::ZX_ERR_UNAVAILABLE => Status::ErrUnavailable, - sys::ZX_ERR_ACCESS_DENIED => Status::ErrAccessDenied, - sys::ZX_ERR_IO => Status::ErrIo, - sys::ZX_ERR_IO_REFUSED => Status::ErrIoRefused, - sys::ZX_ERR_IO_DATA_INTEGRITY => Status::ErrIoDataIntegrity, - sys::ZX_ERR_IO_DATA_LOSS => Status::ErrIoDataLoss, - sys::ZX_ERR_BAD_PATH => Status::ErrBadPath, - sys::ZX_ERR_NOT_DIR => Status::ErrNotDir, - sys::ZX_ERR_NOT_FILE => Status::ErrNotFile, - sys::ZX_ERR_FILE_BIG => Status::ErrFileBig, - sys::ZX_ERR_NO_SPACE => Status::ErrNoSpace, - sys::ZX_ERR_STOP => Status::ErrStop, - sys::ZX_ERR_NEXT => Status::ErrNext, - _ => 
Status::UnknownOther, - } - } - - pub fn into_io_err(self) -> io::Error { - self.into() - } - - // Note: no to_raw, even though it's easy to implement, partly because - // handling of UnknownOther would be tricky. +/// Convenience re-export of `Status::ok`. +pub fn ok(raw: sys::zx_status_t) -> Result<(), Status> { + Status::ok(raw) } -impl From for Status { - fn from(kind: io::ErrorKind) -> Self { - use io::ErrorKind::*; - use Status::*; - - match kind { - NotFound => ErrNotFound, - PermissionDenied => ErrAccessDenied, - ConnectionRefused => ErrIoRefused, - ConnectionAborted => ErrPeerClosed, - AddrInUse => ErrAlreadyBound, - AddrNotAvailable => ErrUnavailable, - BrokenPipe => ErrPeerClosed, - AlreadyExists => ErrAlreadyExists, - WouldBlock => ErrShouldWait, - InvalidInput => ErrInvalidArgs, - TimedOut => ErrTimedOut, - Interrupted => ErrInterruptedRetry, - - UnexpectedEof | - WriteZero | - ConnectionReset | - NotConnected | - Other | _ => ErrIo, - } - } -} - -impl From for io::ErrorKind { - fn from(status: Status) -> io::ErrorKind { - use io::ErrorKind::*; - use Status::*; - - match status { - ErrInterruptedRetry => Interrupted, - ErrBadHandle => BrokenPipe, - ErrTimedOut => TimedOut, - ErrShouldWait => WouldBlock, - ErrPeerClosed => ConnectionAborted, - ErrNotFound => NotFound, - ErrAlreadyExists => AlreadyExists, - ErrAlreadyBound => AlreadyExists, - ErrUnavailable => AddrNotAvailable, - ErrAccessDenied => PermissionDenied, - ErrIoRefused => ConnectionRefused, - ErrIoDataIntegrity => InvalidData, - - ErrBadPath | - ErrInvalidArgs | - ErrOutOfRange | - ErrWrongType => InvalidInput, - - Status::__Nonexhaustive | - UnknownOther | - NoError | - ErrNext | - ErrStop | - ErrNoSpace | - ErrFileBig | - ErrNotFile | - ErrNotDir | - ErrIoDataLoss | - ErrIo | - ErrCanceled | - ErrBadState | - ErrBufferTooSmall | - ErrBadSyscall | - ErrInternal | - ErrNotSupported | - ErrNoResources | - ErrNoMemory | - ErrCallFailed => Other, - } - } -} - -impl From for Status { - fn 
from(err: io::Error) -> Status { - err.kind().into() - } -} - -impl From for io::Error { - fn from(status: Status) -> io::Error { - io::Error::from(io::ErrorKind::from(status)) - } -} - -/// Rights associated with a handle. -/// -/// See [rights.md](https://fuchsia.googlesource.com/zircon/+/master/docs/rights.md) -/// for more information. -pub type Rights = sys::zx_rights_t; -pub use zircon_sys::{ - ZX_RIGHT_NONE, - ZX_RIGHT_DUPLICATE, - ZX_RIGHT_TRANSFER, - ZX_RIGHT_READ, - ZX_RIGHT_WRITE, - ZX_RIGHT_EXECUTE, - ZX_RIGHT_MAP, - ZX_RIGHT_GET_PROPERTY, - ZX_RIGHT_SET_PROPERTY, - ZX_RIGHT_DEBUG, - ZX_RIGHT_SAME_RIGHTS, -}; - -/// Signals that can be waited upon. -/// -/// See -/// [Objects and signals](https://fuchsia.googlesource.com/zircon/+/master/docs/concepts.md#Objects-and-Signals) -/// in the Zircon kernel documentation. Note: the names of signals are still in flux. -pub type Signals = sys::zx_signals_t; - -pub use zircon_sys::{ - ZX_SIGNAL_NONE, - - ZX_SIGNAL_HANDLE_CLOSED, - ZX_SIGNAL_LAST_HANDLE, - - ZX_USER_SIGNAL_0, - ZX_USER_SIGNAL_1, - ZX_USER_SIGNAL_2, - ZX_USER_SIGNAL_3, - ZX_USER_SIGNAL_4, - ZX_USER_SIGNAL_5, - ZX_USER_SIGNAL_6, - ZX_USER_SIGNAL_7, - - // Event - ZX_EVENT_SIGNALED, - - // EventPair - ZX_EPAIR_SIGNALED, - ZX_EPAIR_CLOSED, - - // Task signals (process, thread, job) - ZX_TASK_TERMINATED, - - // Channel - ZX_CHANNEL_READABLE, - ZX_CHANNEL_WRITABLE, - ZX_CHANNEL_PEER_CLOSED, - - // Socket - ZX_SOCKET_READABLE, - ZX_SOCKET_WRITABLE, - ZX_SOCKET_PEER_CLOSED, - - // Timer - ZX_TIMER_SIGNALED, -}; - /// A "wait item" containing a handle reference and information about what signals /// to wait on, and, on return from `object_wait_many`, which are pending. #[repr(C)] @@ -337,7 +120,6 @@ pub struct WaitItem<'a> { pub pending: Signals, } - /// An identifier to select a particular clock. See /// [zx_time_get](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/time_get.md) /// for more information about the possible values. 
@@ -355,276 +137,6 @@ pub enum ClockId { Thread = 2, } -/// Get the current time, from the specific clock id. -/// -/// Wraps the -/// [zx_time_get](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/time_get.md) -/// syscall. -pub fn time_get(clock_id: ClockId) -> Time { - unsafe { sys::zx_time_get(clock_id as u32) } -} - -/// Read the number of high-precision timer ticks since boot. These ticks may be processor cycles, -/// high speed timer, profiling timer, etc. They are not guaranteed to continue advancing when the -/// system is asleep. -/// -/// Wraps the -/// [zx_ticks_get](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/ticks_get.md) -/// syscall. -pub fn ticks_get() -> u64 { - unsafe { sys::zx_ticks_get() } -} - -/// Compute a deadline for the time in the future that is the given `Duration` away. -/// -/// Wraps the -/// [zx_deadline_after](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/deadline_after.md) -/// syscall. -pub fn deadline_after(nanos: Duration) -> Time { - unsafe { sys::zx_deadline_after(nanos) } -} - -/// Sleep until the given deadline. -/// -/// Wraps the -/// [zx_nanosleep](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/nanosleep.md) -/// syscall. -pub fn nanosleep(deadline: Time) { - unsafe { sys::zx_nanosleep(deadline); } -} - -/// Return the number of high-precision timer ticks in a second. -/// -/// Wraps the -/// [zx_ticks_per_second](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/ticks_per_second.md) -/// syscall. -pub fn ticks_per_second() -> u64 { - unsafe { sys::zx_ticks_per_second() } -} - -pub use zircon_sys::{ - ZX_CPRNG_DRAW_MAX_LEN, - ZX_CPRNG_ADD_ENTROPY_MAX_LEN, -}; - -/// Draw random bytes from the kernel's CPRNG to fill the given buffer. Returns the actual number of -/// bytes drawn, which may sometimes be less than the size of the buffer provided. -/// -/// The buffer must have length less than `ZX_CPRNG_DRAW_MAX_LEN`. 
-/// -/// Wraps the -/// [zx_cprng_draw](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/cprng_draw.md) -/// syscall. -pub fn cprng_draw(buffer: &mut [u8]) -> Result { - let mut actual = 0; - let status = unsafe { sys::zx_cprng_draw(buffer.as_mut_ptr(), buffer.len(), &mut actual) }; - into_result(status, || actual) -} - -/// Mix the given entropy into the kernel CPRNG. -/// -/// The buffer must have length less than `ZX_CPRNG_ADD_ENTROPY_MAX_LEN`. -/// -/// Wraps the -/// [zx_cprng_add_entropy](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/cprng_add_entropy.md) -/// syscall. -pub fn cprng_add_entropy(buffer: &[u8]) -> Result<(), Status> { - let status = unsafe { sys::zx_cprng_add_entropy(buffer.as_ptr(), buffer.len()) }; - into_result(status, || ()) -} - -fn into_result(status: sys::zx_status_t, f: F) -> Result - where F: FnOnce() -> T { - // All non-negative values are assumed successful. Note: calls that don't try - // to multiplex success values into status return could be more strict here. - if status >= 0 { - Ok(f()) - } else { - Err(Status::from_raw(status)) - } -} - -// Handles - -/// A borrowed reference to a `Handle`. -/// -/// Mostly useful as part of a `WaitItem`. 
-#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] -pub struct HandleRef<'a> { - handle: sys::zx_handle_t, - phantom: PhantomData<&'a sys::zx_handle_t>, -} - -impl<'a> HandleRef<'a> { - pub fn raw_handle(&self) -> sys::zx_handle_t { - self.handle - } - - pub fn duplicate(&self, rights: Rights) -> Result { - let handle = self.handle; - let mut out = 0; - let status = unsafe { sys::zx_handle_duplicate(handle, rights, &mut out) }; - into_result(status, || Handle(out)) - } - - pub fn signal(&self, clear_mask: Signals, set_mask: Signals) -> Result<(), Status> { - let handle = self.handle; - let status = unsafe { sys::zx_object_signal(handle, clear_mask.bits(), set_mask.bits()) }; - into_result(status, || ()) - } - - pub fn wait(&self, signals: Signals, deadline: Time) -> Result { - let handle = self.handle; - let mut pending = sys::zx_signals_t::empty(); - let status = unsafe { - sys::zx_object_wait_one(handle, signals, deadline, &mut pending) - }; - into_result(status, || pending) - } - - pub fn wait_async(&self, port: &Port, key: u64, signals: Signals, options: WaitAsyncOpts) - -> Result<(), Status> - { - let handle = self.handle; - let status = unsafe { - sys::zx_object_wait_async(handle, port.raw_handle(), key, signals, options as u32) - }; - into_result(status, || ()) - } -} - -/// A trait to get a reference to the underlying handle of an object. -pub trait AsHandleRef { - /// Get a reference to the handle. One important use of such a reference is - /// for `object_wait_many`. - fn as_handle_ref(&self) -> HandleRef; - - /// Interpret the reference as a raw handle (an integer type). Two distinct - /// handles will have different raw values (so it can perhaps be used as a - /// key in a data structure). - fn raw_handle(&self) -> sys::zx_handle_t { - self.as_handle_ref().raw_handle() - } - - /// Set and clear userspace-accessible signal bits on an object. 
Wraps the - /// [zx_object_signal](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_signal.md) - /// syscall. - fn signal_handle(&self, clear_mask: Signals, set_mask: Signals) -> Result<(), Status> { - self.as_handle_ref().signal(clear_mask, set_mask) - } - - /// Waits on a handle. Wraps the - /// [zx_object_wait_one](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_wait_one.md) - /// syscall. - fn wait_handle(&self, signals: Signals, deadline: Time) -> Result { - self.as_handle_ref().wait(signals, deadline) - } - - /// Causes packet delivery on the given port when the object changes state and matches signals. - /// [zx_object_wait_async](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_wait_async.md) - /// syscall. - fn wait_async_handle(&self, port: &Port, key: u64, signals: Signals, options: WaitAsyncOpts) - -> Result<(), Status> - { - self.as_handle_ref().wait_async(port, key, signals, options) - } -} - -impl<'a> AsHandleRef for HandleRef<'a> { - fn as_handle_ref(&self) -> HandleRef { *self } -} - -/// A trait implemented by all handle-based types. -/// -/// Note: it is reasonable for user-defined objects wrapping a handle to implement -/// this trait. For example, a specific interface in some protocol might be -/// represented as a newtype of `Channel`, and implement the `as_handle_ref` -/// method and the `From` trait to facilitate conversion from and to the -/// interface. -pub trait HandleBased: AsHandleRef + From + Into { - /// Duplicate a handle, possibly reducing the rights available. Wraps the - /// [zx_handle_duplicate](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/handle_duplicate.md) - /// syscall. - fn duplicate_handle(&self, rights: Rights) -> Result { - self.as_handle_ref().duplicate(rights).map(|handle| Self::from(handle)) - } - - /// Create a replacement for a handle, possibly reducing the rights available. This invalidates - /// the original handle. 
Wraps the - /// [zx_handle_replace](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/handle_replace.md) - /// syscall. - fn replace_handle(self, rights: Rights) -> Result { - >::into(self) - .replace(rights).map(|handle| Self::from(handle)) - } - - /// Converts the value into its inner handle. - /// - /// This is a convenience function which simply forwards to the `Into` trait. - fn into_handle(self) -> Handle { - self.into() - } - - /// Creates an instance of this type from a handle. - /// - /// This is a convenience function which simply forwards to the `From` trait. - fn from_handle(handle: Handle) -> Self { - Self::from(handle) - } - - /// Creates an instance of another handle-based type from this value's inner handle. - fn into_handle_based(self) -> H { - H::from_handle(self.into_handle()) - } - - /// Creates an instance of this type from the inner handle of another - /// handle-based type. - fn from_handle_based(h: H) -> Self { - Self::from_handle(h.into_handle()) - } -} - -/// A trait implemented by all handles for objects which have a peer. -pub trait Peered: HandleBased { - /// Set and clear userspace-accessible signal bits on the object's peer. Wraps the - /// [zx_object_signal_peer](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/object_signal.md) - /// syscall. - fn signal_peer(&self, clear_mask: Signals, set_mask: Signals) -> Result<(), Status> { - let handle = self.as_handle_ref().handle; - let status = unsafe { - sys::zx_object_signal_peer(handle, clear_mask.bits(), set_mask.bits()) - }; - into_result(status, || ()) - } -} - -/// A trait implemented by all handles for objects which can have a cookie attached. -pub trait Cookied: HandleBased { - /// Get the cookie attached to this object, if any. Wraps the - /// [zx_object_get_cookie](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/object_get_cookie.md) - /// syscall. 
- fn get_cookie(&self, scope: &HandleRef) -> Result { - let handle = self.as_handle_ref().handle; - let mut cookie = 0; - let status = unsafe { sys::zx_object_get_cookie(handle, scope.handle, &mut cookie) }; - into_result(status, || cookie) - } - - /// Attach an opaque cookie to this object with the given scope. The cookie may be read or - /// changed in future only with the same scope. Wraps the - /// [zx_object_set_cookie](https://fuchsia.googlesource.com/zircon/+/HEAD/docs/syscalls/object_set_cookie.md) - /// syscall. - fn set_cookie(&self, scope: &HandleRef, cookie: u64) -> Result<(), Status> { - let handle = self.as_handle_ref().handle; - let status = unsafe { sys::zx_object_set_cookie(handle, scope.handle, cookie) }; - into_result(status, || ()) - } -} - -fn handle_drop(handle: sys::zx_handle_t) { - let _ = unsafe { sys::zx_handle_close(handle) }; -} - /// Wait on multiple handles. /// The success return value is a bool indicating whether one or more of the /// provided handle references was closed during the wait. @@ -634,113 +146,79 @@ fn handle_drop(handle: sys::zx_handle_t) { /// syscall. pub fn object_wait_many(items: &mut [WaitItem], deadline: Time) -> Result { - let len = try!(usize_into_u32(items.len()).map_err(|_| Status::ErrOutOfRange)); + let len = try!(usize_into_u32(items.len()).map_err(|_| Status::OUT_OF_RANGE)); let items_ptr = items.as_mut_ptr() as *mut sys::zx_wait_item_t; - let status = unsafe { sys::zx_object_wait_many( items_ptr, len, deadline) }; + let status = unsafe { sys::zx_object_wait_many( items_ptr, len, deadline.nanos()) }; if status == sys::ZX_ERR_CANCELED { - return Ok((true)) - } - into_result(status, || false) -} - -// An untyped handle - -/// An object representing a Zircon -/// [handle](https://fuchsia.googlesource.com/zircon/+/master/docs/handles.md). -/// -/// Internally, it is represented as a 32-bit integer, but this wrapper enforces -/// strict ownership semantics. The `Drop` implementation closes the handle. 
-/// -/// This type represents the most general reference to a kernel object, and can -/// be interconverted to and from more specific types. Those conversions are not -/// enforced in the type system; attempting to use them will result in errors -/// returned by the kernel. These conversions don't change the underlying -/// representation, but do change the type and thus what operations are available. -#[derive(Debug, Eq, PartialEq, Hash)] -pub struct Handle(sys::zx_handle_t); - -impl AsHandleRef for Handle { - fn as_handle_ref(&self) -> HandleRef { - HandleRef { handle: self.0, phantom: Default::default() } - } -} - -impl HandleBased for Handle {} - -impl Drop for Handle { - fn drop(&mut self) { - handle_drop(self.0) - } -} - -impl Handle { - /// If a raw handle is obtained from some other source, this method converts - /// it into a type-safe owned handle. - pub unsafe fn from_raw(raw: sys::zx_handle_t) -> Handle { - Handle(raw) - } - - pub fn replace(self, rights: Rights) -> Result { - let handle = self.0; - let mut out = 0; - let status = unsafe { sys::zx_handle_replace(handle, rights, &mut out) }; - into_result(status, || Handle(out)) + return Ok(true) } + ok(status).map(|()| false) } #[cfg(test)] mod tests { use super::*; + #[allow(unused_imports)] + use super::prelude::*; #[test] fn monotonic_time_increases() { - let time1 = time_get(ClockId::Monotonic); - nanosleep(deadline_after(1_000)); - let time2 = time_get(ClockId::Monotonic); + let time1 = Time::get(ClockId::Monotonic); + 1_000.nanos().sleep(); + let time2 = Time::get(ClockId::Monotonic); assert!(time2 > time1); } #[test] fn utc_time_increases() { - let time1 = time_get(ClockId::UTC); - nanosleep(deadline_after(1_000)); - let time2 = time_get(ClockId::UTC); + let time1 = Time::get(ClockId::UTC); + 1_000.nanos().sleep(); + let time2 = Time::get(ClockId::UTC); assert!(time2 > time1); } #[test] fn thread_time_increases() { - let time1 = time_get(ClockId::Thread); - nanosleep(deadline_after(1_000)); - 
let time2 = time_get(ClockId::Thread); + let time1 = Time::get(ClockId::Thread); + 1_000.nanos().sleep(); + let time2 = Time::get(ClockId::Thread); assert!(time2 > time1); } #[test] fn ticks_increases() { let ticks1 = ticks_get(); - nanosleep(deadline_after(1_000)); + 1_000.nanos().sleep(); let ticks2 = ticks_get(); assert!(ticks2 > ticks1); } #[test] fn tick_length() { - let sleep_ns = 1_000_000; // 1ms - let one_second_ns = 1_000_000_000; // 1 second in ns + let sleep_time = 1.milli(); let ticks1 = ticks_get(); - nanosleep(deadline_after(sleep_ns)); + sleep_time.sleep(); let ticks2 = ticks_get(); + // The number of ticks should have increased by at least 1 ms worth - assert!(ticks2 > ticks1 + sleep_ns * ticks_per_second() / one_second_ns); + let sleep_ticks = sleep_time.millis() * ticks_per_second() / 1000; + assert!(ticks2 >= (ticks1 + sleep_ticks)); + } + + #[test] + fn into_raw() { + let vmo = Vmo::create(1).unwrap(); + let h = vmo.into_raw(); + let vmo2 = Vmo::from(unsafe { Handle::from_raw(h) }); + assert!(vmo2.write(b"1", 0).is_ok()); } #[test] fn sleep() { - let sleep_ns = 1_000_000; // 1ms - let time1 = time_get(ClockId::Monotonic); - nanosleep(deadline_after(sleep_ns)); - let time2 = time_get(ClockId::Monotonic); + let sleep_ns = 1.millis(); + let time1 = Time::get(ClockId::Monotonic); + sleep_ns.sleep(); + let time2 = Time::get(ClockId::Monotonic); assert!(time2 > time1 + sleep_ns); } @@ -750,16 +228,16 @@ mod tests { let hello_length: usize = 5; // Create a VMO and write some data to it. - let vmo = Vmo::create(hello_length as u64, VmoOpts::Default).unwrap(); + let vmo = Vmo::create(hello_length as u64).unwrap(); assert!(vmo.write(b"hello", 0).is_ok()); // Replace, reducing rights to read. - let readonly_vmo = vmo.duplicate_handle(ZX_RIGHT_READ).unwrap(); + let readonly_vmo = vmo.duplicate_handle(Rights::READ).unwrap(); // Make sure we can read but not write. 
let mut read_vec = vec![0; hello_length]; assert_eq!(readonly_vmo.read(&mut read_vec, 0).unwrap(), hello_length); assert_eq!(read_vec, b"hello"); - assert_eq!(readonly_vmo.write(b"", 0), Err(Status::ErrAccessDenied)); + assert_eq!(readonly_vmo.write(b"", 0), Err(Status::ACCESS_DENIED)); // Write new data to the original handle, and read it from the new handle assert!(vmo.write(b"bye", 0).is_ok()); @@ -773,84 +251,84 @@ mod tests { let hello_length: usize = 5; // Create a VMO and write some data to it. - let vmo = Vmo::create(hello_length as u64, VmoOpts::Default).unwrap(); + let vmo = Vmo::create(hello_length as u64).unwrap(); assert!(vmo.write(b"hello", 0).is_ok()); // Replace, reducing rights to read. - let readonly_vmo = vmo.replace_handle(ZX_RIGHT_READ).unwrap(); + let readonly_vmo = vmo.replace_handle(Rights::READ).unwrap(); // Make sure we can read but not write. let mut read_vec = vec![0; hello_length]; assert_eq!(readonly_vmo.read(&mut read_vec, 0).unwrap(), hello_length); assert_eq!(read_vec, b"hello"); - assert_eq!(readonly_vmo.write(b"", 0), Err(Status::ErrAccessDenied)); + assert_eq!(readonly_vmo.write(b"", 0), Err(Status::ACCESS_DENIED)); } #[test] fn wait_and_signal() { - let event = Event::create(EventOpts::Default).unwrap(); - let ten_ms: Duration = 10_000_000; + let event = Event::create().unwrap(); + let ten_ms = 10.millis(); // Waiting on it without setting any signal should time out. assert_eq!(event.wait_handle( - ZX_USER_SIGNAL_0, deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + Signals::USER_0, ten_ms.after_now()), Err(Status::TIMED_OUT)); // If we set a signal, we should be able to wait for it. 
- assert!(event.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - assert_eq!(event.wait_handle(ZX_USER_SIGNAL_0, deadline_after(ten_ms)).unwrap(), - ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert!(event.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + assert_eq!(event.wait_handle(Signals::USER_0, ten_ms.after_now()).unwrap(), + Signals::USER_0); // Should still work, signals aren't automatically cleared. - assert_eq!(event.wait_handle(ZX_USER_SIGNAL_0, deadline_after(ten_ms)).unwrap(), - ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(event.wait_handle(Signals::USER_0, ten_ms.after_now()).unwrap(), + Signals::USER_0); // Now clear it, and waiting should time out again. - assert!(event.signal_handle(ZX_USER_SIGNAL_0, ZX_SIGNAL_NONE).is_ok()); + assert!(event.signal_handle(Signals::USER_0, Signals::NONE).is_ok()); assert_eq!(event.wait_handle( - ZX_USER_SIGNAL_0, deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + Signals::USER_0, ten_ms.after_now()), Err(Status::TIMED_OUT)); } #[test] fn wait_many_and_signal() { - let ten_ms: Duration = 10_000_000; - let e1 = Event::create(EventOpts::Default).unwrap(); - let e2 = Event::create(EventOpts::Default).unwrap(); + let ten_ms = 10.millis(); + let e1 = Event::create().unwrap(); + let e2 = Event::create().unwrap(); // Waiting on them now should time out. 
let mut items = vec![ - WaitItem { handle: e1.as_handle_ref(), waitfor: ZX_USER_SIGNAL_0, pending: ZX_SIGNAL_NONE }, - WaitItem { handle: e2.as_handle_ref(), waitfor: ZX_USER_SIGNAL_1, pending: ZX_SIGNAL_NONE }, + WaitItem { handle: e1.as_handle_ref(), waitfor: Signals::USER_0, pending: Signals::NONE }, + WaitItem { handle: e2.as_handle_ref(), waitfor: Signals::USER_1, pending: Signals::NONE }, ]; - assert_eq!(object_wait_many(&mut items, deadline_after(ten_ms)), Err(Status::ErrTimedOut)); - assert_eq!(items[0].pending, ZX_SIGNAL_LAST_HANDLE); - assert_eq!(items[1].pending, ZX_SIGNAL_LAST_HANDLE); + assert_eq!(object_wait_many(&mut items, ten_ms.after_now()), Err(Status::TIMED_OUT)); + assert_eq!(items[0].pending, Signals::NONE); + assert_eq!(items[1].pending, Signals::NONE); // Signal one object and it should return success. - assert!(e1.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - assert!(object_wait_many(&mut items, deadline_after(ten_ms)).is_ok()); - assert_eq!(items[0].pending, ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); - assert_eq!(items[1].pending, ZX_SIGNAL_LAST_HANDLE); + assert!(e1.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + assert!(object_wait_many(&mut items, ten_ms.after_now()).is_ok()); + assert_eq!(items[0].pending, Signals::USER_0); + assert_eq!(items[1].pending, Signals::NONE); // Signal the other and it should return both. - assert!(e2.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_1).is_ok()); - assert!(object_wait_many(&mut items, deadline_after(ten_ms)).is_ok()); - assert_eq!(items[0].pending, ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); - assert_eq!(items[1].pending, ZX_USER_SIGNAL_1 | ZX_SIGNAL_LAST_HANDLE); + assert!(e2.signal_handle(Signals::NONE, Signals::USER_1).is_ok()); + assert!(object_wait_many(&mut items, ten_ms.after_now()).is_ok()); + assert_eq!(items[0].pending, Signals::USER_0); + assert_eq!(items[1].pending, Signals::USER_1); // Clear signals on both; now it should time out again. 
- assert!(e1.signal_handle(ZX_USER_SIGNAL_0, ZX_SIGNAL_NONE).is_ok()); - assert!(e2.signal_handle(ZX_USER_SIGNAL_1, ZX_SIGNAL_NONE).is_ok()); - assert_eq!(object_wait_many(&mut items, deadline_after(ten_ms)), Err(Status::ErrTimedOut)); - assert_eq!(items[0].pending, ZX_SIGNAL_LAST_HANDLE); - assert_eq!(items[1].pending, ZX_SIGNAL_LAST_HANDLE); + assert!(e1.signal_handle(Signals::USER_0, Signals::NONE).is_ok()); + assert!(e2.signal_handle(Signals::USER_1, Signals::NONE).is_ok()); + assert_eq!(object_wait_many(&mut items, ten_ms.after_now()), Err(Status::TIMED_OUT)); + assert_eq!(items[0].pending, Signals::NONE); + assert_eq!(items[1].pending, Signals::NONE); } #[test] fn cookies() { - let event = Event::create(EventOpts::Default).unwrap(); - let scope = Event::create(EventOpts::Default).unwrap(); + let event = Event::create().unwrap(); + let scope = Event::create().unwrap(); // Getting a cookie when none has been set should fail. - assert_eq!(event.get_cookie(&scope.as_handle_ref()), Err(Status::ErrAccessDenied)); + assert_eq!(event.get_cookie(&scope.as_handle_ref()), Err(Status::ACCESS_DENIED)); // Set a cookie. assert_eq!(event.set_cookie(&scope.as_handle_ref(), 42), Ok(())); @@ -859,37 +337,13 @@ mod tests { assert_eq!(event.get_cookie(&scope.as_handle_ref()), Ok(42)); // but not with the wrong scope! - assert_eq!(event.get_cookie(&event.as_handle_ref()), Err(Status::ErrAccessDenied)); + assert_eq!(event.get_cookie(&event.as_handle_ref()), Err(Status::ACCESS_DENIED)); // Can change it, with the same scope... assert_eq!(event.set_cookie(&scope.as_handle_ref(), 123), Ok(())); // but not with a different scope. 
- assert_eq!(event.set_cookie(&event.as_handle_ref(), 123), Err(Status::ErrAccessDenied)); - } - - #[test] - fn cprng() { - let mut buffer = [0; 20]; - assert_eq!(cprng_draw(&mut buffer), Ok(20)); - assert_ne!(buffer[0], 0); - assert_ne!(buffer[19], 0); - } - - #[test] - fn cprng_too_large() { - let mut buffer = [0; ZX_CPRNG_DRAW_MAX_LEN + 1]; - assert_eq!(cprng_draw(&mut buffer), Err(Status::ErrInvalidArgs)); - - for mut s in buffer.chunks_mut(ZX_CPRNG_DRAW_MAX_LEN) { - assert_eq!(cprng_draw(&mut s), Ok(s.len())); - } - } - - #[test] - fn cprng_add() { - let buffer = [0, 1, 2]; - assert_eq!(cprng_add_entropy(&buffer), Ok(())); + assert_eq!(event.set_cookie(&event.as_handle_ref(), 123), Err(Status::ACCESS_DENIED)); } } diff --git a/third_party/rust/fuchsia-zircon/src/port.rs b/third_party/rust/fuchsia-zircon/src/port.rs index 74e3ea69b8c3..6a9e8a8f7fbf 100644 --- a/third_party/rust/fuchsia-zircon/src/port.rs +++ b/third_party/rust/fuchsia-zircon/src/port.rs @@ -7,7 +7,7 @@ use std::mem; use {AsHandleRef, HandleBased, Handle, HandleRef, Signals, Status, Time}; -use {sys, into_result}; +use {sys, ok}; /// An object representing a Zircon /// [port](https://fuchsia.googlesource.com/zircon/+/master/docs/objects/port.md). @@ -18,7 +18,7 @@ pub struct Port(Handle); impl_handle_based!(Port); /// A packet sent through a port. This is a type-safe wrapper for -/// [zx_port_packet_t](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait2.md). +/// [zx_port_packet_t](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait.md). #[derive(PartialEq, Eq, Debug)] pub struct Packet(sys::zx_port_packet_t); @@ -31,15 +31,18 @@ pub enum PacketContents { SignalOne(SignalPacket), /// A repeating signal packet generated via `object_wait_async`. SignalRep(SignalPacket), + + #[doc(hidden)] + __Nonexhaustive } /// Contents of a user packet (one sent by `port_queue`). 
This is a type-safe wrapper for -/// [zx_packet_user_t](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait2.md). +/// [zx_packet_user_t](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait.md). #[derive(Debug, Copy, Clone)] pub struct UserPacket(sys::zx_packet_user_t); /// Contents of a signal packet (one generated by the kernel). This is a type-safe wrapper for -/// [zx_packet_signal_t](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait2.md). +/// [zx_packet_signal_t](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait.md). #[derive(Debug, Copy, Clone)] pub struct SignalPacket(sys::zx_packet_signal_t); @@ -98,12 +101,12 @@ impl UserPacket { impl SignalPacket { /// The signals used in the call to `object_wait_async`. pub fn trigger(&self) -> Signals { - self.0.trigger + Signals::from_bits_truncate(self.0.trigger) } /// The observed signals. pub fn observed(&self) -> Signals { - self.0.observed + Signals::from_bits_truncate(self.0.observed) } /// A per object count of pending operations. @@ -118,11 +121,13 @@ impl Port { /// Wraps the /// [zx_port_create](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_create.md) /// syscall. - pub fn create(opts: PortOpts) -> Result { + pub fn create() -> Result { unsafe { let mut handle = 0; - let status = sys::zx_port_create(opts as u32, &mut handle); - into_result(status, || Self::from(Handle(handle))) + let opts = 0; + let status = sys::zx_port_create(opts, &mut handle); + ok(status)?; + Ok(Handle::from_raw(handle).into()) } } @@ -134,23 +139,24 @@ impl Port { pub fn queue(&self, packet: &Packet) -> Result<(), Status> { let status = unsafe { sys::zx_port_queue(self.raw_handle(), - &packet.0 as *const sys::zx_port_packet_t as *const u8, 0) + &packet.0 as *const sys::zx_port_packet_t, 0) }; - into_result(status, || ()) + ok(status) } /// Wait for a packet to arrive on a (V2) port. 
/// /// Wraps the - /// [zx_port_wait](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait2.md) + /// [zx_port_wait](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/port_wait.md) /// syscall. pub fn wait(&self, deadline: Time) -> Result { let mut packet = Default::default(); let status = unsafe { - sys::zx_port_wait(self.raw_handle(), deadline, - &mut packet as *mut sys::zx_port_packet_t as *mut u8, 0) + sys::zx_port_wait(self.raw_handle(), deadline.nanos(), + &mut packet as *mut sys::zx_port_packet_t, 0) }; - into_result(status, || Packet(packet)) + ok(status)?; + Ok(Packet(packet)) } /// Cancel pending wait_async calls for an object with the given key. @@ -162,21 +168,7 @@ impl Port { let status = unsafe { sys::zx_port_cancel(self.raw_handle(), source.raw_handle(), key) }; - into_result(status, || ()) - } -} - -/// Options for creating a port. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum PortOpts { - /// Default options. - Default = 0, -} - -impl Default for PortOpts { - fn default() -> Self { - PortOpts::Default + ok(status) } } @@ -191,18 +183,16 @@ pub enum WaitAsyncOpts { #[cfg(test)] mod tests { use super::*; - use {Duration, Event, EventOpts}; - use {ZX_SIGNAL_LAST_HANDLE, ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0, ZX_USER_SIGNAL_1}; - use deadline_after; + use {DurationNum, Event}; #[test] fn port_basic() { - let ten_ms: Duration = 10_000_000; + let ten_ms = 10.millis(); - let port = Port::create(PortOpts::Default).unwrap(); + let port = Port::create().unwrap(); // Waiting now should time out. - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // Send a valid packet. let packet = Packet::from_user_packet( @@ -213,50 +203,50 @@ mod tests { assert!(port.queue(&packet).is_ok()); // Waiting should succeed this time. We should get back the packet we sent. 
- let read_packet = port.wait(deadline_after(ten_ms)).unwrap(); + let read_packet = port.wait(ten_ms.after_now()).unwrap(); assert_eq!(read_packet, packet); } #[test] fn wait_async_once() { - let ten_ms: Duration = 10_000_000; + let ten_ms = 10.millis(); let key = 42; - let port = Port::create(PortOpts::Default).unwrap(); - let event = Event::create(EventOpts::Default).unwrap(); + let port = Port::create().unwrap(); + let event = Event::create().unwrap(); - assert!(event.wait_async_handle(&port, key, ZX_USER_SIGNAL_0 | ZX_USER_SIGNAL_1, + assert!(event.wait_async_handle(&port, key, Signals::USER_0 | Signals::USER_1, WaitAsyncOpts::Once).is_ok()); // Waiting without setting any signal should time out. - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // If we set a signal, we should be able to wait for it. - assert!(event.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - let read_packet = port.wait(deadline_after(ten_ms)).unwrap(); + assert!(event.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + let read_packet = port.wait(ten_ms.after_now()).unwrap(); assert_eq!(read_packet.key(), key); assert_eq!(read_packet.status(), 0); match read_packet.contents() { PacketContents::SignalOne(sig) => { - assert_eq!(sig.trigger(), ZX_USER_SIGNAL_0 | ZX_USER_SIGNAL_1); - assert_eq!(sig.observed(), ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(sig.trigger(), Signals::USER_0 | Signals::USER_1); + assert_eq!(sig.observed(), Signals::USER_0); assert_eq!(sig.count(), 1); } _ => panic!("wrong packet type"), } // Shouldn't get any more packets. - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // Calling wait_async again should result in another packet. 
- assert!(event.wait_async_handle(&port, key, ZX_USER_SIGNAL_0, WaitAsyncOpts::Once).is_ok()); - let read_packet = port.wait(deadline_after(ten_ms)).unwrap(); + assert!(event.wait_async_handle(&port, key, Signals::USER_0, WaitAsyncOpts::Once).is_ok()); + let read_packet = port.wait(ten_ms.after_now()).unwrap(); assert_eq!(read_packet.key(), key); assert_eq!(read_packet.status(), 0); match read_packet.contents() { PacketContents::SignalOne(sig) => { - assert_eq!(sig.trigger(), ZX_USER_SIGNAL_0); - assert_eq!(sig.observed(), ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(sig.trigger(), Signals::USER_0); + assert_eq!(sig.observed(), Signals::USER_0); assert_eq!(sig.count(), 1); } _ => panic!("wrong packet type"), @@ -264,41 +254,41 @@ mod tests { // Calling wait_async_handle then cancel, we should not get a packet as cancel will // remove it from the queue. - assert!(event.wait_async_handle(&port, key, ZX_USER_SIGNAL_0, WaitAsyncOpts::Once).is_ok()); + assert!(event.wait_async_handle(&port, key, Signals::USER_0, WaitAsyncOpts::Once).is_ok()); assert!(port.cancel(&event, key).is_ok()); - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // If the event is signalled after the cancel, we also shouldn't get a packet. 
- assert!(event.signal_handle(ZX_USER_SIGNAL_0, ZX_SIGNAL_NONE).is_ok()); // clear signal - assert!(event.wait_async_handle(&port, key, ZX_USER_SIGNAL_0, WaitAsyncOpts::Once).is_ok()); + assert!(event.signal_handle(Signals::USER_0, Signals::NONE).is_ok()); // clear signal + assert!(event.wait_async_handle(&port, key, Signals::USER_0, WaitAsyncOpts::Once).is_ok()); assert!(port.cancel(&event, key).is_ok()); - assert!(event.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert!(event.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); } #[test] fn wait_async_repeating() { - let ten_ms: Duration = 10_000_000; + let ten_ms = 10.millis(); let key = 42; - let port = Port::create(PortOpts::Default).unwrap(); - let event = Event::create(EventOpts::Default).unwrap(); + let port = Port::create().unwrap(); + let event = Event::create().unwrap(); - assert!(event.wait_async_handle(&port, key, ZX_USER_SIGNAL_0 | ZX_USER_SIGNAL_1, + assert!(event.wait_async_handle(&port, key, Signals::USER_0 | Signals::USER_1, WaitAsyncOpts::Repeating).is_ok()); // Waiting without setting any signal should time out. - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // If we set a signal, we should be able to wait for it. 
- assert!(event.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - let read_packet = port.wait(deadline_after(ten_ms)).unwrap(); + assert!(event.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + let read_packet = port.wait(ten_ms.after_now()).unwrap(); assert_eq!(read_packet.key(), key); assert_eq!(read_packet.status(), 0); match read_packet.contents() { PacketContents::SignalRep(sig) => { - assert_eq!(sig.trigger(), ZX_USER_SIGNAL_0 | ZX_USER_SIGNAL_1); - assert_eq!(sig.observed(), ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(sig.trigger(), Signals::USER_0 | Signals::USER_1); + assert_eq!(sig.observed(), Signals::USER_0); assert_eq!(sig.count(), 1); } _ => panic!("wrong packet type"), @@ -306,19 +296,19 @@ mod tests { // Should not get any more packets, as ZX_WAIT_ASYNC_REPEATING is edge triggered rather than // level triggered. - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // If we clear and resignal, we should get the same packet again, // even though we didn't call event.wait_async again. 
- assert!(event.signal_handle(ZX_USER_SIGNAL_0, ZX_SIGNAL_NONE).is_ok()); // clear signal - assert!(event.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - let read_packet = port.wait(deadline_after(ten_ms)).unwrap(); + assert!(event.signal_handle(Signals::USER_0, Signals::NONE).is_ok()); // clear signal + assert!(event.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + let read_packet = port.wait(ten_ms.after_now()).unwrap(); assert_eq!(read_packet.key(), key); assert_eq!(read_packet.status(), 0); match read_packet.contents() { PacketContents::SignalRep(sig) => { - assert_eq!(sig.trigger(), ZX_USER_SIGNAL_0 | ZX_USER_SIGNAL_1); - assert_eq!(sig.observed(), ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(sig.trigger(), Signals::USER_0 | Signals::USER_1); + assert_eq!(sig.observed(), Signals::USER_0); assert_eq!(sig.count(), 1); } _ => panic!("wrong packet type"), @@ -326,22 +316,22 @@ mod tests { // Cancelling the wait should stop us getting packets... assert!(port.cancel(&event, key).is_ok()); - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // ... even if we clear and resignal - assert!(event.signal_handle(ZX_USER_SIGNAL_0, ZX_SIGNAL_NONE).is_ok()); // clear signal - assert!(event.signal_handle(ZX_SIGNAL_NONE, ZX_USER_SIGNAL_0).is_ok()); - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert!(event.signal_handle(Signals::USER_0, Signals::NONE).is_ok()); // clear signal + assert!(event.signal_handle(Signals::NONE, Signals::USER_0).is_ok()); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); // Calling wait_async again should result in another packet. 
assert!(event.wait_async_handle( - &port, key, ZX_USER_SIGNAL_0, WaitAsyncOpts::Repeating).is_ok()); - let read_packet = port.wait(deadline_after(ten_ms)).unwrap(); + &port, key, Signals::USER_0, WaitAsyncOpts::Repeating).is_ok()); + let read_packet = port.wait(ten_ms.after_now()).unwrap(); assert_eq!(read_packet.key(), key); assert_eq!(read_packet.status(), 0); match read_packet.contents() { PacketContents::SignalRep(sig) => { - assert_eq!(sig.trigger(), ZX_USER_SIGNAL_0); - assert_eq!(sig.observed(), ZX_USER_SIGNAL_0 | ZX_SIGNAL_LAST_HANDLE); + assert_eq!(sig.trigger(), Signals::USER_0); + assert_eq!(sig.observed(), Signals::USER_0); assert_eq!(sig.count(), 1); } _ => panic!("wrong packet type"), @@ -349,6 +339,6 @@ mod tests { // Closing the handle should stop us getting packets. drop(event); - assert_eq!(port.wait(deadline_after(ten_ms)), Err(Status::ErrTimedOut)); + assert_eq!(port.wait(ten_ms.after_now()), Err(Status::TIMED_OUT)); } } diff --git a/third_party/rust/fuchsia-zircon/src/rights.rs b/third_party/rust/fuchsia-zircon/src/rights.rs new file mode 100644 index 000000000000..a41ad12f5438 --- /dev/null +++ b/third_party/rust/fuchsia-zircon/src/rights.rs @@ -0,0 +1,28 @@ +use sys; + +bitflags! { + /// Rights associated with a handle. + /// + /// See [rights.md](https://fuchsia.googlesource.com/zircon/+/master/docs/rights.md) + /// for more information. 
+ #[repr(C)] + pub struct Rights: sys::zx_rights_t { + const NONE = sys::ZX_RIGHT_NONE; + const DUPLICATE = sys::ZX_RIGHT_DUPLICATE; + const TRANSFER = sys::ZX_RIGHT_TRANSFER; + const READ = sys::ZX_RIGHT_READ; + const WRITE = sys::ZX_RIGHT_WRITE; + const EXECUTE = sys::ZX_RIGHT_EXECUTE; + const MAP = sys::ZX_RIGHT_MAP; + const GET_PROPERTY = sys::ZX_RIGHT_GET_PROPERTY; + const SET_PROPERTY = sys::ZX_RIGHT_SET_PROPERTY; + const ENUMERATE = sys::ZX_RIGHT_ENUMERATE; + const DESTROY = sys::ZX_RIGHT_DESTROY; + const SET_POLICY = sys::ZX_RIGHT_SET_POLICY; + const GET_POLICY = sys::ZX_RIGHT_GET_POLICY; + const SIGNAL = sys::ZX_RIGHT_SIGNAL; + const SIGNAL_PEER = sys::ZX_RIGHT_SIGNAL_PEER; + const WAIT = sys::ZX_RIGHT_WAIT; + const SAME_RIGHTS = sys::ZX_RIGHT_SAME_RIGHTS; + } +} \ No newline at end of file diff --git a/third_party/rust/fuchsia-zircon/src/signals.rs b/third_party/rust/fuchsia-zircon/src/signals.rs new file mode 100644 index 000000000000..e5189f5ebcd2 --- /dev/null +++ b/third_party/rust/fuchsia-zircon/src/signals.rs @@ -0,0 +1,105 @@ +use sys::*; + +bitflags! { + /// Signals that can be waited upon. + /// + /// See + /// [Objects and signals](https://fuchsia.googlesource.com/zircon/+/master/docs/concepts.md#Objects-and-Signals) + /// in the Zircon kernel documentation. Note: the names of signals are still in flux. 
+ #[repr(C)] + pub struct Signals: zx_signals_t { + const NONE = ZX_SIGNAL_NONE; + const OBJECT_ALL = ZX_OBJECT_SIGNAL_ALL; + const USER_ALL = ZX_USER_SIGNAL_ALL; + const OBJECT_0 = ZX_OBJECT_SIGNAL_0; + const OBJECT_1 = ZX_OBJECT_SIGNAL_1; + const OBJECT_2 = ZX_OBJECT_SIGNAL_2; + const OBJECT_3 = ZX_OBJECT_SIGNAL_3; + const OBJECT_4 = ZX_OBJECT_SIGNAL_4; + const OBJECT_5 = ZX_OBJECT_SIGNAL_5; + const OBJECT_6 = ZX_OBJECT_SIGNAL_6; + const OBJECT_7 = ZX_OBJECT_SIGNAL_7; + const OBJECT_8 = ZX_OBJECT_SIGNAL_8; + const OBJECT_9 = ZX_OBJECT_SIGNAL_9; + const OBJECT_10 = ZX_OBJECT_SIGNAL_10; + const OBJECT_11 = ZX_OBJECT_SIGNAL_11; + const OBJECT_12 = ZX_OBJECT_SIGNAL_12; + const OBJECT_13 = ZX_OBJECT_SIGNAL_13; + const OBJECT_14 = ZX_OBJECT_SIGNAL_14; + const OBJECT_15 = ZX_OBJECT_SIGNAL_15; + const OBJECT_16 = ZX_OBJECT_SIGNAL_16; + const OBJECT_17 = ZX_OBJECT_SIGNAL_17; + const OBJECT_18 = ZX_OBJECT_SIGNAL_18; + const OBJECT_19 = ZX_OBJECT_SIGNAL_19; + const OBJECT_20 = ZX_OBJECT_SIGNAL_20; + const OBJECT_21 = ZX_OBJECT_SIGNAL_21; + const OBJECT_22 = ZX_OBJECT_SIGNAL_22; + const OBJECT_HANDLE_CLOSED = ZX_OBJECT_HANDLE_CLOSED; + const USER_0 = ZX_USER_SIGNAL_0; + const USER_1 = ZX_USER_SIGNAL_1; + const USER_2 = ZX_USER_SIGNAL_2; + const USER_3 = ZX_USER_SIGNAL_3; + const USER_4 = ZX_USER_SIGNAL_4; + const USER_5 = ZX_USER_SIGNAL_5; + const USER_6 = ZX_USER_SIGNAL_6; + const USER_7 = ZX_USER_SIGNAL_7; + + const OBJECT_READABLE = ZX_OBJECT_READABLE; + const OBJECT_WRITABLE = ZX_OBJECT_WRITABLE; + const OBJECT_PEER_CLOSED = ZX_OBJECT_PEER_CLOSED; + + // Cancelation (handle was closed while waiting with it) + const HANDLE_CLOSED = ZX_SIGNAL_HANDLE_CLOSED; + + // Event + const EVENT_SIGNALED = ZX_EVENT_SIGNALED; + + // EventPair + const EVENT_PAIR_SIGNALED = ZX_EPAIR_SIGNALED; + const EVENT_PAIR_CLOSED = ZX_EPAIR_CLOSED; + + // Task signals (process, thread, job) + const TASK_TERMINATED = ZX_TASK_TERMINATED; + + // Channel + const CHANNEL_READABLE = ZX_CHANNEL_READABLE; + 
const CHANNEL_WRITABLE = ZX_CHANNEL_WRITABLE; + const CHANNEL_PEER_CLOSED = ZX_CHANNEL_PEER_CLOSED; + + // Socket + const SOCKET_READABLE = ZX_SOCKET_READABLE; + const SOCKET_WRITABLE = ZX_SOCKET_WRITABLE; + const SOCKET_PEER_CLOSED = ZX_SOCKET_PEER_CLOSED; + + // Port + const PORT_READABLE = ZX_PORT_READABLE; + + // Resource + const RESOURCE_DESTROYED = ZX_RESOURCE_DESTROYED; + const RESOURCE_READABLE = ZX_RESOURCE_READABLE; + const RESOURCE_WRITABLE = ZX_RESOURCE_WRITABLE; + const RESOURCE_CHILD_ADDED = ZX_RESOURCE_CHILD_ADDED; + + // Fifo + const FIFO_READABLE = ZX_FIFO_READABLE; + const FIFO_WRITABLE = ZX_FIFO_WRITABLE; + const FIFO_PEER_CLOSED = ZX_FIFO_PEER_CLOSED; + + // Job + const JOB_NO_PROCESSES = ZX_JOB_NO_PROCESSES; + const JOB_NO_JOBS = ZX_JOB_NO_JOBS; + + // Process + const PROCESS_TERMINATED = ZX_PROCESS_TERMINATED; + + // Thread + const THREAD_TERMINATED = ZX_THREAD_TERMINATED; + + // Log + const LOG_READABLE = ZX_LOG_READABLE; + const LOG_WRITABLE = ZX_LOG_WRITABLE; + + // Timer + const TIMER_SIGNALED = ZX_TIMER_SIGNALED; + } +} \ No newline at end of file diff --git a/third_party/rust/fuchsia-zircon/src/socket.rs b/third_party/rust/fuchsia-zircon/src/socket.rs index 92a5693645cf..c93e98cb73bb 100644 --- a/third_party/rust/fuchsia-zircon/src/socket.rs +++ b/third_party/rust/fuchsia-zircon/src/socket.rs @@ -5,7 +5,7 @@ //! Type-safe bindings for Zircon sockets. use {AsHandleRef, HandleBased, Handle, HandleRef, Peered}; -use {sys, Status, into_result}; +use {sys, Status, ok}; use std::ptr; @@ -18,63 +18,23 @@ pub struct Socket(Handle); impl_handle_based!(Socket); impl Peered for Socket {} -/// Options for creating a socket pair. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum SocketOpts { - /// Default options. - Default = 0, -} - -impl Default for SocketOpts { - fn default() -> Self { - SocketOpts::Default - } -} - -/// Options for writing into a socket. 
-#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum SocketWriteOpts { - /// Default options. - Default = 0, -} - -impl Default for SocketWriteOpts { - fn default() -> Self { - SocketWriteOpts::Default - } -} - -/// Options for reading from a socket. -#[repr(u32)] -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum SocketReadOpts { - /// Default options. - Default = 0, -} - -impl Default for SocketReadOpts { - fn default() -> Self { - SocketReadOpts::Default - } -} - - impl Socket { /// Create a socket, accessed through a pair of endpoints. Data written /// into one may be read from the other. /// /// Wraps /// [zx_socket_create](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/socket_create.md). - pub fn create(opts: SocketOpts) -> Result<(Socket, Socket), Status> { + pub fn create() -> Result<(Socket, Socket), Status> { unsafe { let mut out0 = 0; let mut out1 = 0; - let status = sys::zx_socket_create(opts as u32, &mut out0, &mut out1); - into_result(status, || - (Self::from(Handle(out0)), - Self::from(Handle(out1)))) + let opts = 0; + let status = sys::zx_socket_create(opts, &mut out0, &mut out1); + ok(status)?; + Ok(( + Self::from(Handle::from_raw(out0)), + Self::from(Handle::from_raw(out1)) + )) } } @@ -83,13 +43,14 @@ impl Socket { /// /// Wraps /// [zx_socket_write](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/socket_write.md). - pub fn write(&self, opts: SocketWriteOpts, bytes: &[u8]) -> Result { + pub fn write(&self, bytes: &[u8]) -> Result { let mut actual = 0; + let opts = 0; let status = unsafe { - sys::zx_socket_write(self.raw_handle(), opts as u32, bytes.as_ptr(), bytes.len(), + sys::zx_socket_write(self.raw_handle(), opts, bytes.as_ptr(), bytes.len(), &mut actual) }; - into_result(status, || actual) + ok(status).map(|()| actual) } /// Read the given bytes from the socket. 
@@ -97,18 +58,21 @@ impl Socket { /// /// Wraps /// [zx_socket_read](https://fuchsia.googlesource.com/zircon/+/master/docs/syscalls/socket_read.md). - pub fn read(&self, opts: SocketReadOpts, bytes: &mut [u8]) -> Result { + pub fn read(&self, bytes: &mut [u8]) -> Result { let mut actual = 0; + let opts = 0; let status = unsafe { - sys::zx_socket_read(self.raw_handle(), opts as u32, bytes.as_mut_ptr(), + sys::zx_socket_read(self.raw_handle(), opts, bytes.as_mut_ptr(), bytes.len(), &mut actual) }; - if status != sys::ZX_OK { - // If an error is returned then actual is undefined, so to be safe we set it to 0 and - // ignore any data that is set in bytes. - actual = 0; - } - into_result(status, || actual) + ok(status) + .map(|()| actual) + .map_err(|status| { + // If an error is returned then actual is undefined, so to be safe + // we set it to 0 and ignore any data that is set in bytes. + actual = 0; + status + }) } /// Close half of the socket, so attempts by the other side to write will fail. @@ -118,7 +82,7 @@ impl Socket { pub fn half_close(&self) -> Result<(), Status> { let status = unsafe { sys::zx_socket_write(self.raw_handle(), sys::ZX_SOCKET_HALF_CLOSE, ptr::null(), 0, ptr::null_mut()) }; - into_result(status, || ()) + ok(status) } pub fn outstanding_read_bytes(&self) -> Result { @@ -126,7 +90,7 @@ impl Socket { let status = unsafe { sys::zx_socket_read(self.raw_handle(), 0, ptr::null_mut(), 0, &mut outstanding) }; - into_result(status, || outstanding) + ok(status).map(|()| outstanding) } } @@ -136,27 +100,27 @@ mod tests { #[test] fn socket_basic() { - let (s1, s2) = Socket::create(SocketOpts::Default).unwrap(); + let (s1, s2) = Socket::create().unwrap(); // Write in one end and read it back out the other. 
- assert_eq!(s1.write(SocketWriteOpts::Default, b"hello").unwrap(), 5); + assert_eq!(s1.write(b"hello").unwrap(), 5); let mut read_vec = vec![0; 8]; - assert_eq!(s2.read(SocketReadOpts::Default, &mut read_vec).unwrap(), 5); + assert_eq!(s2.read(&mut read_vec).unwrap(), 5); assert_eq!(&read_vec[0..5], b"hello"); // Try reading when there is nothing to read. - assert_eq!(s2.read(SocketReadOpts::Default, &mut read_vec), Err(Status::ErrShouldWait)); + assert_eq!(s2.read(&mut read_vec), Err(Status::SHOULD_WAIT)); // Close the socket from one end. assert!(s1.half_close().is_ok()); - assert_eq!(s2.read(SocketReadOpts::Default, &mut read_vec), Err(Status::ErrBadState)); - assert_eq!(s1.write(SocketWriteOpts::Default, b"fail"), Err(Status::ErrBadState)); + assert_eq!(s2.read(&mut read_vec), Err(Status::BAD_STATE)); + assert_eq!(s1.write(b"fail"), Err(Status::BAD_STATE)); // Writing in the other direction should still work. - assert_eq!(s1.read(SocketReadOpts::Default, &mut read_vec), Err(Status::ErrShouldWait)); - assert_eq!(s2.write(SocketWriteOpts::Default, b"back").unwrap(), 4); - assert_eq!(s1.read(SocketReadOpts::Default, &mut read_vec).unwrap(), 4); + assert_eq!(s1.read(&mut read_vec), Err(Status::SHOULD_WAIT)); + assert_eq!(s2.write(b"back").unwrap(), 4); + assert_eq!(s1.read(&mut read_vec).unwrap(), 4); assert_eq!(&read_vec[0..4], b"back"); } } diff --git a/third_party/rust/fuchsia-zircon/src/status.rs b/third_party/rust/fuchsia-zircon/src/status.rs new file mode 100644 index 000000000000..4f3e38f988af --- /dev/null +++ b/third_party/rust/fuchsia-zircon/src/status.rs @@ -0,0 +1,162 @@ +use std::ffi::NulError; +use std::io; +use sys; + +/// Status type indicating the result of a Fuchsia syscall. +/// +/// This type is generally used to indicate the reason for an error. 
+/// While this type can contain `Status::OK` (`ZX_OK` in C land), elements of this type are +/// generally constructed using the `ok` method, which checks for `ZX_OK` and returns a +/// `Result<(), Status>` appropriately. +#[repr(C)] +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub struct Status(sys::zx_status_t); +impl Status { + /// Returns `Ok(())` if the status was `OK`, + /// otherwise returns `Err(status)`. + pub fn ok(raw: sys::zx_status_t) -> Result<(), Status> { + if raw == Status::OK.0 { + Ok(()) + } else { + Err(Status(raw)) + } + } + + pub fn from_raw(raw: sys::zx_status_t) -> Self { + Status(raw) + } + + pub fn into_raw(self) -> sys::zx_status_t { + self.0 + } +} +assoc_consts!(Status, [ + OK = sys::ZX_OK; + INTERNAL = sys::ZX_ERR_INTERNAL; + NOT_SUPPORTED = sys::ZX_ERR_NOT_SUPPORTED; + NO_RESOURCES = sys::ZX_ERR_NO_RESOURCES; + NO_MEMORY = sys::ZX_ERR_NO_MEMORY; + CALL_FAILED = sys::ZX_ERR_CALL_FAILED; + INTERRUPTED_RETRY = sys::ZX_ERR_INTERRUPTED_RETRY; + INVALID_ARGS = sys::ZX_ERR_INVALID_ARGS; + BAD_HANDLE = sys::ZX_ERR_BAD_HANDLE; + WRONG_TYPE = sys::ZX_ERR_WRONG_TYPE; + BAD_SYSCALL = sys::ZX_ERR_BAD_SYSCALL; + OUT_OF_RANGE = sys::ZX_ERR_OUT_OF_RANGE; + BUFFER_TOO_SMALL = sys::ZX_ERR_BUFFER_TOO_SMALL; + BAD_STATE = sys::ZX_ERR_BAD_STATE; + TIMED_OUT = sys::ZX_ERR_TIMED_OUT; + SHOULD_WAIT = sys::ZX_ERR_SHOULD_WAIT; + CANCELED = sys::ZX_ERR_CANCELED; + PEER_CLOSED = sys::ZX_ERR_PEER_CLOSED; + NOT_FOUND = sys::ZX_ERR_NOT_FOUND; + ALREADY_EXISTS = sys::ZX_ERR_ALREADY_EXISTS; + ALREADY_BOUND = sys::ZX_ERR_ALREADY_BOUND; + UNAVAILABLE = sys::ZX_ERR_UNAVAILABLE; + ACCESS_DENIED = sys::ZX_ERR_ACCESS_DENIED; + IO = sys::ZX_ERR_IO; + IO_REFUSED = sys::ZX_ERR_IO_REFUSED; + IO_DATA_INTEGRITY = sys::ZX_ERR_IO_DATA_INTEGRITY; + IO_DATA_LOSS = sys::ZX_ERR_IO_DATA_LOSS; + BAD_PATH = sys::ZX_ERR_BAD_PATH; + NOT_DIR = sys::ZX_ERR_NOT_DIR; + NOT_FILE = sys::ZX_ERR_NOT_FILE; + FILE_BIG = sys::ZX_ERR_FILE_BIG; + NO_SPACE = 
sys::ZX_ERR_NO_SPACE; + STOP = sys::ZX_ERR_STOP; + NEXT = sys::ZX_ERR_NEXT; +]); + +impl Status { + pub fn into_io_error(self) -> io::Error { + self.into() + } +} + +impl From for Status { + fn from(kind: io::ErrorKind) -> Self { + use std::io::ErrorKind::*; + match kind { + NotFound => Status::NOT_FOUND, + PermissionDenied => Status::ACCESS_DENIED, + ConnectionRefused => Status::IO_REFUSED, + ConnectionAborted => Status::PEER_CLOSED, + AddrInUse => Status::ALREADY_BOUND, + AddrNotAvailable => Status::UNAVAILABLE, + BrokenPipe => Status::PEER_CLOSED, + AlreadyExists => Status::ALREADY_EXISTS, + WouldBlock => Status::SHOULD_WAIT, + InvalidInput => Status::INVALID_ARGS, + TimedOut => Status::TIMED_OUT, + Interrupted => Status::INTERRUPTED_RETRY, + UnexpectedEof | + WriteZero | + ConnectionReset | + NotConnected | + Other | _ => Status::IO, + } + } +} + +impl From for io::ErrorKind { + fn from(status: Status) -> io::ErrorKind { + use std::io::ErrorKind::*; + match status { + Status::INTERRUPTED_RETRY => Interrupted, + Status::BAD_HANDLE => BrokenPipe, + Status::TIMED_OUT => TimedOut, + Status::SHOULD_WAIT => WouldBlock, + Status::PEER_CLOSED => ConnectionAborted, + Status::NOT_FOUND => NotFound, + Status::ALREADY_EXISTS => AlreadyExists, + Status::ALREADY_BOUND => AlreadyExists, + Status::UNAVAILABLE => AddrNotAvailable, + Status::ACCESS_DENIED => PermissionDenied, + Status::IO_REFUSED => ConnectionRefused, + Status::IO_DATA_INTEGRITY => InvalidData, + + Status::BAD_PATH | + Status::INVALID_ARGS | + Status::OUT_OF_RANGE | + Status::WRONG_TYPE => InvalidInput, + + Status::OK | + Status::NEXT | + Status::STOP | + Status::NO_SPACE | + Status::FILE_BIG | + Status::NOT_FILE | + Status::NOT_DIR | + Status::IO_DATA_LOSS | + Status::IO | + Status::CANCELED | + Status::BAD_STATE | + Status::BUFFER_TOO_SMALL | + Status::BAD_SYSCALL | + Status::INTERNAL | + Status::NOT_SUPPORTED | + Status::NO_RESOURCES | + Status::NO_MEMORY | + Status::CALL_FAILED | + _ => Other, + } + } +} + 
+impl From for Status { + fn from(err: io::Error) -> Status { + err.kind().into() + } +} + +impl From for io::Error { + fn from(status: Status) -> io::Error { + io::Error::from(io::ErrorKind::from(status)) + } +} + +impl From for Status { + fn from(_error: NulError) -> Status { + Status::INVALID_ARGS + } +} diff --git a/third_party/rust/fuchsia-zircon/src/time.rs b/third_party/rust/fuchsia-zircon/src/time.rs new file mode 100644 index 000000000000..1b1deaceed6c --- /dev/null +++ b/third_party/rust/fuchsia-zircon/src/time.rs @@ -0,0 +1,346 @@ +// Copyright 2016 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +//! Type-safe bindings for Zircon timer objects. + +use {AsHandleRef, ClockId, HandleBased, Handle, HandleRef, Status}; +use {sys, ok}; +use std::ops; +use std::time as stdtime; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct Duration(sys::zx_duration_t); + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct Time(sys::zx_time_t); + +impl From for Duration { + fn from(dur: stdtime::Duration) -> Self { + Duration::from_seconds(dur.as_secs()) + + Duration::from_nanos(dur.subsec_nanos() as u64) + } +} + +impl From for stdtime::Duration { + fn from(dur: Duration) -> Self { + let secs = dur.seconds(); + let nanos = (dur.nanos() - (secs * 1_000_000_000)) as u32; + stdtime::Duration::new(secs, nanos) + } +} + +impl ops::Add for Time { + type Output = Time; + fn add(self, dur: Duration) -> Time { + Time::from_nanos(dur.nanos() + self.nanos()) + } +} + +impl ops::Add { } } +impl Fuse { + /// Returns whether the underlying future has finished or not. + /// + /// If this method returns `true`, then all future calls to `poll` + /// are guaranteed to return `Ok(Async::NotReady)`. If this returns + /// false, then the underlying future has not been driven to + /// completion. 
+ pub fn is_done(&self) -> bool { + self.future.is_none() + } +} + impl Future for Fuse { type Item = A::Item; type Error = A::Error; diff --git a/third_party/rust/futures/src/future/mod.rs b/third_party/rust/futures/src/future/mod.rs index 7cccd907b06a..063322e29d90 100644 --- a/third_party/rust/futures/src/future/mod.rs +++ b/third_party/rust/futures/src/future/mod.rs @@ -102,7 +102,7 @@ if_std! { #[doc(hidden)] #[deprecated(note = "removed without replacement, recommended to use a \ local extension trait or function if needed, more \ - details in https://github.com/alexcrichton/futures-rs/issues/228")] + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] pub type BoxFuture = ::std::boxed::Box + Send>; impl Future for ::std::boxed::Box { @@ -323,7 +323,7 @@ pub trait Future { #[doc(hidden)] #[deprecated(note = "removed without replacement, recommended to use a \ local extension trait or function if needed, more \ - details in https://github.com/alexcrichton/futures-rs/issues/228")] + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] #[allow(deprecated)] fn boxed(self) -> BoxFuture where Self: Sized + Send + 'static diff --git a/third_party/rust/futures/src/future/shared.rs b/third_party/rust/futures/src/future/shared.rs index 3f7e3afd2785..25417ef07114 100644 --- a/third_party/rust/futures/src/future/shared.rs +++ b/third_party/rust/futures/src/future/shared.rs @@ -235,8 +235,20 @@ impl Notify for Notifier { } } -unsafe impl Sync for Inner {} -unsafe impl Send for Inner {} +// The `F` is synchronized by a lock, so `F` doesn't need +// to be `Sync`. However, its `Item` or `Error` are exposed +// through an `Arc` but not lock, so they must be `Send + Sync`. 
+unsafe impl Send for Inner + where F: Future + Send, + F::Item: Send + Sync, + F::Error: Send + Sync, +{} + +unsafe impl Sync for Inner + where F: Future + Send, + F::Item: Send + Sync, + F::Error: Send + Sync, +{} impl fmt::Debug for Inner where F: Future + fmt::Debug, diff --git a/third_party/rust/futures/src/lib.rs b/third_party/rust/futures/src/lib.rs index ac27d3bc5ff2..59e4874b2c0a 100644 --- a/third_party/rust/futures/src/lib.rs +++ b/third_party/rust/futures/src/lib.rs @@ -153,7 +153,7 @@ //! Some more information can also be found in the [README] for now, but //! otherwise feel free to jump in to the docs below! //! -//! [README]: https://github.com/alexcrichton/futures-rs#futures-rs +//! [README]: https://github.com/rust-lang-nursery/futures-rs#futures-rs #![no_std] #![deny(missing_docs, missing_debug_implementations)] diff --git a/third_party/rust/futures/src/sink/buffer.rs b/third_party/rust/futures/src/sink/buffer.rs index 034d571cb2d8..419579d9a045 100644 --- a/third_party/rust/futures/src/sink/buffer.rs +++ b/third_party/rust/futures/src/sink/buffer.rs @@ -49,9 +49,6 @@ impl Buffer { if let AsyncSink::NotReady(item) = self.sink.start_send(item)? { self.buf.push_front(item); - // ensure that we attempt to complete any pushes we've started - self.sink.poll_complete()?; - return Ok(Async::NotReady); } } diff --git a/third_party/rust/futures/src/sink/from_err.rs b/third_party/rust/futures/src/sink/from_err.rs index 92c218fe46a0..4880c30ef4fe 100644 --- a/third_party/rust/futures/src/sink/from_err.rs +++ b/third_party/rust/futures/src/sink/from_err.rs @@ -5,7 +5,7 @@ use {Sink, Poll, StartSend}; /// A sink combinator to change the error type of a sink. /// /// This is created by the `Sink::from_err` method. 
-#[derive(Debug)] +#[derive(Clone, Debug)] #[must_use = "futures do nothing unless polled"] pub struct SinkFromErr { sink: S, diff --git a/third_party/rust/futures/src/sink/map_err.rs b/third_party/rust/futures/src/sink/map_err.rs index cccad399e5ba..25c168c071e1 100644 --- a/third_party/rust/futures/src/sink/map_err.rs +++ b/third_party/rust/futures/src/sink/map_err.rs @@ -3,7 +3,7 @@ use sink::Sink; use {Poll, StartSend, Stream}; /// Sink for the `Sink::sink_map_err` combinator. -#[derive(Debug)] +#[derive(Clone,Debug)] #[must_use = "sinks do nothing unless polled"] pub struct SinkMapErr { sink: S, diff --git a/third_party/rust/futures/src/sink/with.rs b/third_party/rust/futures/src/sink/with.rs index b0d5c54c658e..3326b6e49ca7 100644 --- a/third_party/rust/futures/src/sink/with.rs +++ b/third_party/rust/futures/src/sink/with.rs @@ -7,7 +7,7 @@ use stream::Stream; /// Sink for the `Sink::with` combinator, chaining a computation to run *prior* /// to pushing a value into the underlying sink. -#[derive(Debug)] +#[derive(Clone, Debug)] #[must_use = "sinks do nothing unless polled"] pub struct With where S: Sink, @@ -20,7 +20,7 @@ pub struct With _phantom: PhantomData, } -#[derive(Debug)] +#[derive(Clone, Debug)] enum State { Empty, Process(Fut), diff --git a/third_party/rust/futures/src/stream/future.rs b/third_party/rust/futures/src/stream/future.rs index 918b2c85b4e7..5b052ee4d37f 100644 --- a/third_party/rust/futures/src/stream/future.rs +++ b/third_party/rust/futures/src/stream/future.rs @@ -14,6 +14,46 @@ pub fn new(s: S) -> StreamFuture { StreamFuture { stream: Some(s) } } +impl StreamFuture { + /// Acquires a reference to the underlying stream that this combinator is + /// pulling from. + /// + /// This method returns an `Option` to account for the fact that `StreamFuture`'s + /// implementation of `Future::poll` consumes the underlying stream during polling + /// in order to return it to the caller of `Future::poll` if the stream yielded + /// an element. 
+ pub fn get_ref(&self) -> Option<&S> { + self.stream.as_ref() + } + + /// Acquires a mutable reference to the underlying stream that this + /// combinator is pulling from. + /// + /// Note that care must be taken to avoid tampering with the state of the + /// stream which may otherwise confuse this combinator. + /// + /// This method returns an `Option` to account for the fact that `StreamFuture`'s + /// implementation of `Future::poll` consumes the underlying stream during polling + /// in order to return it to the caller of `Future::poll` if the stream yielded + /// an element. + pub fn get_mut(&mut self) -> Option<&mut S> { + self.stream.as_mut() + } + + /// Consumes this combinator, returning the underlying stream. + /// + /// Note that this may discard intermediate state of this combinator, so + /// care should be taken to avoid losing resources when this is called. + /// + /// This method returns an `Option` to account for the fact that `StreamFuture`'s + /// implementation of `Future::poll` consumes the underlying stream during polling + /// in order to return it to the caller of `Future::poll` if the stream yielded + /// an element. + pub fn into_inner(self) -> Option { + self.stream + } +} + impl Future for StreamFuture { type Item = (Option, S); type Error = (S::Error, S); diff --git a/third_party/rust/futures/src/stream/mod.rs b/third_party/rust/futures/src/stream/mod.rs index 01ae6d698a1c..b674aefdcd0a 100644 --- a/third_party/rust/futures/src/stream/mod.rs +++ b/third_party/rust/futures/src/stream/mod.rs @@ -115,7 +115,7 @@ if_std! { pub use self::chunks::Chunks; pub use self::collect::Collect; pub use self::wait::Wait; - pub use self::split::{SplitStream, SplitSink}; + pub use self::split::{SplitStream, SplitSink, ReuniteError}; pub use self::futures_unordered::FuturesUnordered; pub use self::futures_ordered::{futures_ordered, FuturesOrdered}; @@ -128,7 +128,7 @@ if_std! 
{ #[doc(hidden)] #[deprecated(note = "removed without replacement, recommended to use a \ local extension trait or function if needed, more \ - details in https://github.com/alexcrichton/futures-rs/issues/228")] + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] pub type BoxStream = ::std::boxed::Box + Send>; impl Stream for ::std::boxed::Box { @@ -179,7 +179,7 @@ if_std! { /// entirely. If one of these use cases suits you perfectly and not the other, /// please feel welcome to comment on [the issue][being considered]! /// -/// [being considered]: https://github.com/alexcrichton/futures-rs/issues/206 +/// [being considered]: https://github.com/rust-lang-nursery/futures-rs/issues/206 pub trait Stream { /// The type of item this stream will yield on success. type Item; @@ -271,7 +271,7 @@ pub trait Stream { #[doc(hidden)] #[deprecated(note = "removed without replacement, recommended to use a \ local extension trait or function if needed, more \ - details in https://github.com/alexcrichton/futures-rs/issues/228")] + details in https://github.com/rust-lang-nursery/futures-rs/issues/228")] #[allow(deprecated)] fn boxed(self) -> BoxStream where Self: Sized + Send + 'static, diff --git a/third_party/rust/futures/src/sync/oneshot.rs b/third_party/rust/futures/src/sync/oneshot.rs index d95883d3baf0..6db7cb5837f4 100644 --- a/third_party/rust/futures/src/sync/oneshot.rs +++ b/third_party/rust/futures/src/sync/oneshot.rs @@ -206,7 +206,7 @@ impl Inner { // under the hood. If it instead used `Release` / `Acquire` ordering, // then it would not necessarily synchronize with `inner.complete` // and deadlock might be possible, as was observed in - // https://github.com/alexcrichton/futures-rs/pull/219. + // https://github.com/rust-lang-nursery/futures-rs/pull/219. 
self.complete.store(true, SeqCst); if let Some(mut slot) = self.rx_task.try_lock() { if let Some(task) = slot.take() { diff --git a/third_party/rust/futures/src/task_impl/atomic_task.rs b/third_party/rust/futures/src/task_impl/atomic_task.rs index a89e20c70f6a..46881b9eca89 100644 --- a/third_party/rust/futures/src/task_impl/atomic_task.rs +++ b/third_party/rust/futures/src/task_impl/atomic_task.rs @@ -1,11 +1,9 @@ -#![allow(dead_code)] - use super::Task; use core::fmt; use core::cell::UnsafeCell; use core::sync::atomic::AtomicUsize; -use core::sync::atomic::Ordering::{Acquire, Release}; +use core::sync::atomic::Ordering::{Acquire, Release, AcqRel}; /// A synchronization primitive for task notification. /// @@ -31,32 +29,110 @@ pub struct AtomicTask { task: UnsafeCell>, } -/// Initial state, the `AtomicTask` is currently not being used. -/// -/// The value `2` is picked specifically because it between the write lock & -/// read lock values. Since the read lock is represented by an incrementing -/// counter, this enables an atomic fetch_sub operation to be used for releasing -/// a lock. -const WAITING: usize = 2; +// `AtomicTask` is a multi-consumer, single-producer transfer cell. The cell +// stores a `Task` value produced by calls to `register` and many threads can +// race to take the task (to notify it) by calling `notify. +// +// If a new `Task` instance is produced by calling `register` before an existing +// one is consumed, then the existing one is overwritten. +// +// While `AtomicTask` is single-producer, the implementation ensures memory +// safety. In the event of concurrent calls to `register`, there will be a +// single winner whose task will get stored in the cell. The losers will not +// have their tasks notified. As such, callers should ensure to add +// synchronization to calls to `register`. +// +// The implementation uses a single `AtomicUsize` value to coordinate access to +// the `Task` cell. 
There are two bits that are operated on independently. These +// are represented by `REGISTERING` and `NOTIFYING`. +// +// The `REGISTERING` bit is set when a producer enters the critical section. The +// `NOTIFYING` bit is set when a consumer enters the critical section. Neither +// bit being set is represented by `WAITING`. +// +// A thread obtains an exclusive lock on the task cell by transitioning the +// state from `WAITING` to `REGISTERING` or `NOTIFYING`, depending on the +// operation the thread wishes to perform. When this transition is made, it is +// guaranteed that no other thread will access the task cell. +// +// # Registering +// +// On a call to `register`, an attempt to transition the state from WAITING to +// REGISTERING is made. On success, the caller obtains a lock on the task cell. +// +// If the lock is obtained, then the thread sets the task cell to the task +// provided as an argument. Then it attempts to transition the state back from +// `REGISTERING` -> `WAITING`. +// +// If this transition is successful, then the registering process is complete +// and the next call to `notify` will observe the task. +// +// If the transition fails, then there was a concurrent call to `notify` that +// was unable to access the task cell (due to the registering thread holding the +// lock). To handle this, the registering thread removes the task it just set +// from the cell and calls `notify` on it. This call to notify represents the +// attempt to notify by the other thread (that set the `NOTIFYING` bit). The +// state is then transitioned from `REGISTERING | NOTIFYING` back to `WAITING`. +// This transition must succeed because, at this point, the state cannot be +// transitioned by another thread. +// +// # Notifying +// +// On a call to `notify`, an attempt to transition the state from `WAITING` to +// `NOTIFYING` is made. On success, the caller obtains a lock on the task cell. 
+// +// If the lock is obtained, then the thread takes ownership of the current value +// in the task cell, and calls `notify` on it. The state is then transitioned +// back to `WAITING`. This transition must succeed as, at this point, the state +// cannot be transitioned by another thread. +// +// If the thread is unable to obtain the lock, the `NOTIFYING` bit is still set. +// This is because it has either been set by the current thread but the previous +// value included the `REGISTERING` bit **or** a concurrent thread is in the +// `NOTIFYING` critical section. Either way, no action must be taken. +// +// If the current thread is the only concurrent call to `notify` and another +// thread is in the `register` critical section, when the other thread **exits** +// the `register` critical section, it will observe the `NOTIFYING` bit and +// handle the notify itself. +// +// If another thread is in the `notify` critical section, then it will handle +// notifying the task. +// +// # A potential race (is safely handled). +// +// Imagine the following situation: +// +// * Thread A obtains the `notify` lock and notifies a task. +// +// * Before thread A releases the `notify` lock, the notified task is scheduled. +// +// * Thread B attempts to notify the task. In theory this should result in the +// task being notified, but it cannot because thread A still holds the notify +// lock. +// +// This case is handled by requiring users of `AtomicTask` to call `register` +// **before** attempting to observe the application state change that resulted +// in the task being notified. The notifiers also change the application state +// before calling notify. +// +// Because of this, the task will do one of two things. +// +// 1) Observe the application state change that Thread B is notifying on. In +// this case, it is OK for Thread B's notification to be lost. +// +// 2) Call register before attempting to observe the application state. 
Since +// Thread A still holds the `notify` lock, the call to `register` will result +// in the task notifying itself and get scheduled again. -/// The `register` function has determined that the task is no longer current. -/// This implies that `AtomicTask::register` is being called from a different -/// task than is represented by the currently stored task. The write lock is -/// obtained to update the task cell. -const LOCKED_WRITE: usize = 0; +/// Idle state +const WAITING: usize = 0; -/// At least one call to `notify` happened concurrently to `register` updating -/// the task cell. This state is detected when `register` exits the mutation -/// code and signals to `register` that it is responsible for notifying its own -/// task. -const LOCKED_WRITE_NOTIFIED: usize = 1; +/// A new task value is being registered with the `AtomicTask` cell. +const REGISTERING: usize = 0b01; - -/// The `notify` function has locked access to the task cell for notification. -/// -/// The constant is left here mostly for documentation reasons. -#[allow(dead_code)] -const LOCKED_READ: usize = 3; +/// The task currently registered with the `AtomicTask` cell is being notified. +const NOTIFYING: usize = 0b10; impl AtomicTask { /// Create an `AtomicTask` initialized with the given `Task` @@ -73,6 +149,13 @@ impl AtomicTask { /// Registers the current task to be notified on calls to `notify`. /// + /// This is the same as calling `register_task` with `task::current()`. + pub fn register(&self) { + self.register_task(super::current()); + } + + /// Registers the provided task to be notified on calls to `notify`. + /// /// The new task will take place of any previous tasks that were registered /// by previous calls to `register`. Any calls to `notify` that happen after /// a call to `register` (as defined by the memory ordering rules), will @@ -86,39 +169,75 @@ impl AtomicTask { /// idea. Concurrent calls to `register` will attempt to register different /// tasks to be notified. 
One of the callers will win and have its task set, /// but there is no guarantee as to which caller will succeed. - pub fn register(&self) { - // Get a new task handle - let task = super::current(); - - match self.state.compare_and_swap(WAITING, LOCKED_WRITE, Acquire) { + pub fn register_task(&self, task: Task) { + match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) { WAITING => { unsafe { - // Locked acquired, update the task cell - *self.task.get() = Some(task); + // Locked acquired, update the waker cell + *self.task.get() = Some(task.clone()); - // Release the lock. If the state transitioned to - // `LOCKED_NOTIFIED`, this means that an notify has been - // signaled, so notify the task. - if LOCKED_WRITE_NOTIFIED == self.state.swap(WAITING, Release) { - (*self.task.get()).as_ref().unwrap().notify(); + // Release the lock. If the state transitioned to include + // the `NOTIFYING` bit, this means that a notify has been + // called concurrently, so we have to remove the task and + // notify it. + // + // Start by assuming that the state is `REGISTERING` as this + // is what we just set it to. + let mut curr = REGISTERING; + + // If a task has to be notified, it will be set here. + let mut notify: Option = None; + + loop { + let res = self.state.compare_exchange( + curr, WAITING, AcqRel, Acquire); + + match res { + Ok(_) => { + // The atomic exchange was successful, now + // notify the task (if set) and return. + if let Some(task) = notify { + task.notify(); + } + + return; + } + Err(actual) => { + // This branch can only be reached if a + // concurrent thread called `notify`. In this + // case, `actual` **must** be `REGISTERING | + // `NOTIFYING`. + debug_assert_eq!(actual, REGISTERING | NOTIFYING); + + // Take the task to notify once the atomic operation has + // completed. 
+ notify = (*self.task.get()).take(); + + // Update `curr` for the next iteration of the + // loop + curr = actual; + } + } } } } - LOCKED_WRITE | LOCKED_WRITE_NOTIFIED => { - // A thread is concurrently calling `register`. This shouldn't - // happen as it doesn't really make much sense, but it isn't - // unsafe per se. Since two threads are concurrently trying to - // update the task, it's undefined which one "wins" (no ordering - // guarantees), so we can just do nothing. + NOTIFYING => { + // Currently in the process of notifying the task, i.e., + // `notify` is currently being called on the old task handle. + // So, we call notify on the new task handle + task.notify(); } state => { - debug_assert!(state != LOCKED_WRITE, "unexpected state LOCKED_WRITE"); - debug_assert!(state != LOCKED_WRITE_NOTIFIED, "unexpected state LOCKED_WRITE_NOTIFIED"); - - // Currently in a read locked state, this implies that `notify` - // is currently being called on the old task handle. So, we call - // notify on the new task handle - task.notify(); + // In this case, a concurrent thread is holding the + // "registering" lock. This probably indicates a bug in the + // caller's code as racing to call `register` doesn't make much + // sense. + // + // We just want to maintain memory safety. It is ok to drop the + // call to `register`. + debug_assert!( + state == REGISTERING || + state == REGISTERING | NOTIFYING); } } } @@ -127,49 +246,33 @@ impl AtomicTask { /// /// If `register` has not been called yet, then this does nothing. pub fn notify(&self) { - let mut curr = WAITING; + // AcqRel ordering is used in order to acquire the value of the `task` + // cell as well as to establish a `release` ordering with whatever + // memory the `AtomicTask` is associated with. + match self.state.fetch_or(NOTIFYING, AcqRel) { + WAITING => { + // The notifying lock has been acquired. 
+ let task = unsafe { (*self.task.get()).take() }; - loop { - if curr == LOCKED_WRITE { - // Transition the state to LOCKED_NOTIFIED - let actual = self.state.compare_and_swap(LOCKED_WRITE, LOCKED_WRITE_NOTIFIED, Release); + // Release the lock + self.state.fetch_and(!NOTIFYING, Release); - if curr == actual { - // Success, return - return; + if let Some(task) = task { + task.notify(); } - - // update current state variable and try again - curr = actual; - - } else if curr == LOCKED_WRITE_NOTIFIED { - // Currently in `LOCKED_WRITE_NOTIFIED` state, nothing else to do. - return; - - } else { - // Currently in a LOCKED_READ state, so attempt to increment the - // lock count. - let actual = self.state.compare_and_swap(curr, curr + 1, Acquire); - - // Locked acquired - if actual == curr { - // Notify the task - unsafe { - if let Some(ref task) = *self.task.get() { - task.notify(); - } - } - - // Release the lock - self.state.fetch_sub(1, Release); - - // Done - return; - } - - // update current state variable and try again - curr = actual; - + } + state => { + // There is a concurrent thread currently updating the + // associated task. + // + // Nothing more to do as the `NOTIFYING` bit has been set. It + // doesn't matter if there are concurrent registering threads or + // not. + // + debug_assert!( + state == REGISTERING || + state == REGISTERING | NOTIFYING || + state == NOTIFYING); } } } diff --git a/third_party/rust/futures/src/task_impl/mod.rs b/third_party/rust/futures/src/task_impl/mod.rs index 132173459fa9..0a3e917df615 100644 --- a/third_party/rust/futures/src/task_impl/mod.rs +++ b/third_party/rust/futures/src/task_impl/mod.rs @@ -109,7 +109,7 @@ impl Task { /// Indicate that the task should attempt to poll its future in a timely /// fashion. 
/// - /// It's typically guaranteed that, for each call to `notify`, `poll` will + /// It's typically guaranteed that, after calling `notify`, `poll` will /// be called at least once subsequently (unless the future has terminated). /// If the task is currently polling its future when `notify` is called, it /// must poll the future *again* afterwards, ensuring that all relevant diff --git a/third_party/rust/futures/src/unsync/mpsc.rs b/third_party/rust/futures/src/unsync/mpsc.rs index 68cf6b8cb95b..ba0d52dc98b2 100644 --- a/third_party/rust/futures/src/unsync/mpsc.rs +++ b/third_party/rust/futures/src/unsync/mpsc.rs @@ -110,7 +110,11 @@ impl Drop for Sender { Some(shared) => shared, None => return, }; - if Rc::weak_count(&shared) == 0 { + // The number of existing `Weak` indicates if we are possibly the last + // `Sender`. If we are the last, we possibly must notify a blocked + // `Receiver`. `self.shared` is always one of the `Weak` to this shared + // data. Therefore the smallest possible Rc::weak_count(&shared) is 1. 
+ if Rc::weak_count(&shared) == 1 { if let Some(task) = shared.borrow_mut().blocked_recv.take() { // Wake up receiver as its stream has ended task.notify(); diff --git a/third_party/rust/futures/tests/fuse.rs b/third_party/rust/futures/tests/fuse.rs index a1e6cee2f1a2..177d914e19a0 100644 --- a/third_party/rust/futures/tests/fuse.rs +++ b/third_party/rust/futures/tests/fuse.rs @@ -13,3 +13,27 @@ fn fuse() { assert!(future.poll_future_notify(¬ify_panic(), 0).unwrap().is_ready()); assert!(future.poll_future_notify(¬ify_panic(), 0).unwrap().is_not_ready()); } + +#[test] +fn fuse_is_done() { + use futures::future::{Fuse, FutureResult}; + + struct Wrapped(Fuse>); + + impl Future for Wrapped { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + assert!(!self.0.is_done()); + assert_eq!(self.0.poll().unwrap(), Async::Ready(2)); + assert!(self.0.is_done()); + assert_eq!(self.0.poll().unwrap(), Async::NotReady); + assert!(self.0.is_done()); + + Ok(Async::Ready(())) + } + } + + assert!(Wrapped(ok::(2).fuse()).wait().is_ok()); +} \ No newline at end of file diff --git a/third_party/rust/futures/tests/unsync.rs b/third_party/rust/futures/tests/unsync.rs index b5ae8d0fbf0a..3d1108598094 100644 --- a/third_party/rust/futures/tests/unsync.rs +++ b/third_party/rust/futures/tests/unsync.rs @@ -201,3 +201,65 @@ fn spawn_kill_dead_stream() { }, } } + + +/// Test case for PR #768 (issue #766). +/// The issue was: +/// Given that an empty channel is polled by the Receiver, and the only Sender +/// gets dropped without sending anything, then the Receiver would get stuck. + +#[test] +fn dropped_sender_of_unused_channel_notifies_receiver() { + let core = Core::new(); + type FUTURE = Box>; + + // Constructs the channel which we want to test, and two futures which + // act on that channel. + let pair = |reverse| -> Vec { + // This is the channel which we want to test. 
+ let (tx, rx) = mpsc::channel::(1); + let mut futures: Vec = vec![ + Box::new(futures::stream::iter_ok(vec![]) + .forward(tx) + .map_err(|_: mpsc::SendError| ()) + .map(|_| 42) + ), + Box::new(rx.fold((), |_, _| Ok(())) + .map(|_| 24) + ), + ]; + if reverse { + futures.reverse(); + } + futures + }; + + let make_test_future = |reverse| -> Box, Error=()>> { + let f = futures::future::join_all(pair(reverse)); + + // Use a timeout. This is not meant to test the `sync::oneshot` but + // merely uses it to implement this timeout. + let (timeout_tx, timeout_rx) = futures::sync::oneshot::channel::>(); + std::thread::spawn(move || { + std::thread::sleep(std::time::Duration::from_millis(1000)); + let x = timeout_tx.send(vec![0]); + assert!(x.is_err(), "Test timed out."); + }); + + Box::new(f.select(timeout_rx.map_err(|_|())) + .map_err(|x| x.0) + .map(|x| x.0) + ) + }; + + // The order of the tested futures is important to test fix of PR #768. + // We want future_2 to poll on the Receiver before the Sender is dropped. 
+ let result = core.run(make_test_future(false)); + assert!(result.is_ok()); + assert_eq!(vec![42, 24], result.unwrap()); + + // Test also the other ordering: + let result = core.run(make_test_future(true)); + assert!(result.is_ok()); + assert_eq!(vec![24, 42], result.unwrap()); +} diff --git a/third_party/rust/h2/.cargo-checksum.json b/third_party/rust/h2/.cargo-checksum.json new file mode 100644 index 000000000000..d8884c804d72 --- /dev/null +++ b/third_party/rust/h2/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".travis.yml":"4f48a0b7dd2e3541e107acb9f0080a9e604b2ca88fd13a64a9860f9668b3e512","CHANGELOG.md":"25ad126daa4a2dc3772037f3c62d1975d3f32816e3fdbc5af6066382fb24eb91","CONTRIBUTING.md":"149e2dfd4f8b2a8d8bcfaab09c444ff3974a7bdb070ded1de4c2d87ccb8db6f3","Cargo.toml":"d5ce8d43b63f2c28afb3c4ab7af175b84d7c8947b437e787405f64e479e1ac90","LICENSE":"b21623012e6c453d944b0342c515b631cfcbf30704c2621b291526b69c10724d","README.md":"d875b23954a91d7d5fbab0f5b8e69975c4aa2dedd2d31a14573e7bcf64901c19","ci/h2spec.sh":"705c6a3a79bf25c6569364bef0f11edc80a0db0b7908522924f3536e6d1e7d9d","examples/akamai.rs":"60150ab80671da44c76bcd89a3369da7c2ce7240b2c326e8f5b829e2265ccf69","examples/client.rs":"145572ccbe3617af09cc5d6a8d18dd8d9dd995bb92cdbaa19a1afa2631066986","examples/server.rs":"6f21c912568c8a44e0217c1bca3d839c1df5dcf1056f30e58c7ca8cde5d2ab3c","src/client.rs":"5385c411dcf026cdd02e210010d02f33b8492eff4b4f97b21a45874aec7267bf","src/codec/error.rs":"f03da60918bf9f41e4eabb9be802c8aa7230c59118e53efe97ad9804c17057d4","src/codec/framed_read.rs":"c7f7221481b0dee5ca5d1c7c42e0ef1f51b7110b79f6f546edecc6ad90afd61c","src/codec/framed_write.rs":"3a46fd36a9f2237d14eca0e95aa9436dbaaed49940cbf44c1c517ba3c101d0c4","src/codec/mod.rs":"643bcf5086c0b3e8fab4c6f76bf942333e2ba36f3fe68fddb76ee6fd58e1327c","src/error.rs":"3b35505bdedc0820a00b69931378fdcc76920a444b29cf96cc6b01b70fec61f7","src/frame/data.rs":"0b415ee49a6daae460928002b21795f3cd6df2213849b22e628a09901ba2afec","src/frame/go_away.rs":"2024f4fd475
a78c2206d28a668d8931e4e579d32f2291db97a259db4abfe334f","src/frame/head.rs":"9e8763b719c4d7116d7b12b4dcc9c344791c3c64500e0acfd52d3679e4ca8a58","src/frame/headers.rs":"72b93064ce05b657ec8a85c62f6f041c96d986c30ec013676c249643e035d24f","src/frame/mod.rs":"020701b8e4fcc7170a2e2a84483151105cd8c595171248a9dc90030d1576b361","src/frame/ping.rs":"68e2fe2f53c39c37a3c121f10b2c0665fc4ff448f36cba4a3603b0fd3e5c9b52","src/frame/priority.rs":"3ad33b4e9a24bb5b2b8b1c92a134efa790ee8ea963b6f4475161034b99f8b5d3","src/frame/reason.rs":"5f8303d1dacc78d622d87da2fcc2324d56adbcde4ca24df7b82194774055c7e4","src/frame/reset.rs":"0595e02a3f83b4451864caecee177fdf003b10fe275a88f23cebb467c71a4444","src/frame/settings.rs":"7956e98f7a8620b904cc6ded5c0f64e32f437cf8871fba29a79b266cb2b756b4","src/frame/stream_id.rs":"088580143cdd34740c2673f50c6e0d6983b2271c011a3583621af01517f862c5","src/frame/util.rs":"116c6e6978eac18405a84806a9872754600fe88f94e6496a9677b15f7c0611f7","src/frame/window_update.rs":"9c664df42fc789050726eb4005cd38660cad7b92f46117a8fd9528761fd76ff5","src/hpack/decoder.rs":"4108a1ac1d15662f2acf8438a2bfe345e5477511a9b40d8f6ad195143bc1efb9","src/hpack/encoder.rs":"a89486e2a53bfc79806fd4ed83edd508dc9ccd495fc884cb83c940cd575c8a99","src/hpack/header.rs":"957d0180b3f475c13685761532c4e8e55013e1a13a1229ff051362b38ddded5d","src/hpack/huffman/mod.rs":"bc3bf001db39efe34f5e142723d03f1718f51cb5ab72ec5974cd7fb9b881eab9","src/hpack/huffman/table.rs":"062ddb1e4f0d4808fd64a4e62b420ea5b6a5ace0546b90f31b36d3f72b3f6619","src/hpack/mod.rs":"858049d8af16288c328e279b1d9feebf3d6a110f1991d4de3fdd64a9f4c8ed5f","src/hpack/table.rs":"f168c1e7621765fd1464c58be021a77b1da83902ded2ee7cb591eec28f6dcc52","src/hpack/test/fixture.rs":"e9c4eee1b69a8bd912532818e2fbda1a1ff8ef769070c71df1618e9f6d5c73ef","src/hpack/test/fuzz.rs":"0eebfa28de8b68f7a7b5803440b37646e8acfea5ee1ef5a4852c0b5241639fa3","src/hpack/test/mod.rs":"56ad5643e7f1e273e5bce8a6fc0552be39c326dacfffd7f9757ccdbe75e9b66e","src/lib.rs":"3b29cb429fc1db80482fe25cdfdd918a153f
d53f940182cb3b3a42244f8f1e2f","src/proto/connection.rs":"e24eabf93cd20f80306ac6ee7540b13aab377719a2a508a21ffa210274340069","src/proto/error.rs":"917228534afb2f21977a3eabbb5bad72882134c0bd59f1277999fbe16f30a5d9","src/proto/go_away.rs":"1c5513658f85c8783d25578c32d0654d77609c972bac93fb941c76f3b9a5787d","src/proto/mod.rs":"d091593f4c3bd3e1196c06f34e9b93f2511c1ecbbef0376ad61a22861e219ed3","src/proto/peer.rs":"cd93f7112ded9dca2e5440e820bc3e7cc19b1e42fd2e84c17037148535237066","src/proto/ping_pong.rs":"3747fa1fc1547d0291212030e1c9b98e17e1cf82bfa31be79d8e866cbf593459","src/proto/settings.rs":"91690f4e1c31554c26c4830ffee68de1e89a67e726dcc9119832cdc7ebaeb5e2","src/proto/streams/buffer.rs":"d88060142edd50e23e519e9a67f926e5f5fa5e49fe5c2c2868f330f840290ec9","src/proto/streams/counts.rs":"6ee0e1dc45b6dae820a6f7297b8ea64e0b0ceafe614e40277ef0c577df909683","src/proto/streams/flow_control.rs":"c39d5b77a94c12c724d7a9d391266119af355b12caf2c24169c11b3dfd3bbafa","src/proto/streams/mod.rs":"df857056c7977a8fc368b06dd2e2895ddae30d67bcbf72f4dad7a7c707f6b34b","src/proto/streams/prioritize.rs":"d32b574d84e3dcabcfb9d7e8e29140c0a01ce0322c3a5350ac94d04702b7be07","src/proto/streams/recv.rs":"3f8bea9a48e391e0f38e67194d5d1a9e73464aef07dcc456ab7f6a585935d2b4","src/proto/streams/send.rs":"f2cbbdfcb90c5ceaa93a1e87b477ab8f81774963d5929e4c4f5ab1d4f9ee2c83","src/proto/streams/state.rs":"743e91b24750e765914fcf4a9038bc2015cc57d19fbdb03776f6f789d4a45e6c","src/proto/streams/store.rs":"13c0867be760f71942df98d5e964d56a467996faed338a2056a7735fbb770719","src/proto/streams/stream.rs":"988e61b8a90216a809a6bd2c920e2b46a34f0fc3d25fb0e5ba997091c0c0af6a","src/proto/streams/streams.rs":"ca5a5ca6ce23bdf0f931b19bce6caaed082117dec871157ab0c37cca4ef35c4b","src/server.rs":"c651d5105114a154d643b386c55f05ae5ae1700322185cb3a5087ba7a47595f7","src/share.rs":"a437d444e95534015012ab2191c134833be12206ae9ad4d6c5b8ccf94710daa6"},"package":"a27e7ed946e8335bdf9a191bc1b9b14a03ba822d013d2f58437f4fabcbd7fc2c"} \ No newline at end of file 
diff --git a/third_party/rust/h2/.travis.yml b/third_party/rust/h2/.travis.yml new file mode 100644 index 000000000000..c42c51d016ed --- /dev/null +++ b/third_party/rust/h2/.travis.yml @@ -0,0 +1,62 @@ +--- +language: rust +dist: trusty +sudo: false + +cache: + cargo: true + apt: true + +addons: + apt: + packages: + - libssl-dev + +matrix: + include: + - rust: nightly + - rust: stable + before_deploy: cargo doc --no-deps + allow_failures: + - rust: nightly + +before_script: + - cargo clean + +script: + # Build without unstable flag + - cargo build + + # Test examples in README. + - rustdoc --test README.md -L target/debug -L target/debug/deps + + # Check with unstable flag + - cargo check --features unstable + + # Run tests, this includes lib tests and doc tests + - RUST_TEST_THREADS=1 cargo test + + # Run integration tests + - cargo test -p h2-tests + + # Run h2spec on stable + - if [ "${TRAVIS_RUST_VERSION}" = "stable" ]; then ./ci/h2spec.sh; fi + +deploy: + provider: pages + skip_cleanup: true + github_token: $GH_TOKEN + target_branch: gh-pages + local_dir: target/doc + on: + branch: master + repo: carllerche/h2 + rust: stable + +env: + global: + secure: LkjG3IYPu7GY7zuMdYyLtdvjR4a6elX6or1Du7LTBz4JSlQXYAaj6DxhfZfm4d1kECIlnJJ2T21BqDoJDnld5lLu6VcXQ2ZEo/2f2k77GQ/9w3erwcDtqxK02rPoslFNzSd2SCdafjGKdbcvGW2HVBEu5gYEfOdu1Cdy6Av3+vLPk5To50khBQY90Kk+cmSd7J0+CHw/wSXnVgIVoO4742+aj5pxZQLx3lsi3ZPzIh1VL4QOUlaI98ybrCVNxADQCeXRRDzj0d8NzeKlkm8eXpgpiMVRJWURMa3rU2sHU9wh+YjMyoqGZWv2LlzG5LBqde3RWPQ99ebxVhlly6RgEom8yvZbavcGJ4BA0OjviLYAMb1Wjlu1paLZikEqlvTojhpzz3PVuIBZHl+rUgnUfkuhfmMzTBJTPHPMP0GtqpIAGpyRwbv56DquuEiubl70FZmz52sXGDseoABv9jQ4SNJrDrA+bfIWkPpWwqnKaWIgGPl0n3GKeceQM3RshpaE59awYUDS4ybjtacb2Fr99fx25mTO2W4x5hcDqAvBohxRPXgRB2y0ZmrcJyCV3rfkiGFUK7H8ZBqNQ6GG/GYilgj40q6TgcnXxUxyKkykDiS9VU0QAjAwz0pkCNipJ+ImS1j0LHEOcKMKZ7OsGOuSqBmF24ewBs+XzXY7dTnM/Xc= + +notifications: + email: + on_success: never diff --git a/third_party/rust/h2/CHANGELOG.md b/third_party/rust/h2/CHANGELOG.md new file 
mode 100644 index 000000000000..5e4bb853e7a9 --- /dev/null +++ b/third_party/rust/h2/CHANGELOG.md @@ -0,0 +1,66 @@ +# 0.1.12 (August 8, 2018) + +* Fix initial send window size (#301). +* Fix panic when calling `reserve_capacity` after connection has been closed (#302). +* Fix handling of incoming `SETTINGS_INITIAL_WINDOW_SIZE`. (#299) + +# 0.1.11 (July 31, 2018) + +* Add `stream_id` accessors to public API types (#292). +* Fix potential panic when dropping clients (#295). +* Fix busy loop when shutting down server (#296). + +# 0.1.10 (June 15, 2018) + +* Fix potential panic in `SendRequest::poll_ready()` (#281). +* Fix infinite loop on reset connection during prefix (#285). + +# 0.1.9 (May 31, 2018) + +* Add `poll_reset` to `SendResponse` and `SendStream` (#279). + +# 0.1.8 (May 23, 2018) + +* Fix client bug when max streams is reached. (#277) + +# 0.1.7 (May 14, 2018) + +* Misc bug fixes (#266, #273, #261, #275). + +# 0.1.6 (April 24, 2018) + +* Misc bug fixes related to stream management (#258, #260, #262). + +# 0.1.5 (April 6, 2018) + +* Fix the `last_stream_id` sent during graceful GOAWAY (#254). + +# 0.1.4 (April 5, 2018) + +* Add `initial_connection_window_size` to client and server `Builder`s (#249). +* Add `graceful_shutdown` and `abrupt_shutdown` to `server::Connection`, + deprecating `close_connection` (#250). + +# 0.1.3 (March 28, 2018) + +* Allow configuring max streams before the peer's settings frame is + received (#242). +* Fix HPACK decoding bug with regards to large literals (#244). +* Fix state transition bug triggered by receiving a RST_STREAM frame (#247). + +# 0.1.2 (March 13, 2018) + +* Fix another bug relating to resetting connections and reaching + max concurrency (#238). + +# 0.1.1 (March 8, 2018) + +* When streams are dropped, close the connection (#222). +* Notify send tasks on connection error (#231). +* Fix bug relating to resetting connections and reaching max concurrency (#235). 
+* Normalize HTTP request path to satisfy HTTP/2.0 specification (#228). +* Update internal dependencies. + +# 0.1.0 (Jan 12, 2018) + +* Initial release diff --git a/third_party/rust/h2/CONTRIBUTING.md b/third_party/rust/h2/CONTRIBUTING.md new file mode 100644 index 000000000000..fa0b26393a0c --- /dev/null +++ b/third_party/rust/h2/CONTRIBUTING.md @@ -0,0 +1,84 @@ +# Contributing to _h2_ # + +:balloon: Thanks for your help improving the project! + +## Getting Help ## + +If you have a question about the h2 library or have encountered problems using it, you may +[file an issue][issue] or ask ask a question on the [Tokio Gitter][gitter]. + +## Submitting a Pull Request ## + +Do you have an improvement? + +1. Submit an [issue][issue] describing your proposed change. +2. We will try to respond to your issue promptly. +3. Fork this repo, develop and test your code changes. See the project's [README](README.md) for further information about working in this repository. +4. Submit a pull request against this repo's `master` branch. +6. Your branch may be merged once all configured checks pass, including: + - Code review has been completed. + - The branch has passed tests in CI. + +## Committing ## + +When initially submitting a pull request, we prefer a single squashed commit. It +is preferable to split up contributions into multiple pull requests if the +changes are unrelated. All pull requests are squashed when merged, but +squashing yourself gives you better control over the commit message. + +After the pull request is submitted, all changes should be done in separate +commits. This makes reviewing the evolution of the pull request easier. We will +squash all the changes into a single commit when we merge the pull request. 
+ +### Commit messages ### + +Finalized commit messages should be in the following format: + +``` +Subject + +Problem + +Solution + +Validation +``` + +#### Subject #### + +- one line, <= 50 characters +- describe what is done; not the result +- use the active voice +- capitalize first word and proper nouns +- do not end in a period — this is a title/subject +- reference the github issue by number + +##### Examples ##### + +``` +bad: server disconnects should cause dst client disconnects. +good: Propagate disconnects from source to destination +``` + +``` +bad: support tls servers +good: Introduce support for server-side TLS (#347) +``` + +#### Problem #### + +Explain the context and why you're making that change. What is the problem +you're trying to solve? In some cases there is not a problem and this can be +thought of as being the motivation for your change. + +#### Solution #### + +Describe the modifications you've made. + +#### Validation #### + +Describe the testing you've done to validate your change. Performance-related +changes should include before- and after- benchmark results. + +[issue]: https://github.com/carllerche/h2/issues/new +[gitter]: https://gitter.im/tokio-rs/tokio diff --git a/third_party/rust/h2/Cargo.toml b/third_party/rust/h2/Cargo.toml new file mode 100644 index 000000000000..938a6d39336a --- /dev/null +++ b/third_party/rust/h2/Cargo.toml @@ -0,0 +1,97 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "h2" +version = "0.1.12" +authors = ["Carl Lerche "] +exclude = ["fixtures/**"] +description = "An HTTP/2.0 client and server" +homepage = "https://github.com/carllerche/h2" +documentation = "https://docs.rs/h2" +readme = "README.md" +keywords = ["http", "async", "non-blocking"] +categories = ["asynchronous", "web-programming", "network-programming"] +license = "MIT" +repository = "https://github.com/carllerche/h2" +[dependencies.byteorder] +version = "1.0" + +[dependencies.bytes] +version = "0.4.7" + +[dependencies.fnv] +version = "1.0.5" + +[dependencies.futures] +version = "0.1" + +[dependencies.http] +version = "0.1.3" + +[dependencies.indexmap] +version = "1.0" + +[dependencies.log] +version = "0.4.1" + +[dependencies.slab] +version = "0.4.0" + +[dependencies.string] +version = "0.1" + +[dependencies.tokio-io] +version = "0.1.4" +[dev-dependencies.env_logger] +version = "0.5.3" +default-features = false + +[dev-dependencies.hex] +version = "0.2.0" + +[dev-dependencies.quickcheck] +version = "0.4.1" +default-features = false + +[dev-dependencies.rand] +version = "0.3.15" + +[dev-dependencies.rustls] +version = "0.12" + +[dev-dependencies.serde] +version = "1.0.0" + +[dev-dependencies.serde_json] +version = "1.0.0" + +[dev-dependencies.tokio-core] +version = "0.1" + +[dev-dependencies.tokio-rustls] +version = "0.5.0" + +[dev-dependencies.walkdir] +version = "1.0.0" + +[dev-dependencies.webpki] +version = "0.18.0-alpha" + +[dev-dependencies.webpki-roots] +version = "0.14" + +[features] +unstable = [] +[badges.travis-ci] +branch = "master" +repository = "carllerche/h2" diff --git a/third_party/rust/h2/LICENSE b/third_party/rust/h2/LICENSE new file mode 100644 index 000000000000..11239dd1c157 --- /dev/null +++ b/third_party/rust/h2/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2017 h2 authors + +Permission is hereby 
granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/h2/README.md b/third_party/rust/h2/README.md new file mode 100644 index 000000000000..d3b5a761eebd --- /dev/null +++ b/third_party/rust/h2/README.md @@ -0,0 +1,73 @@ +# H2 + +A Tokio aware, HTTP/2.0 client & server implementation for Rust. + +[![Build Status](https://travis-ci.org/carllerche/h2.svg?branch=master)](https://travis-ci.org/carllerche/h2) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +[![Crates.io](https://img.shields.io/crates/v/h2.svg?maxAge=2592000)](https://crates.io/crates/h2) +[![Documentation](https://docs.rs/h2/badge.svg)][dox] + +More information about this crate can be found in the [crate documentation][dox]. + +[dox]: https://docs.rs/h2 + +## Features + +* Client and server HTTP/2.0 implementation. +* Implements the full HTTP/2.0 specification. +* Passes [h2spec](https://github.com/summerwind/h2spec). +* Focus on performance and correctness. 
+* Built on [Tokio](https://tokio.rs). + +## Non goals + +This crate is intended to only be an implementation of the HTTP/2.0 +specification. It does not handle: + +* Managing TCP connections +* HTTP 1.0 upgrade +* TLS +* Any feature not described by the HTTP/2.0 specification. + +The intent is that this crate will eventually be used by +[hyper](https://github.com/hyperium/hyper), which will provide all of these features. + +## Usage + +To use `h2`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +h2 = "0.1" +``` + +Next, add this to your crate: + +```rust +extern crate h2; + +use h2::server::Connection; + +fn main() { + // ... +} +``` + +## FAQ + +**How does h2 compare to [solicit] or [rust-http2]?** + +The h2 library has implemented more of the details of the HTTP/2.0 specification +than any other Rust library. It also passes the [h2spec] set of tests. The h2 +library is rapidly approaching "production ready" quality. + +Besides the above, Solicit is built on blocking I/O and does not appear to be +actively maintained. + +**Is this an embedded Java SQL database engine?** + +[No](http://www.h2database.com). + +[solicit]: https://github.com/mlalic/solicit +[rust-http2]: https://github.com/stepancheg/rust-http2 +[h2spec]: https://github.com/summerwind/h2spec diff --git a/third_party/rust/h2/ci/h2spec.sh b/third_party/rust/h2/ci/h2spec.sh new file mode 100755 index 000000000000..aec3f531eeb0 --- /dev/null +++ b/third_party/rust/h2/ci/h2spec.sh @@ -0,0 +1,28 @@ +#!/bin/bash +LOGFILE="h2server.log" + +if ! [ -e "h2spec" ] ; then + # if we don't already have a h2spec executable, wget it from github + wget https://github.com/summerwind/h2spec/releases/download/v2.1.0/h2spec_linux_amd64.tar.gz + tar xf h2spec_linux_amd64.tar.gz +fi + +cargo build --example server +exec 3< <(./target/debug/examples/server); +SERVER_PID=$! + +# wait 'til the server is listening before running h2spec, and pipe server's +# stdout to a log file. 
+sed '/listening on Ok(V4(127.0.0.1:5928))/q' <&3 ; cat <&3 > "${LOGFILE}" & + +# run h2spec against the server, printing the server log if h2spec failed +./h2spec -p 5928 +H2SPEC_STATUS=$? +if [ "${H2SPEC_STATUS}" -eq 0 ]; then + echo "h2spec passed!" +else + echo "h2spec failed! server logs:" + cat "${LOGFILE}" +fi +kill "${SERVER_PID}" +exit "${H2SPEC_STATUS}" diff --git a/third_party/rust/h2/examples/akamai.rs b/third_party/rust/h2/examples/akamai.rs new file mode 100644 index 000000000000..9ffef7eaae55 --- /dev/null +++ b/third_party/rust/h2/examples/akamai.rs @@ -0,0 +1,93 @@ +extern crate env_logger; +extern crate futures; +extern crate h2; +extern crate http; +extern crate rustls; +extern crate tokio_core; +extern crate tokio_rustls; +extern crate webpki; +extern crate webpki_roots; + +use h2::client; + +use futures::*; +use http::{Method, Request}; + +use tokio_core::net::TcpStream; +use tokio_core::reactor; + +use rustls::Session; +use tokio_rustls::ClientConfigExt; +use webpki::DNSNameRef; + +use std::net::ToSocketAddrs; + +const ALPN_H2: &str = "h2"; + +pub fn main() { + let _ = env_logger::try_init(); + + let tls_client_config = std::sync::Arc::new({ + let mut c = rustls::ClientConfig::new(); + c.root_store + .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); + c.alpn_protocols.push(ALPN_H2.to_owned()); + c + }); + + // Sync DNS resolution. 
+ let addr = "http2.akamai.com:443" + .to_socket_addrs() + .unwrap() + .next() + .unwrap(); + + println!("ADDR: {:?}", addr); + + let mut core = reactor::Core::new().unwrap(); + let handle = core.handle(); + + let tcp = TcpStream::connect(&addr, &handle); + let dns_name = DNSNameRef::try_from_ascii_str("http2.akamai.com").unwrap(); + + let tcp = tcp.then(|res| { + let tcp = res.unwrap(); + tls_client_config + .connect_async(dns_name, tcp) + .then(|res| { + let tls = res.unwrap(); + { + let (_, session) = tls.get_ref(); + let negotiated_protocol = session.get_alpn_protocol(); + assert_eq!(Some(ALPN_H2), negotiated_protocol.as_ref().map(|x| &**x)); + } + + println!("Starting client handshake"); + client::handshake(tls) + }) + .then(|res| { + let (mut client, h2) = res.unwrap(); + + let request = Request::builder() + .method(Method::GET) + .uri("https://http2.akamai.com/") + .body(()) + .unwrap(); + + let (response, _) = client.send_request(request, true).unwrap(); + + let stream = response.and_then(|response| { + let (_, body) = response.into_parts(); + + body.for_each(|chunk| { + println!("RX: {:?}", chunk); + Ok(()) + }) + }); + + h2.join(stream) + }) + }); + + core.run(tcp).unwrap(); +} diff --git a/third_party/rust/h2/examples/client.rs b/third_party/rust/h2/examples/client.rs new file mode 100644 index 000000000000..8a0d03ad2de1 --- /dev/null +++ b/third_party/rust/h2/examples/client.rs @@ -0,0 +1,97 @@ +extern crate env_logger; +extern crate futures; +extern crate h2; +extern crate http; +extern crate tokio_core; + +use h2::client; +use h2::RecvStream; + +use futures::*; +use http::*; + +use tokio_core::net::TcpStream; +use tokio_core::reactor; + +struct Process { + body: RecvStream, + trailers: bool, +} + +impl Future for Process { + type Item = (); + type Error = h2::Error; + + fn poll(&mut self) -> Poll<(), h2::Error> { + loop { + if self.trailers { + let trailers = try_ready!(self.body.poll_trailers()); + + println!("GOT TRAILERS: {:?}", trailers); + + 
return Ok(().into()); + } else { + match try_ready!(self.body.poll()) { + Some(chunk) => { + println!("GOT CHUNK = {:?}", chunk); + }, + None => { + self.trailers = true; + }, + } + } + } + } +} + +pub fn main() { + let _ = env_logger::try_init(); + + let mut core = reactor::Core::new().unwrap(); + let handle = core.handle(); + + let tcp = TcpStream::connect(&"127.0.0.1:5928".parse().unwrap(), &handle); + + let tcp = tcp.then(|res| { + let tcp = res.unwrap(); + client::handshake(tcp) + }).then(|res| { + let (mut client, h2) = res.unwrap(); + + println!("sending request"); + + let request = Request::builder() + .uri("https://http2.akamai.com/") + .body(()) + .unwrap(); + + let mut trailers = HeaderMap::new(); + trailers.insert("zomg", "hello".parse().unwrap()); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + // send trailers + stream.send_trailers(trailers).unwrap(); + + // Spawn a task to run the conn... + handle.spawn(h2.map_err(|e| println!("GOT ERR={:?}", e))); + + response + .and_then(|response| { + println!("GOT RESPONSE: {:?}", response); + + // Get the body + let (_, body) = response.into_parts(); + + Process { + body, + trailers: false, + } + }) + .map_err(|e| { + println!("GOT ERR={:?}", e); + }) + }); + + core.run(tcp).unwrap(); +} diff --git a/third_party/rust/h2/examples/server.rs b/third_party/rust/h2/examples/server.rs new file mode 100644 index 000000000000..6167e5ee031d --- /dev/null +++ b/third_party/rust/h2/examples/server.rs @@ -0,0 +1,72 @@ +extern crate bytes; +extern crate env_logger; +extern crate futures; +extern crate h2; +extern crate http; +extern crate tokio_core; + +use h2::server; + +use bytes::*; +use futures::*; +use http::*; + +use tokio_core::net::TcpListener; +use tokio_core::reactor; + +pub fn main() { + let _ = env_logger::try_init(); + + let mut core = reactor::Core::new().unwrap(); + let handle = core.handle(); + + let listener = TcpListener::bind(&"127.0.0.1:5928".parse().unwrap(), 
&handle).unwrap(); + + println!("listening on {:?}", listener.local_addr()); + + let server = listener.incoming().for_each(move |(socket, _)| { + // let socket = io_dump::Dump::to_stdout(socket); + + let connection = server::handshake(socket) + .and_then(|conn| { + println!("H2 connection bound"); + + conn.for_each(|(request, mut respond)| { + println!("GOT request: {:?}", request); + + let response = Response::builder().status(StatusCode::OK).body(()).unwrap(); + + let mut send = match respond.send_response(response, false) { + Ok(send) => send, + Err(e) => { + println!(" error respond; err={:?}", e); + return Ok(()); + } + }; + + println!(">>>> sending data"); + if let Err(e) = send.send_data(Bytes::from_static(b"hello world"), true) { + println!(" -> err={:?}", e); + } + + Ok(()) + }) + }) + .and_then(|_| { + println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); + Ok(()) + }) + .then(|res| { + if let Err(e) = res { + println!(" -> err={:?}", e); + } + + Ok(()) + }); + + handle.spawn(connection); + Ok(()) + }); + + core.run(server).unwrap(); +} diff --git a/third_party/rust/h2/src/client.rs b/third_party/rust/h2/src/client.rs new file mode 100644 index 000000000000..a94f26f20ec2 --- /dev/null +++ b/third_party/rust/h2/src/client.rs @@ -0,0 +1,1472 @@ +//! Client implementation of the HTTP/2.0 protocol. +//! +//! # Getting started +//! +//! Running an HTTP/2.0 client requires the caller to establish the underlying +//! connection as well as get the connection to a state that is ready to begin +//! the HTTP/2.0 handshake. See [here](../index.html#handshake) for more +//! details. +//! +//! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote +//! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades. +//! +//! Once a connection is obtained, it is passed to [`handshake`], which will +//! begin the [HTTP/2.0 handshake]. This returns a future that completes once +//! 
the handshake process is performed and HTTP/2.0 streams may be initialized. +//! +//! [`handshake`] uses default configuration values. There are a number of +//! settings that can be changed by using [`Builder`] instead. +//! +//! Once the handshake future completes, the caller is provided with a +//! [`Connection`] instance and a [`SendRequest`] instance. The [`Connection`] +//! instance is used to drive the connection (see [Managing the connection]). +//! The [`SendRequest`] instance is used to initialize new streams (see [Making +//! requests]). +//! +//! # Making requests +//! +//! Requests are made using the [`SendRequest`] handle provided by the handshake +//! future. Once a request is submitted, an HTTP/2.0 stream is initialized and +//! the request is sent to the server. +//! +//! A request body and request trailers are sent using [`SendRequest`] and the +//! server's response is returned once the [`ResponseFuture`] future completes. +//! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by +//! [`SendRequest::send_request`] and are tied to the HTTP/2.0 stream +//! initialized by the sent request. +//! +//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2.0 +//! stream can be created, i.e. as long as the current number of active streams +//! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the +//! caller will be notified once an existing stream closes, freeing capacity for +//! the caller. The caller should use [`SendRequest::poll_ready`] to check for +//! capacity before sending a request to the server. +//! +//! [`SendRequest`] enforces the [`MAX_CONCURRENT_STREAMS`] setting. The user +//! must not send a request if `poll_ready` does not return `Ready`. Attempting +//! to do so will result in an [`Error`] being returned. +//! +//! # Managing the connection +//! +//! The [`Connection`] instance is used to manage connection state. The caller +//! 
is required to call [`Connection::poll`] in order to advance state. +//! [`SendRequest::send_request`] and other functions have no effect unless +//! [`Connection::poll`] is called. +//! +//! The [`Connection`] instance should only be dropped once [`Connection::poll`] +//! returns `Ready`. At this point, the underlying socket has been closed and no +//! further work needs to be done. +//! +//! The easiest way to ensure that the [`Connection`] instance gets polled is to +//! submit the [`Connection`] instance to an [executor]. The executor will then +//! manage polling the connection until the connection is complete. +//! Alternatively, the caller can call `poll` manually. +//! +//! # Example +//! +//! ```rust +//! extern crate futures; +//! extern crate h2; +//! extern crate http; +//! extern crate tokio_core; +//! +//! use h2::client; +//! +//! use futures::*; +//! # use futures::future::ok; +//! use http::*; +//! +//! use tokio_core::net::TcpStream; +//! use tokio_core::reactor; +//! +//! pub fn main() { +//! let mut core = reactor::Core::new().unwrap(); +//! let handle = core.handle(); +//! +//! let addr = "127.0.0.1:5928".parse().unwrap(); +//! +//! core.run({ +//! # let _ = +//! // Establish TCP connection to the server. +//! TcpStream::connect(&addr, &handle) +//! .map_err(|_| { +//! panic!("failed to establish TCP connection") +//! }) +//! .and_then(|tcp| client::handshake(tcp)) +//! .and_then(|(h2, connection)| { +//! let connection = connection +//! .map_err(|_| panic!("HTTP/2.0 connection failed")); +//! +//! // Spawn a new task to drive the connection state +//! handle.spawn(connection); +//! +//! // Wait until the `SendRequest` handle has available +//! // capacity. +//! h2.ready() +//! }) +//! .and_then(|mut h2| { +//! // Prepare the HTTP request to send to the server. +//! let request = Request::builder() +//! .method(Method::GET) +//! .uri("https://www.example.com/") +//! .body(()) +//! .unwrap(); +//! +//! // Send the request. 
The second tuple item allows the caller +//! // to stream a request body. +//! let (response, _) = h2.send_request(request, true).unwrap(); +//! +//! response.and_then(|response| { +//! let (head, mut body) = response.into_parts(); +//! +//! println!("Received response: {:?}", head); +//! +//! // The `release_capacity` handle allows the caller to manage +//! // flow control. +//! // +//! // Whenever data is received, the caller is responsible for +//! // releasing capacity back to the server once it has freed +//! // the data from memory. +//! let mut release_capacity = body.release_capacity().clone(); +//! +//! body.for_each(move |chunk| { +//! println!("RX: {:?}", chunk); +//! +//! // Let the server send more data. +//! let _ = release_capacity.release_capacity(chunk.len()); +//! +//! Ok(()) +//! }) +//! }) +//! }) +//! # ; +//! # ok::<_, ()>(()) +//! }).ok().expect("failed to perform HTTP/2.0 request"); +//! } +//! ``` +//! +//! [`TcpStream`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpStream.html +//! [`handshake`]: fn.handshake.html +//! [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html +//! [`SendRequest`]: struct.SendRequest.html +//! [`SendStream`]: ../struct.SendStream.html +//! [Making requests]: #making-requests +//! [Managing the connection]: #managing-the-connection +//! [`Connection`]: struct.Connection.html +//! [`Connection::poll`]: struct.Connection.html#method.poll +//! [`SendRequest::send_request`]: struct.SendRequest.html#method.send_request +//! [`MAX_CONCURRENT_STREAMS`]: http://httpwg.org/specs/rfc7540.html#SettingValues +//! [`SendRequest`]: struct.SendRequest.html +//! [`ResponseFuture`]: struct.ResponseFuture.html +//! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready +//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +//! [`Builder`]: struct.Builder.html +//! 
[`Error`]: ../struct.Error.html + +use {SendStream, RecvStream, ReleaseCapacity}; +use codec::{Codec, RecvError, SendError, UserError}; +use frame::{Headers, Pseudo, Reason, Settings, StreamId}; +use proto; + +use bytes::{Bytes, IntoBuf}; +use futures::{Async, Future, Poll}; +use http::{uri, Request, Response, Method, Version}; +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_io::io::WriteAll; + +use std::fmt; +use std::marker::PhantomData; +use std::time::Duration; +use std::usize; + +/// Performs the HTTP/2.0 connection handshake. +/// +/// This type implements `Future`, yielding a `(SendRequest, Connection)` +/// instance once the handshake has completed. +/// +/// The handshake is completed once both the connection preface and the initial +/// settings frame is sent by the client. +/// +/// The handshake future does not wait for the initial settings frame from the +/// server. +/// +/// See [module] level documentation for more details. +/// +/// [module]: index.html +#[must_use = "futures do nothing unless polled"] +pub struct Handshake { + builder: Builder, + inner: WriteAll, + _marker: PhantomData, +} + +/// Initializes new HTTP/2.0 streams on a connection by sending a request. +/// +/// This type does no work itself. Instead, it is a handle to the inner +/// connection state held by [`Connection`]. If the associated connection +/// instance is dropped, all `SendRequest` functions will return [`Error`]. +/// +/// [`SendRequest`] instances are able to move to and operate on separate tasks +/// / threads than their associated [`Connection`] instance. Internally, there +/// is a buffer used to stage requests before they get written to the +/// connection. There is no guarantee that requests get written to the +/// connection in FIFO order as HTTP/2.0 prioritization logic can play a role. +/// +/// [`SendRequest`] implements [`Clone`], enabling the creation of many +/// instances that are backed by a single connection. 
+/// +/// See [module] level documentation for more details. +/// +/// [module]: index.html +/// [`Connection`]: struct.Connection.html +/// [`Clone`]: https://doc.rust-lang.org/std/clone/trait.Clone.html +/// [`Error`]: ../struct.Error.html +pub struct SendRequest { + inner: proto::Streams, + pending: Option, +} + +/// Returns a `SendRequest` instance once it is ready to send at least one +/// request. +#[derive(Debug)] +pub struct ReadySendRequest { + inner: Option>, +} + +/// Manages all state associated with an HTTP/2.0 client connection. +/// +/// A `Connection` is backed by an I/O resource (usually a TCP socket) and +/// implements the HTTP/2.0 client logic for that connection. It is responsible +/// for driving the internal state forward, performing the work requested of the +/// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`], +/// [`RecvStream`]). +/// +/// `Connection` values are created by calling [`handshake`]. Once a +/// `Connection` value is obtained, the caller must repeatedly call [`poll`] +/// until `Ready` is returned. The easiest way to do this is to submit the +/// `Connection` instance to an [executor]. 
+/// +/// [module]: index.html +/// [`handshake`]: fn.handshake.html +/// [`SendRequest`]: struct.SendRequest.html +/// [`ResponseFuture`]: struct.ResponseFuture.html +/// [`SendStream`]: ../struct.SendStream.html +/// [`RecvStream`]: ../struct.RecvStream.html +/// [`poll`]: #method.poll +/// [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html +/// +/// # Examples +/// +/// ``` +/// # extern crate bytes; +/// # extern crate futures; +/// # extern crate h2; +/// # extern crate tokio_io; +/// # use futures::{Future, Stream}; +/// # use futures::future::Executor; +/// # use tokio_io::*; +/// # use h2::client; +/// # use h2::client::*; +/// # +/// # fn doc(my_io: T, my_executor: E) +/// # where T: AsyncRead + AsyncWrite + 'static, +/// # E: Executor>>, +/// # { +/// client::handshake(my_io) +/// .and_then(|(send_request, connection)| { +/// // Submit the connection handle to an executor. +/// my_executor.execute( +/// # Box::new( +/// connection.map_err(|_| panic!("connection failed")) +/// # ) +/// ).unwrap(); +/// +/// // Now, use `send_request` to initialize HTTP/2.0 streams. +/// // ... +/// # drop(send_request); +/// # Ok(()) +/// }) +/// # .wait().unwrap(); +/// # } +/// # +/// # pub fn main() {} +/// ``` +#[must_use = "futures do nothing unless polled"] +pub struct Connection { + inner: proto::Connection, +} + +/// A future of an HTTP response. +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + inner: proto::OpaqueStreamRef, +} + +/// Builds client connections with custom configuration values. +/// +/// Methods can be chained in order to set the configuration values. +/// +/// The client is constructed by calling [`handshake`] and passing the I/O +/// handle that will back the HTTP/2.0 server. +/// +/// New instances of `Builder` are obtained via [`Builder::new`]. +/// +/// See function level documentation for details on the various client +/// configuration settings. 
+/// +/// [`Builder::new`]: struct.Builder.html#method.new +/// [`handshake`]: struct.Builder.html#method.handshake +/// +/// # Examples +/// +/// ``` +/// # extern crate h2; +/// # extern crate tokio_io; +/// # use tokio_io::*; +/// # use h2::client::*; +/// # +/// # fn doc(my_io: T) +/// # -> Handshake +/// # { +/// // `client_fut` is a future representing the completion of the HTTP/2.0 +/// // handshake. +/// let client_fut = Builder::new() +/// .initial_window_size(1_000_000) +/// .max_concurrent_streams(1000) +/// .handshake(my_io); +/// # client_fut +/// # } +/// # +/// # pub fn main() {} +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + /// Time to keep locally reset streams around before reaping. + reset_stream_duration: Duration, + + /// Initial maximum number of locally initiated (send) streams. + /// After receiving a Settings frame from the remote peer, + /// the connection will overwrite this value with the + /// MAX_CONCURRENT_STREAMS specified in the frame. + initial_max_send_streams: usize, + + /// Initial target window size for new connections. + initial_target_connection_window_size: Option, + + /// Maximum number of locally reset streams to keep at a time. + reset_stream_max: usize, + + /// Initial `Settings` frame to send as part of the handshake. + settings: Settings, + + /// The stream ID of the first (lowest) stream. Subsequent streams will use + /// monotonically increasing stream IDs. + stream_id: StreamId, +} + +#[derive(Debug)] +pub(crate) struct Peer; + +// ===== impl SendRequest ===== + +impl SendRequest +where + B: IntoBuf, + B::Buf: 'static, +{ + /// Returns `Ready` when the connection can initialize a new HTTP/2.0 + /// stream. + /// + /// This function must return `Ready` before `send_request` is called. When + /// `NotReady` is returned, the task will be notified once the readiness + /// state changes. + /// + /// See [module] level docs for more details. 
+ /// + /// [module]: index.html + pub fn poll_ready(&mut self) -> Poll<(), ::Error> { + try_ready!(self.inner.poll_pending_open(self.pending.as_ref())); + self.pending = None; + Ok(().into()) + } + + /// Consumes `self`, returning a future that returns `self` back once it is + /// ready to send a request. + /// + /// This function should be called before calling `send_request`. + /// + /// This is a functional combinator for [`poll_ready`]. The returned future + /// will call `SendStream::poll_ready` until `Ready`, then returns `self` to + /// the caller. + /// + /// # Examples + /// + /// ```rust + /// # extern crate futures; + /// # extern crate h2; + /// # extern crate http; + /// # use futures::*; + /// # use h2::client::*; + /// # use http::*; + /// # fn doc(send_request: SendRequest<&'static [u8]>) + /// # { + /// // First, wait until the `send_request` handle is ready to send a new + /// // request + /// send_request.ready() + /// .and_then(|mut send_request| { + /// // Use `send_request` here. + /// # Ok(()) + /// }) + /// # .wait().unwrap(); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// See [module] level docs for more details. + /// + /// [module]: index.html + pub fn ready(self) -> ReadySendRequest { + ReadySendRequest { inner: Some(self) } + } + + /// Sends a HTTP/2.0 request to the server. + /// + /// `send_request` initializes a new HTTP/2.0 stream on the associated + /// connection, then sends the given request using this new stream. Only the + /// request head is sent. + /// + /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance + /// are returned. The [`ResponseFuture`] instance is used to get the + /// server's response and the [`SendStream`] instance is used to send a + /// request body or trailers to the server over the same HTTP/2.0 stream. + /// + /// To send a request body or trailers, set `end_of_stream` to `false`. 
+ /// Then, use the returned [`SendStream`] instance to stream request body + /// chunks or send trailers. If `end_of_stream` is **not** set to `false` + /// then attempting to call [`SendStream::send_data`] or + /// [`SendStream::send_trailers`] will result in an error. + /// + /// If no request body or trailers are to be sent, set `end_of_stream` to + /// `true` and drop the returned [`SendStream`] instance. + /// + /// # A note on HTTP versions + /// + /// The provided `Request` will be encoded differently depending on the + /// value of its version field. If the version is set to 2.0, then the + /// request is encoded as per the specification recommends. + /// + /// If the version is set to a lower value, then the request is encoded to + /// preserve the characteristics of HTTP 1.1 and lower. Specifically, host + /// headers are permitted and the `:authority` pseudo header is not + /// included. + /// + /// The caller should always set the request's version field to 2.0 unless + /// specifically transmitting an HTTP 1.1 request over 2.0. + /// + /// # Examples + /// + /// Sending a request with no body + /// + /// ```rust + /// # extern crate futures; + /// # extern crate h2; + /// # extern crate http; + /// # use futures::*; + /// # use h2::client::*; + /// # use http::*; + /// # fn doc(send_request: SendRequest<&'static [u8]>) + /// # { + /// // First, wait until the `send_request` handle is ready to send a new + /// // request + /// send_request.ready() + /// .and_then(|mut send_request| { + /// // Prepare the HTTP request to send to the server. + /// let request = Request::get("https://www.example.com/") + /// .body(()) + /// .unwrap(); + /// + /// // Send the request to the server. Since we are not sending a + /// // body or trailers, we can drop the `SendStream` instance. 
+ /// let (response, _) = send_request + /// .send_request(request, true).unwrap(); + /// + /// response + /// }) + /// .and_then(|response| { + /// // Process the response + /// # Ok(()) + /// }) + /// # .wait().unwrap(); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// Sending a request with a body and trailers + /// + /// ```rust + /// # extern crate futures; + /// # extern crate h2; + /// # extern crate http; + /// # use futures::*; + /// # use h2::client::*; + /// # use http::*; + /// # fn doc(send_request: SendRequest<&'static [u8]>) + /// # { + /// // First, wait until the `send_request` handle is ready to send a new + /// // request + /// send_request.ready() + /// .and_then(|mut send_request| { + /// // Prepare the HTTP request to send to the server. + /// let request = Request::get("https://www.example.com/") + /// .body(()) + /// .unwrap(); + /// + /// // Send the request to the server. Since we are not sending a + /// // body or trailers, we can drop the `SendStream` instance. + /// let (response, mut send_stream) = send_request + /// .send_request(request, false).unwrap(); + /// + /// // At this point, one option would be to wait for send capacity. + /// // Doing so would allow us to not hold data in memory that + /// // cannot be sent. However, this is not a requirement, so this + /// // example will skip that step. See `SendStream` documentation + /// // for more details. + /// send_stream.send_data(b"hello", false).unwrap(); + /// send_stream.send_data(b"world", false).unwrap(); + /// + /// // Send the trailers. 
+ /// let mut trailers = HeaderMap::new(); + /// trailers.insert( + /// header::HeaderName::from_bytes(b"my-trailer").unwrap(), + /// header::HeaderValue::from_bytes(b"hello").unwrap()); + /// + /// send_stream.send_trailers(trailers).unwrap(); + /// + /// response + /// }) + /// .and_then(|response| { + /// // Process the response + /// # Ok(()) + /// }) + /// # .wait().unwrap(); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// [`ResponseFuture`]: struct.ResponseFuture.html + /// [`SendStream`]: ../struct.SendStream.html + /// [`SendStream::send_data`]: ../struct.SendStream.html#method.send_data + /// [`SendStream::send_trailers`]: ../struct.SendStream.html#method.send_trailers + pub fn send_request( + &mut self, + request: Request<()>, + end_of_stream: bool, + ) -> Result<(ResponseFuture, SendStream), ::Error> { + self.inner + .send_request(request, end_of_stream, self.pending.as_ref()) + .map_err(Into::into) + .map(|stream| { + if stream.is_pending_open() { + self.pending = Some(stream.clone_to_opaque()); + } + + let response = ResponseFuture { + inner: stream.clone_to_opaque(), + }; + + let stream = SendStream::new(stream); + + (response, stream) + }) + } +} + +impl fmt::Debug for SendRequest +where + B: IntoBuf, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("SendRequest").finish() + } +} + +impl Clone for SendRequest +where + B: IntoBuf, +{ + fn clone(&self) -> Self { + SendRequest { + inner: self.inner.clone(), + pending: None, + } + } +} + +#[cfg(feature = "unstable")] +impl SendRequest +where + B: IntoBuf, +{ + /// Returns the number of active streams. + /// + /// An active stream is a stream that has not yet transitioned to a closed + /// state. + pub fn num_active_streams(&self) -> usize { + self.inner.num_active_streams() + } + + /// Returns the number of streams that are held in memory. + /// + /// A wired stream is a stream that is either active or is closed but must + /// stay in memory for some reason. 
For example, there are still outstanding + /// userspace handles pointing to the slot. + pub fn num_wired_streams(&self) -> usize { + self.inner.num_wired_streams() + } +} + +// ===== impl ReadySendRequest ===== + +impl Future for ReadySendRequest +where B: IntoBuf, + B::Buf: 'static, +{ + type Item = SendRequest; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + match self.inner { + Some(ref mut send_request) => { + let _ = try_ready!(send_request.poll_ready()); + } + None => panic!("called `poll` after future completed"), + } + + Ok(self.inner.take().unwrap().into()) + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Returns a new client builder instance initialized with default + /// configuration values. + /// + /// Configuration methods can be chained on the return value. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .initial_window_size(1_000_000) + /// .max_concurrent_streams(1000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn new() -> Builder { + Builder { + reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), + reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, + initial_target_connection_window_size: None, + initial_max_send_streams: usize::MAX, + settings: Default::default(), + stream_id: 1.into(), + } + } + + /// Indicates the initial window size (in octets) for stream-level + /// flow control for received data. + /// + /// The initial window of a stream is used as part of flow control. For more + /// details, see [`ReleaseCapacity`]. + /// + /// The default value is 65,535. 
+ /// + /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .initial_window_size(1_000_000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn initial_window_size(&mut self, size: u32) -> &mut Self { + self.settings.set_initial_window_size(Some(size)); + self + } + + /// Indicates the initial window size (in octets) for connection-level flow control + /// for received data. + /// + /// The initial window of a connection is used as part of flow control. For more details, + /// see [`ReleaseCapacity`]. + /// + /// The default value is 65,535. + /// + /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .initial_connection_window_size(1_000_000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self { + self.initial_target_connection_window_size = Some(size); + self + } + + /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the + /// configured client is able to accept. + /// + /// The sender may send data frames that are **smaller** than this value, + /// but any data larger than `max` will be broken up into multiple `DATA` + /// frames. 
+ /// + /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .max_frame_size(1_000_000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + /// + /// # Panics + /// + /// This function panics if `max` is not within the legal range specified + /// above. + pub fn max_frame_size(&mut self, max: u32) -> &mut Self { + self.settings.set_max_frame_size(Some(max)); + self + } + + /// Sets the max size of received header frames. + /// + /// This advisory setting informs a peer of the maximum size of header list + /// that the sender is prepared to accept, in octets. The value is based on + /// the uncompressed size of header fields, including the length of the name + /// and value in octets plus an overhead of 32 octets for each header field. + /// + /// This setting is also used to limit the maximum amount of data that is + /// buffered to decode HEADERS frames. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .max_header_list_size(16 * 1024) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.settings.set_max_header_list_size(Some(max)); + self + } + + /// Sets the maximum number of concurrent streams. 
+ /// + /// The maximum concurrent streams setting only controls the maximum number + /// of streams that can be initiated by the remote peer. In other words, + /// when this setting is set to 100, this does not limit the number of + /// concurrent streams that can be created by the caller. + /// + /// It is recommended that this value be no smaller than 100, so as to not + /// unnecessarily limit parallelism. However, any value is legal, including + /// 0. If `max` is set to 0, then the remote will not be permitted to + /// initiate streams. + /// + /// Note that streams in the reserved state, i.e., push promises that have + /// been reserved but the stream has not started, do not count against this + /// setting. + /// + /// Also note that if the remote *does* exceed the value set here, it is not + /// a protocol level error. Instead, the `h2` library will immediately reset + /// the stream. + /// + /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// + /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .max_concurrent_streams(1000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self { + self.settings.set_max_concurrent_streams(Some(max)); + self + } + + /// Sets the initial maximum of locally initiated (send) streams. + /// + /// The initial settings will be overwritten by the remote peer when + /// the Settings frame is received. The new value will be set to the + /// `max_concurrent_streams()` from the frame. 
+ /// + /// This setting prevents the caller from exceeding this number of + /// streams that are counted towards the concurrency limit. + /// + /// Sending streams past the limit returned by the peer will be treated + /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. + /// + /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// + /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .initial_max_send_streams(1000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn initial_max_send_streams(&mut self, initial: usize) -> &mut Self { + self.initial_max_send_streams = initial; + self + } + + /// Sets the maximum number of concurrent locally reset streams. + /// + /// When a stream is explicitly reset by either calling + /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance + /// before completing the stream, the HTTP/2.0 specification requires that + /// any further frames received for that stream must be ignored for "some + /// time". + /// + /// In order to satisfy the specification, internal state must be maintained + /// to implement the behavior. This state grows linearly with the number of + /// streams that are locally reset. + /// + /// The `max_concurrent_reset_streams` setting configures sets an upper + /// bound on the amount of state that is maintained. When this max value is + /// reached, the oldest reset stream is purged from memory. 
+ /// + /// Once the stream has been fully purged from memory, any additional frames + /// received for that stream will result in a connection level protocol + /// error, forcing the connection to terminate. + /// + /// The default value is 10. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .max_concurrent_reset_streams(1000) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.reset_stream_max = max; + self + } + + /// Sets the maximum number of concurrent locally reset streams. + /// + /// When a stream is explicitly reset by either calling + /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance + /// before completing the stream, the HTTP/2.0 specification requires that + /// any further frames received for that stream must be ignored for "some + /// time". + /// + /// In order to satisfy the specification, internal state must be maintained + /// to implement the behavior. This state grows linearly with the number of + /// streams that are locally reset. + /// + /// The `reset_stream_duration` setting configures the max amount of time + /// this state will be maintained in memory. Once the duration elapses, the + /// stream state is purged from memory. + /// + /// Once the stream has been fully purged from memory, any additional frames + /// received for that stream will result in a connection level protocol + /// error, forcing the connection to terminate. + /// + /// The default value is 30 seconds. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # use std::time::Duration; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .reset_stream_duration(Duration::from_secs(10)) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self { + self.reset_stream_duration = dur; + self + } + + /// Enables or disables server push promises. + /// + /// This value is included in the initial SETTINGS handshake. When set, the + /// server MUST NOT send a push promise. Setting this value to value to + /// false in the initial SETTINGS handshake guarantees that the remote server + /// will never send a push promise. + /// + /// This setting can be changed during the life of a single HTTP/2.0 + /// connection by sending another settings frame updating the value. + /// + /// Default value: `true`. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # use std::time::Duration; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .enable_push(false) + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn enable_push(&mut self, enabled: bool) -> &mut Self { + self.settings.set_enable_push(enabled); + self + } + + /// Sets the first stream ID to something other than 1. 
+ #[cfg(feature = "unstable")] + pub fn initial_stream_id(&mut self, stream_id: u32) -> &mut Self { + self.stream_id = stream_id.into(); + assert!( + self.stream_id.is_client_initiated(), + "stream id must be odd" + ); + self + } + + /// Creates a new configured HTTP/2.0 client backed by `io`. + /// + /// It is expected that `io` already be in an appropriate state to commence + /// the [HTTP/2.0 handshake]. See [Handshake] for more details. + /// + /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] + /// tuple once the HTTP/2.0 handshake has been completed. + /// + /// This function also allows the caller to configure the send payload data + /// type. See [Outbound data type] for more details. + /// + /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader + /// [Handshake]: ../index.html#handshake + /// [`Connection`]: struct.Connection.html + /// [`SendRequest`]: struct.SendRequest.html + /// [Outbound data type]: ../index.html#outbound-data-type. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let client_fut = Builder::new() + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + /// + /// Configures the send-payload data type. In this case, the outbound data + /// type will be `&'static [u8]`. + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::client::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. 
+ /// let client_fut: Handshake<_, &'static [u8]> = Builder::new() + /// .handshake(my_io); + /// # client_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn handshake(&self, io: T) -> Handshake + where + T: AsyncRead + AsyncWrite, + B: IntoBuf, + B::Buf: 'static, + { + Connection::handshake2(io, self.clone()) + } +} + +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} + +/// Creates a new configured HTTP/2.0 client with default configuration +/// values backed by `io`. +/// +/// It is expected that `io` already be in an appropriate state to commence +/// the [HTTP/2.0 handshake]. See [Handshake] for more details. +/// +/// Returns a future which resolves to the [`Connection`] / [`SendRequest`] +/// tuple once the HTTP/2.0 handshake has been completed. The returned +/// [`Connection`] instance will be using default configuration values. Use +/// [`Builder`] to customize the configuration values used by a [`Connection`] +/// instance. +/// +/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +/// [Handshake]: ../index.html#handshake +/// [`Connection`]: struct.Connection.html +/// [`SendRequest`]: struct.SendRequest.html +/// +/// # Examples +/// +/// ``` +/// # extern crate futures; +/// # extern crate h2; +/// # extern crate tokio_io; +/// # use futures::*; +/// # use tokio_io::*; +/// # use h2::client; +/// # use h2::client::*; +/// # +/// # fn doc(my_io: T) +/// # { +/// client::handshake(my_io) +/// .and_then(|(send_request, connection)| { +/// // The HTTP/2.0 handshake has completed, now start polling +/// // `connection` and use `send_request` to send requests to the +/// // server. 
+/// # Ok(()) +/// }) +/// # .wait().unwrap(); +/// # } +/// # +/// # pub fn main() {} +/// ``` +pub fn handshake(io: T) -> Handshake +where T: AsyncRead + AsyncWrite, +{ + Builder::new().handshake(io) +} + +// ===== impl Connection ===== + +impl Connection +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + fn handshake2(io: T, builder: Builder) -> Handshake { + use tokio_io::io; + + debug!("binding client connection"); + + let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + let handshake = io::write_all(io, msg); + + Handshake { + builder, + inner: handshake, + _marker: PhantomData, + } + } + + /// Sets the target window size for the whole connection. + /// + /// If `size` is greater than the current value, then a `WINDOW_UPDATE` + /// frame will be immediately sent to the remote, increasing the connection + /// level window by `size - current_value`. + /// + /// If `size` is less than the current value, nothing will happen + /// immediately. However, as window capacity is released by + /// [`ReleaseCapacity`] instances, no `WINDOW_UPDATE` frames will be sent + /// out until the number of "in flight" bytes drops below `size`. + /// + /// The default value is 65,535. + /// + /// See [`ReleaseCapacity`] documentation for more details. 
+ /// + /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html + /// [library level]: ../index.html#flow-control + pub fn set_target_window_size(&mut self, size: u32) { + assert!(size <= proto::MAX_WINDOW_SIZE); + self.inner.set_target_window_size(size); + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll<(), ::Error> { + self.inner.maybe_close_connection_if_no_streams(); + self.inner.poll().map_err(Into::into) + } +} + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite, + T: fmt::Debug, + B: fmt::Debug + IntoBuf, + B::Buf: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, fmt) + } +} + +// ===== impl Handshake ===== + +impl Future for Handshake +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, + B::Buf: 'static, +{ + type Item = (SendRequest, Connection); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + let res = self.inner.poll() + .map_err(::Error::from); + + let (io, _) = try_ready!(res); + + debug!("client connection bound"); + + // Create the codec + let mut codec = Codec::new(io); + + if let Some(max) = self.builder.settings.max_frame_size() { + codec.set_max_recv_frame_size(max as usize); + } + + if let Some(max) = self.builder.settings.max_header_list_size() { + codec.set_max_recv_header_list_size(max as usize); + } + + // Send initial settings frame + codec + .buffer(self.builder.settings.clone().into()) + .expect("invalid SETTINGS frame"); + + let inner = proto::Connection::new(codec, proto::Config { + next_stream_id: self.builder.stream_id, + initial_max_send_streams: self.builder.initial_max_send_streams, + reset_stream_duration: self.builder.reset_stream_duration, + reset_stream_max: self.builder.reset_stream_max, + settings: self.builder.settings.clone(), + }); + let send_request = SendRequest { + inner: inner.streams().clone(), + pending: None, + }; + + let 
mut connection = Connection { inner }; + if let Some(sz) = self.builder.initial_target_connection_window_size { + connection.set_target_window_size(sz); + } + + Ok(Async::Ready((send_request, connection))) + } +} + +impl fmt::Debug for Handshake +where + T: AsyncRead + AsyncWrite, + T: fmt::Debug, + B: fmt::Debug + IntoBuf, + B::Buf: fmt::Debug + IntoBuf, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "client::Handshake") + } +} + +// ===== impl ResponseFuture ===== + +impl Future for ResponseFuture { + type Item = Response; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + let (parts, _) = try_ready!(self.inner.poll_response()).into_parts(); + let body = RecvStream::new(ReleaseCapacity::new(self.inner.clone())); + + Ok(Response::from_parts(parts, body).into()) + } +} + +impl ResponseFuture { + /// Returns the stream ID of the response stream. + /// + /// # Panics + /// + /// If the lock on the stream store has been poisoned. + pub fn stream_id(&self) -> ::StreamId { + ::StreamId::from_internal(self.inner.stream_id()) + } +} + +// ===== impl Peer ===== + +impl Peer { + pub fn convert_send_message( + id: StreamId, + request: Request<()>, + end_of_stream: bool) -> Result + { + use http::request::Parts; + + let ( + Parts { + method, + uri, + headers, + version, + .. + }, + _, + ) = request.into_parts(); + + let is_connect = method == Method::CONNECT; + + // Build the set pseudo header set. All requests will include `method` + // and `path`. + let mut pseudo = Pseudo::request(method, uri); + + if pseudo.scheme.is_none() { + // If the scheme is not set, then there are a two options. + // + // 1) Authority is not set. In this case, a request was issued with + // a relative URI. This is permitted **only** when forwarding + // HTTP 1.x requests. If the HTTP version is set to 2.0, then + // this is an error. + // + // 2) Authority is set, then the HTTP method *must* be CONNECT. 
+ // + // It is not possible to have a scheme but not an authority set (the + // `http` crate does not allow it). + // + if pseudo.authority.is_none() { + if version == Version::HTTP_2 { + return Err(UserError::MissingUriSchemeAndAuthority.into()); + } else { + // This is acceptable as per the above comment. However, + // HTTP/2.0 requires that a scheme is set. Since we are + // forwarding an HTTP 1.1 request, the scheme is set to + // "http". + pseudo.set_scheme(uri::Scheme::HTTP); + } + } else if !is_connect { + // TODO: Error + } + } + + // Create the HEADERS frame + let mut frame = Headers::new(id, pseudo, headers); + + if end_of_stream { + frame.set_end_stream() + } + + Ok(frame) + } +} + +impl proto::Peer for Peer { + type Poll = Response<()>; + + fn dyn() -> proto::DynPeer { + proto::DynPeer::Client + } + + fn is_server() -> bool { + false + } + + fn convert_poll_message(headers: Headers) -> Result { + let mut b = Response::builder(); + + let stream_id = headers.stream_id(); + let (pseudo, fields) = headers.into_parts(); + + b.version(Version::HTTP_2); + + if let Some(status) = pseudo.status { + b.status(status); + } + + let mut response = match b.body(()) { + Ok(response) => response, + Err(_) => { + // TODO: Should there be more specialized handling for different + // kinds of errors + return Err(RecvError::Stream { + id: stream_id, + reason: Reason::PROTOCOL_ERROR, + }); + }, + }; + + *response.headers_mut() = fields; + + Ok(response) + } +} diff --git a/third_party/rust/h2/src/codec/error.rs b/third_party/rust/h2/src/codec/error.rs new file mode 100644 index 000000000000..b979ae204ef1 --- /dev/null +++ b/third_party/rust/h2/src/codec/error.rs @@ -0,0 +1,146 @@ +use frame::{Reason, StreamId}; + +use std::{error, fmt, io}; + +/// Errors that are received +#[derive(Debug)] +pub enum RecvError { + Connection(Reason), + Stream { id: StreamId, reason: Reason }, + Io(io::Error), +} + +/// Errors caused by sending a message +#[derive(Debug)] +pub enum SendError 
{ + /// User error + User(UserError), + + /// Connection error prevents sending. + Connection(Reason), + + /// I/O error + Io(io::Error), +} + +/// Errors caused by users of the library +#[derive(Debug)] +pub enum UserError { + /// The stream ID is no longer accepting frames. + InactiveStreamId, + + /// The stream is not currently expecting a frame of this type. + UnexpectedFrameType, + + /// The payload size is too big + PayloadTooBig, + + /// The application attempted to initiate too many streams to remote. + Rejected, + + /// The released capacity is larger than claimed capacity. + ReleaseCapacityTooBig, + + /// The stream ID space is overflowed. + /// + /// A new connection is needed. + OverflowedStreamId, + + /// Illegal headers, such as connection-specific headers. + MalformedHeaders, + + /// Request submitted with relative URI. + MissingUriSchemeAndAuthority, + + /// Calls `SendResponse::poll_reset` after having called `send_response`. + PollResetAfterSendResponse, +} + +// ===== impl RecvError ===== + +impl From for RecvError { + fn from(src: io::Error) -> Self { + RecvError::Io(src) + } +} + +impl error::Error for RecvError { + fn description(&self) -> &str { + use self::RecvError::*; + + match *self { + Connection(ref reason) => reason.description(), + Stream { + ref reason, .. 
+ } => reason.description(), + Io(ref e) => e.description(), + } + } +} + +impl fmt::Display for RecvError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use std::error::Error; + write!(fmt, "{}", self.description()) + } +} + +// ===== impl SendError ===== + +impl error::Error for SendError { + fn description(&self) -> &str { + use self::SendError::*; + + match *self { + User(ref e) => e.description(), + Connection(ref reason) => reason.description(), + Io(ref e) => e.description(), + } + } +} + +impl fmt::Display for SendError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use std::error::Error; + write!(fmt, "{}", self.description()) + } +} + +impl From for SendError { + fn from(src: io::Error) -> Self { + SendError::Io(src) + } +} + +impl From for SendError { + fn from(src: UserError) -> Self { + SendError::User(src) + } +} + +// ===== impl UserError ===== + +impl error::Error for UserError { + fn description(&self) -> &str { + use self::UserError::*; + + match *self { + InactiveStreamId => "inactive stream", + UnexpectedFrameType => "unexpected frame type", + PayloadTooBig => "payload too big", + Rejected => "rejected", + ReleaseCapacityTooBig => "release capacity too big", + OverflowedStreamId => "stream ID overflowed", + MalformedHeaders => "malformed headers", + MissingUriSchemeAndAuthority => "request URI missing scheme and authority", + PollResetAfterSendResponse => "poll_reset after send_response is illegal", + } + } +} + +impl fmt::Display for UserError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use std::error::Error; + write!(fmt, "{}", self.description()) + } +} diff --git a/third_party/rust/h2/src/codec/framed_read.rs b/third_party/rust/h2/src/codec/framed_read.rs new file mode 100644 index 000000000000..2f7bdc021bdd --- /dev/null +++ b/third_party/rust/h2/src/codec/framed_read.rs @@ -0,0 +1,392 @@ +use codec::RecvError; +use frame::{self, Frame, Kind, Reason}; +use frame::{DEFAULT_MAX_FRAME_SIZE, 
DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE}; + +use hpack; + +use futures::*; + +use bytes::BytesMut; + +use std::io; + +use tokio_io::AsyncRead; +use tokio_io::codec::length_delimited; + +// 16 MB "sane default" taken from golang http2 +const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: usize = 16 << 20; + +#[derive(Debug)] +pub struct FramedRead { + inner: length_delimited::FramedRead, + + // hpack decoder state + hpack: hpack::Decoder, + + max_header_list_size: usize, + + partial: Option, +} + +/// Partially loaded headers frame +#[derive(Debug)] +struct Partial { + /// Empty frame + frame: Continuable, + + /// Partial header payload + buf: BytesMut, +} + +#[derive(Debug)] +enum Continuable { + Headers(frame::Headers), + PushPromise(frame::PushPromise), +} + +impl FramedRead { + pub fn new(inner: length_delimited::FramedRead) -> FramedRead { + FramedRead { + inner: inner, + hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE), + max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, + partial: None, + } + } + + fn decode_frame(&mut self, mut bytes: BytesMut) -> Result, RecvError> { + use self::RecvError::*; + + trace!("decoding frame from {}B", bytes.len()); + + // Parse the head + let head = frame::Head::parse(&bytes); + + if self.partial.is_some() && head.kind() != Kind::Continuation { + trace!("connection error PROTOCOL_ERROR -- expected CONTINUATION, got {:?}", head.kind()); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + + let kind = head.kind(); + + trace!(" -> kind={:?}", kind); + + macro_rules! 
header_block { + ($frame:ident, $head:ident, $bytes:ident) => ({ + // Drop the frame header + // TODO: Change to drain: carllerche/bytes#130 + let _ = $bytes.split_to(frame::HEADER_LEN); + + // Parse the header frame w/o parsing the payload + let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { + Ok(res) => res, + Err(frame::Error::InvalidDependencyId) => { + debug!("stream error PROTOCOL_ERROR -- invalid HEADERS dependency ID"); + // A stream cannot depend on itself. An endpoint MUST + // treat this as a stream error (Section 5.4.2) of type + // `PROTOCOL_ERROR`. + return Err(Stream { + id: $head.stream_id(), + reason: Reason::PROTOCOL_ERROR, + }); + }, + Err(e) => { + debug!("connection error PROTOCOL_ERROR -- failed to load frame; err={:?}", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + }; + + let is_end_headers = frame.is_end_headers(); + + // Load the HPACK encoded headers + match frame.load_hpack(&mut payload, self.max_header_list_size, &mut self.hpack) { + Ok(_) => {}, + Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, + Err(frame::Error::MalformedMessage) => { + + debug!("stream error PROTOCOL_ERROR -- malformed header block"); + return Err(Stream { + id: $head.stream_id(), + reason: Reason::PROTOCOL_ERROR, + }); + }, + Err(e) => { + debug!("connection error PROTOCOL_ERROR -- failed HPACK decoding; err={:?}", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + } + + if is_end_headers { + frame.into() + } else { + trace!("loaded partial header block"); + // Defer returning the frame + self.partial = Some(Partial { + frame: Continuable::$frame(frame), + buf: payload, + }); + + return Ok(None); + } + }); + } + + let frame = match kind { + Kind::Settings => { + let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + debug!("connection error PROTOCOL_ERROR -- failed to load SETTINGS frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + 
})?.into() + }, + Kind::Ping => { + let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + debug!("connection error PROTOCOL_ERROR -- failed to load PING frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })?.into() + }, + Kind::WindowUpdate => { + let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + debug!("connection error PROTOCOL_ERROR -- failed to load WINDOW_UPDATE frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })?.into() + }, + Kind::Data => { + let _ = bytes.split_to(frame::HEADER_LEN); + let res = frame::Data::load(head, bytes.freeze()); + + // TODO: Should this always be connection level? Probably not... + res.map_err(|e| { + debug!("connection error PROTOCOL_ERROR -- failed to load DATA frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })?.into() + }, + Kind::Headers => { + header_block!(Headers, head, bytes) + }, + Kind::Reset => { + let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); + res.map_err(|_| Connection(Reason::PROTOCOL_ERROR))?.into() + }, + Kind::GoAway => { + let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); + res.map_err(|_| Connection(Reason::PROTOCOL_ERROR))?.into() + }, + Kind::PushPromise => { + header_block!(PushPromise, head, bytes) + }, + Kind::Priority => { + if head.stream_id() == 0 { + // Invalid stream identifier + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + + match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { + Ok(frame) => frame.into(), + Err(frame::Error::InvalidDependencyId) => { + // A stream cannot depend on itself. An endpoint MUST + // treat this as a stream error (Section 5.4.2) of type + // `PROTOCOL_ERROR`. 
+ debug!("stream error PROTOCOL_ERROR -- PRIORITY invalid dependency ID"); + return Err(Stream { + id: head.stream_id(), + reason: Reason::PROTOCOL_ERROR, + }); + }, + Err(_) => return Err(Connection(Reason::PROTOCOL_ERROR)), + } + }, + Kind::Continuation => { + let is_end_headers = (head.flag() & 0x4) == 0x4; + + let mut partial = match self.partial.take() { + Some(partial) => partial, + None => { + debug!("connection error PROTOCOL_ERROR -- received unexpected CONTINUATION frame"); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + }; + + // The stream identifiers must match + if partial.frame.stream_id() != head.stream_id() { + debug!("connection error PROTOCOL_ERROR -- CONTINUATION frame stream ID does not match previous frame stream ID"); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + + + + // Extend the buf + if partial.buf.is_empty() { + partial.buf = bytes.split_off(frame::HEADER_LEN); + } else { + if partial.frame.is_over_size() { + // If there was left over bytes previously, they may be + // needed to continue decoding, even though we will + // be ignoring this frame. This is done to keep the HPACK + // decoder state up-to-date. + // + // Still, we need to be careful, because if a malicious + // attacker were to try to send a gigantic string, such + // that it fits over multiple header blocks, we could + // grow memory uncontrollably again, and that'd be a shame. + // + // Instead, we use a simple heuristic to determine if + // we should continue to ignore decoding, or to tell + // the attacker to go away. 
+ if partial.buf.len() + bytes.len() > self.max_header_list_size { + debug!("connection error COMPRESSION_ERROR -- CONTINUATION frame header block size over ignorable limit"); + return Err(Connection(Reason::COMPRESSION_ERROR)); + } + } + partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); + } + + match partial.frame.load_hpack(&mut partial.buf, self.max_header_list_size, &mut self.hpack) { + Ok(_) => {}, + Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, + Err(frame::Error::MalformedMessage) => { + debug!("stream error PROTOCOL_ERROR -- malformed CONTINUATION frame"); + return Err(Stream { + id: head.stream_id(), + reason: Reason::PROTOCOL_ERROR, + }); + }, + Err(e) => { + debug!("connection error PROTOCOL_ERROR -- failed HPACK decoding; err={:?}", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + }, + } + + if is_end_headers { + partial.frame.into() + } else { + self.partial = Some(partial); + return Ok(None); + } + }, + Kind::Unknown => { + // Unknown frames are ignored + return Ok(None); + }, + }; + + Ok(Some(frame)) + } + + pub fn get_ref(&self) -> &T { + self.inner.get_ref() + } + + pub fn get_mut(&mut self) -> &mut T { + self.inner.get_mut() + } + + /// Returns the current max frame size setting + #[cfg(feature = "unstable")] + #[inline] + pub fn max_frame_size(&self) -> usize { + self.inner.max_frame_length() + } + + /// Updates the max frame size setting. + /// + /// Must be within 16,384 and 16,777,215. + #[inline] + pub fn set_max_frame_size(&mut self, val: usize) { + assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize); + self.inner.set_max_frame_length(val) + } + + /// Update the max header list size setting. 
+ #[inline] + pub fn set_max_header_list_size(&mut self, val: usize) { + self.max_header_list_size = val; + } +} + +impl Stream for FramedRead +where + T: AsyncRead, +{ + type Item = Frame; + type Error = RecvError; + + fn poll(&mut self) -> Poll, Self::Error> { + loop { + trace!("poll"); + let bytes = match try_ready!(self.inner.poll().map_err(map_err)) { + Some(bytes) => bytes, + None => return Ok(Async::Ready(None)), + }; + + trace!("poll; bytes={}B", bytes.len()); + if let Some(frame) = self.decode_frame(bytes)? { + debug!("received; frame={:?}", frame); + return Ok(Async::Ready(Some(frame))); + } + } + } +} + +fn map_err(err: io::Error) -> RecvError { + use tokio_io::codec::length_delimited::FrameTooBig; + + if let io::ErrorKind::InvalidData = err.kind() { + if let Some(custom) = err.get_ref() { + if custom.is::() { + return RecvError::Connection(Reason::FRAME_SIZE_ERROR); + } + } + } + err.into() +} + +// ===== impl Continuable ===== + +impl Continuable { + fn stream_id(&self) -> frame::StreamId { + match *self { + Continuable::Headers(ref h) => h.stream_id(), + Continuable::PushPromise(ref p) => p.stream_id(), + } + } + + fn is_over_size(&self) -> bool { + match *self { + Continuable::Headers(ref h) => h.is_over_size(), + Continuable::PushPromise(ref p) => p.is_over_size(), + } + } + + fn load_hpack( + &mut self, + src: &mut BytesMut, + max_header_list_size: usize, + decoder: &mut hpack::Decoder, + ) -> Result<(), frame::Error> { + match *self { + Continuable::Headers(ref mut h) => h.load_hpack(src, max_header_list_size, decoder), + Continuable::PushPromise(ref mut p) => p.load_hpack(src, max_header_list_size, decoder), + } + } +} + +impl From for Frame { + fn from(cont: Continuable) -> Self { + match cont { + Continuable::Headers(mut headers) => { + headers.set_end_headers(); + headers.into() + } + Continuable::PushPromise(mut push) => { + push.set_end_headers(); + push.into() + } + } + } +} diff --git a/third_party/rust/h2/src/codec/framed_write.rs 
b/third_party/rust/h2/src/codec/framed_write.rs new file mode 100644 index 000000000000..504e0c585890 --- /dev/null +++ b/third_party/rust/h2/src/codec/framed_write.rs @@ -0,0 +1,284 @@ +use codec::UserError; +use codec::UserError::*; +use frame::{self, Frame, FrameSize}; +use hpack; + +use bytes::{Buf, BufMut, BytesMut}; +use futures::*; +use tokio_io::{AsyncRead, AsyncWrite}; + +use std::io::{self, Cursor}; + +#[derive(Debug)] +pub struct FramedWrite { + /// Upstream `AsyncWrite` + inner: T, + + /// HPACK encoder + hpack: hpack::Encoder, + + /// Write buffer + /// + /// TODO: Should this be a ring buffer? + buf: Cursor, + + /// Next frame to encode + next: Option>, + + /// Last data frame + last_data_frame: Option>, + + /// Max frame size, this is specified by the peer + max_frame_size: FrameSize, +} + +#[derive(Debug)] +enum Next { + Data(frame::Data), + Continuation(frame::Continuation), +} + +/// Initialze the connection with this amount of write buffer. +const DEFAULT_BUFFER_CAPACITY: usize = 4 * 1_024; + +/// Min buffer required to attempt to write a frame +const MIN_BUFFER_CAPACITY: usize = frame::HEADER_LEN + CHAIN_THRESHOLD; + +/// Chain payloads bigger than this. The remote will never advertise a max frame +/// size less than this (well, the spec says the max frame size can't be less +/// than 16kb, so not even close). +const CHAIN_THRESHOLD: usize = 256; + +// TODO: Make generic +impl FramedWrite +where + T: AsyncWrite, + B: Buf, +{ + pub fn new(inner: T) -> FramedWrite { + FramedWrite { + inner: inner, + hpack: hpack::Encoder::default(), + buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), + next: None, + last_data_frame: None, + max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, + } + } + + /// Returns `Ready` when `send` is able to accept a frame + /// + /// Calling this function may result in the current contents of the buffer + /// to be flushed to `T`. 
+ pub fn poll_ready(&mut self) -> Poll<(), io::Error> { + if !self.has_capacity() { + // Try flushing + self.flush()?; + + if !self.has_capacity() { + return Ok(Async::NotReady); + } + } + + Ok(Async::Ready(())) + } + + /// Buffer a frame. + /// + /// `poll_ready` must be called first to ensure that a frame may be + /// accepted. + pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { + // Ensure that we have enough capacity to accept the write. + assert!(self.has_capacity()); + + debug!("send; frame={:?}", item); + + match item { + Frame::Data(mut v) => { + // Ensure that the payload is not greater than the max frame. + let len = v.payload().remaining(); + + if len > self.max_frame_size() { + return Err(PayloadTooBig); + } + + if len >= CHAIN_THRESHOLD { + let head = v.head(); + + // Encode the frame head to the buffer + head.encode(len, self.buf.get_mut()); + + // Save the data frame + self.next = Some(Next::Data(v)); + } else { + v.encode_chunk(self.buf.get_mut()); + + // The chunk has been fully encoded, so there is no need to + // keep it around + assert_eq!(v.payload().remaining(), 0, "chunk not fully encoded"); + + // Save off the last frame... 
+ self.last_data_frame = Some(v); + } + }, + Frame::Headers(v) => { + if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) { + self.next = Some(Next::Continuation(continuation)); + } + }, + Frame::PushPromise(v) => { + if let Some(continuation) = v.encode(&mut self.hpack, self.buf.get_mut()) { + self.next = Some(Next::Continuation(continuation)); + } + }, + Frame::Settings(v) => { + v.encode(self.buf.get_mut()); + trace!("encoded settings; rem={:?}", self.buf.remaining()); + }, + Frame::GoAway(v) => { + v.encode(self.buf.get_mut()); + trace!("encoded go_away; rem={:?}", self.buf.remaining()); + }, + Frame::Ping(v) => { + v.encode(self.buf.get_mut()); + trace!("encoded ping; rem={:?}", self.buf.remaining()); + }, + Frame::WindowUpdate(v) => { + v.encode(self.buf.get_mut()); + trace!("encoded window_update; rem={:?}", self.buf.remaining()); + }, + + Frame::Priority(_) => { + /* + v.encode(self.buf.get_mut()); + trace!("encoded priority; rem={:?}", self.buf.remaining()); + */ + unimplemented!(); + }, + Frame::Reset(v) => { + v.encode(self.buf.get_mut()); + trace!("encoded reset; rem={:?}", self.buf.remaining()); + }, + } + + Ok(()) + } + + /// Flush buffered data to the wire + pub fn flush(&mut self) -> Poll<(), io::Error> { + trace!("flush"); + + loop { + while !self.is_empty() { + match self.next { + Some(Next::Data(ref mut frame)) => { + trace!(" -> queued data frame"); + let mut buf = Buf::by_ref(&mut self.buf).chain(frame.payload_mut()); + try_ready!(self.inner.write_buf(&mut buf)); + }, + _ => { + trace!(" -> not a queued data frame"); + try_ready!(self.inner.write_buf(&mut self.buf)); + }, + } + } + + // Clear internal buffer + self.buf.set_position(0); + self.buf.get_mut().clear(); + + // The data frame has been written, so unset it + match self.next.take() { + Some(Next::Data(frame)) => { + self.last_data_frame = Some(frame); + debug_assert!(self.is_empty()); + break; + }, + Some(Next::Continuation(frame)) => { + // Buffer the 
continuation frame, then try to write again + if let Some(continuation) = frame.encode(&mut self.hpack, self.buf.get_mut()) { + self.next = Some(Next::Continuation(continuation)); + } + }, + None => { + break; + } + } + } + + trace!("flushing buffer"); + // Flush the upstream + try_nb!(self.inner.flush()); + + Ok(Async::Ready(())) + } + + /// Close the codec + pub fn shutdown(&mut self) -> Poll<(), io::Error> { + try_ready!(self.flush()); + self.inner.shutdown().map_err(Into::into) + } + + fn has_capacity(&self) -> bool { + self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY + } + + fn is_empty(&self) -> bool { + match self.next { + Some(Next::Data(ref frame)) => !frame.payload().has_remaining(), + _ => !self.buf.has_remaining(), + } + } +} + +impl FramedWrite { + /// Returns the max frame size that can be sent + pub fn max_frame_size(&self) -> usize { + self.max_frame_size as usize + } + + /// Set the peer's max frame size. + pub fn set_max_frame_size(&mut self, val: usize) { + assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize); + self.max_frame_size = val as FrameSize; + } + + /// Retrieve the last data frame that has been sent + pub fn take_last_data_frame(&mut self) -> Option> { + self.last_data_frame.take() + } + + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } +} + +impl io::Read for FramedWrite { + fn read(&mut self, dst: &mut [u8]) -> io::Result { + self.inner.read(dst) + } +} + +impl AsyncRead for FramedWrite { + fn read_buf(&mut self, buf: &mut B2) -> Poll + where + Self: Sized, + { + self.inner.read_buf(buf) + } + + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } +} + +#[cfg(feature = "unstable")] +mod unstable { + use super::*; + + impl FramedWrite { + pub fn get_ref(&self) -> &T { + &self.inner + } + } +} diff --git a/third_party/rust/h2/src/codec/mod.rs b/third_party/rust/h2/src/codec/mod.rs new file mode 100644 index 
000000000000..0f8acbf9f798 --- /dev/null +++ b/third_party/rust/h2/src/codec/mod.rs @@ -0,0 +1,198 @@ +mod error; +mod framed_read; +mod framed_write; + +pub use self::error::{RecvError, SendError, UserError}; + +use self::framed_read::FramedRead; +use self::framed_write::FramedWrite; + +use frame::{self, Data, Frame}; + +use futures::*; + +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_io::codec::length_delimited; + +use bytes::Buf; + +use std::io; + +#[derive(Debug)] +pub struct Codec { + inner: FramedRead>, +} + +impl Codec +where + T: AsyncRead + AsyncWrite, + B: Buf, +{ + /// Returns a new `Codec` with the default max frame size + #[inline] + pub fn new(io: T) -> Self { + Self::with_max_recv_frame_size(io, frame::DEFAULT_MAX_FRAME_SIZE as usize) + } + + /// Returns a new `Codec` with the given maximum frame size + pub fn with_max_recv_frame_size(io: T, max_frame_size: usize) -> Self { + // Wrap with writer + let framed_write = FramedWrite::new(io); + + // Delimit the frames + let delimited = length_delimited::Builder::new() + .big_endian() + .length_field_length(3) + .length_adjustment(9) + .num_skip(0) // Don't skip the header + .new_read(framed_write); + + let mut inner = FramedRead::new(delimited); + + // Use FramedRead's method since it checks the value is within range. + inner.set_max_frame_size(max_frame_size); + + Codec { + inner, + } + } +} + +impl Codec { + /// Updates the max received frame size. + /// + /// The change takes effect the next time a frame is decoded. In other + /// words, if a frame is currently in process of being decoded with a frame + /// size greater than `val` but less than the max frame size in effect + /// before calling this function, then the frame will be allowed. + #[inline] + pub fn set_max_recv_frame_size(&mut self, val: usize) { + self.inner.set_max_frame_size(val) + } + + /// Returns the current max received frame size setting. + /// + /// This is the largest size this codec will accept from the wire. 
Larger + /// frames will be rejected. + #[cfg(feature = "unstable")] + #[inline] + pub fn max_recv_frame_size(&self) -> usize { + self.inner.max_frame_size() + } + + /// Returns the max frame size that can be sent to the peer. + pub fn max_send_frame_size(&self) -> usize { + self.inner.get_ref().max_frame_size() + } + + /// Set the peer's max frame size. + pub fn set_max_send_frame_size(&mut self, val: usize) { + self.framed_write().set_max_frame_size(val) + } + + /// Set the max header list size that can be received. + pub fn set_max_recv_header_list_size(&mut self, val: usize) { + self.inner.set_max_header_list_size(val); + } + + /// Get a reference to the inner stream. + #[cfg(feature = "unstable")] + pub fn get_ref(&self) -> &T { + self.inner.get_ref().get_ref() + } + + /// Get a mutable reference to the inner stream. + pub fn get_mut(&mut self) -> &mut T { + self.inner.get_mut().get_mut() + } + + /// Takes the data payload value that was fully written to the socket + pub(crate) fn take_last_data_frame(&mut self) -> Option> { + self.framed_write().take_last_data_frame() + } + + fn framed_write(&mut self) -> &mut FramedWrite { + self.inner.get_mut() + } +} + +impl Codec +where + T: AsyncWrite, + B: Buf, +{ + /// Returns `Ready` when the codec can buffer a frame + pub fn poll_ready(&mut self) -> Poll<(), io::Error> { + self.framed_write().poll_ready() + } + + /// Buffer a frame. + /// + /// `poll_ready` must be called first to ensure that a frame may be + /// accepted. 
+ /// + /// TODO: Rename this to avoid conflicts with Sink::buffer + pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { + self.framed_write().buffer(item) + } + + /// Flush buffered data to the wire + pub fn flush(&mut self) -> Poll<(), io::Error> { + self.framed_write().flush() + } + + /// Shutdown the send half + pub fn shutdown(&mut self) -> Poll<(), io::Error> { + self.framed_write().shutdown() + } +} + +impl Stream for Codec +where + T: AsyncRead, +{ + type Item = Frame; + type Error = RecvError; + + fn poll(&mut self) -> Poll, Self::Error> { + self.inner.poll() + } +} + +impl Sink for Codec +where + T: AsyncWrite, + B: Buf, +{ + type SinkItem = Frame; + type SinkError = SendError; + + fn start_send(&mut self, item: Self::SinkItem) -> StartSend { + if !self.poll_ready()?.is_ready() { + return Ok(AsyncSink::NotReady(item)); + } + + self.buffer(item)?; + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.flush()?; + Ok(Async::Ready(())) + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.shutdown()?; + Ok(Async::Ready(())) + } +} + +// TODO: remove (or improve) this +impl From for Codec> +where + T: AsyncRead + AsyncWrite, +{ + fn from(src: T) -> Self { + Self::new(src) + } +} diff --git a/third_party/rust/h2/src/error.rs b/third_party/rust/h2/src/error.rs new file mode 100644 index 000000000000..d8a747c9d665 --- /dev/null +++ b/third_party/rust/h2/src/error.rs @@ -0,0 +1,123 @@ +use codec::{SendError, UserError}; +use proto; + +use std::{error, fmt, io}; + +pub use frame::Reason; + +/// Represents HTTP/2.0 operation errors. +/// +/// `Error` covers error cases raised by protocol errors caused by the +/// peer, I/O (transport) errors, and errors caused by the user of the library. +/// +/// If the error was caused by the remote peer, then it will contain a +/// [`Reason`] which can be obtained with the [`reason`] function. 
+/// +/// [`Reason`]: struct.Reason.html +/// [`reason`]: #method.reason +#[derive(Debug)] +pub struct Error { + kind: Kind, +} + +#[derive(Debug)] +enum Kind { + /// An error caused by an action taken by the remote peer. + /// + /// This is either an error received by the peer or caused by an invalid + /// action taken by the peer (i.e. a protocol error). + Proto(Reason), + + /// An error resulting from an invalid action taken by the user of this + /// library. + User(UserError), + + /// An `io::Error` occurred while trying to read or write. + Io(io::Error), +} + +// ===== impl Error ===== + +impl Error { + /// If the error was caused by the remote peer, the error reason. + /// + /// This is either an error received by the peer or caused by an invalid + /// action taken by the peer (i.e. a protocol error). + pub fn reason(&self) -> Option { + match self.kind { + Kind::Proto(reason) => Some(reason), + _ => None, + } + } +} + +impl From for Error { + fn from(src: proto::Error) -> Error { + use proto::Error::*; + + Error { + kind: match src { + Proto(reason) => Kind::Proto(reason), + Io(e) => Kind::Io(e), + }, + } + } +} + +impl From for Error { + fn from(src: io::Error) -> Error { + Error { + kind: Kind::Io(src), + } + } +} + +impl From for Error { + fn from(src: Reason) -> Error { + Error { + kind: Kind::Proto(src), + } + } +} + +impl From for Error { + fn from(src: SendError) -> Error { + match src { + SendError::User(e) => e.into(), + SendError::Connection(reason) => reason.into(), + SendError::Io(e) => e.into(), + } + } +} + +impl From for Error { + fn from(src: UserError) -> Error { + Error { + kind: Kind::User(src), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::Kind::*; + + match self.kind { + Proto(ref reason) => write!(fmt, "protocol error: {}", reason), + User(ref e) => write!(fmt, "user error: {}", e), + Io(ref e) => fmt::Display::fmt(e, fmt), + } + } +} + +impl error::Error for Error { + 
fn description(&self) -> &str { + use self::Kind::*; + + match self.kind { + Io(ref e) => error::Error::description(e), + Proto(ref reason) => reason.description(), + User(ref user) => user.description(), + } + } +} diff --git a/third_party/rust/h2/src/frame/data.rs b/third_party/rust/h2/src/frame/data.rs new file mode 100644 index 000000000000..a3da4356e888 --- /dev/null +++ b/third_party/rust/h2/src/frame/data.rs @@ -0,0 +1,215 @@ +use bytes::{Buf, BufMut, Bytes}; +use frame::{util, Error, Frame, Head, Kind, StreamId}; + +use std::fmt; + +/// Data frame +/// +/// Data frames convey arbitrary, variable-length sequences of octets associated +/// with a stream. One or more DATA frames are used, for instance, to carry HTTP +/// request or response payloads. +#[derive(Eq, PartialEq)] +pub struct Data { + stream_id: StreamId, + data: T, + flags: DataFlags, + pad_len: Option, +} + +#[derive(Copy, Clone, Eq, PartialEq)] +struct DataFlags(u8); + +const END_STREAM: u8 = 0x1; +const PADDED: u8 = 0x8; +const ALL: u8 = END_STREAM | PADDED; + +impl Data { + /// Creates a new DATA frame. + pub fn new(stream_id: StreamId, payload: T) -> Self { + assert!(!stream_id.is_zero()); + + Data { + stream_id: stream_id, + data: payload, + flags: DataFlags::default(), + pad_len: None, + } + } + + /// Returns the stream identifer that this frame is associated with. + /// + /// This cannot be a zero stream identifier. + pub fn stream_id(&self) -> StreamId { + self.stream_id + } + + /// Gets the value of the `END_STREAM` flag for this frame. + /// + /// If true, this frame is the last that the endpoint will send for the + /// identified stream. + /// + /// Setting this flag causes the stream to enter one of the "half-closed" + /// states or the "closed" state (Section 5.1). + pub fn is_end_stream(&self) -> bool { + self.flags.is_end_stream() + } + + /// Sets the value for the `END_STREAM` flag on this frame. 
+ pub fn set_end_stream(&mut self, val: bool) { + if val { + self.flags.set_end_stream(); + } else { + self.flags.unset_end_stream(); + } + } + + /// Returns a reference to this frame's payload. + /// + /// This does **not** include any padding that might have been originally + /// included. + pub fn payload(&self) -> &T { + &self.data + } + + /// Returns a mutable reference to this frame's payload. + /// + /// This does **not** include any padding that might have been originally + /// included. + pub fn payload_mut(&mut self) -> &mut T { + &mut self.data + } + + /// Consumes `self` and returns the frame's payload. + /// + /// This does **not** include any padding that might have been originally + /// included. + pub fn into_payload(self) -> T { + self.data + } + + pub(crate) fn head(&self) -> Head { + Head::new(Kind::Data, self.flags.into(), self.stream_id) + } + + pub(crate) fn map(self, f: F) -> Data + where + F: FnOnce(T) -> U, + { + Data { + stream_id: self.stream_id, + data: f(self.data), + flags: self.flags, + pad_len: self.pad_len, + } + } +} + +impl Data { + pub(crate) fn load(head: Head, mut payload: Bytes) -> Result { + let flags = DataFlags::load(head.flag()); + + // The stream identifier must not be zero + if head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } + + let pad_len = if flags.is_padded() { + let len = util::strip_padding(&mut payload)?; + Some(len) + } else { + None + }; + + Ok(Data { + stream_id: head.stream_id(), + data: payload, + flags: flags, + pad_len: pad_len, + }) + } +} + +impl Data { + /// Encode the data frame into the `dst` buffer. + /// + /// # Panics + /// + /// Panics if `dst` cannot contain the data frame. 
+ pub(crate) fn encode_chunk(&mut self, dst: &mut U) { + let len = self.data.remaining() as usize; + + assert!(dst.remaining_mut() >= len); + + self.head().encode(len, dst); + dst.put(&mut self.data); + } +} + +impl From> for Frame { + fn from(src: Data) -> Self { + Frame::Data(src) + } +} + +impl fmt::Debug for Data { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Data") + .field("stream_id", &self.stream_id) + .field("flags", &self.flags) + .field("pad_len", &self.pad_len) + // `data` purposefully excluded + .finish() + } +} + +// ===== impl DataFlags ===== + +impl DataFlags { + fn load(bits: u8) -> DataFlags { + DataFlags(bits & ALL) + } + + fn is_end_stream(&self) -> bool { + self.0 & END_STREAM == END_STREAM + } + + fn set_end_stream(&mut self) { + self.0 |= END_STREAM + } + + fn unset_end_stream(&mut self) { + self.0 &= !END_STREAM + } + + fn is_padded(&self) -> bool { + self.0 & PADDED == PADDED + } +} + +impl Default for DataFlags { + fn default() -> Self { + DataFlags(0) + } +} + +impl From for u8 { + fn from(src: DataFlags) -> u8 { + src.0 + } +} + +impl fmt::Debug for DataFlags { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut f = fmt.debug_struct("DataFlags"); + + if self.is_end_stream() { + f.field("end_stream", &true); + } + + if self.is_padded() { + f.field("padded", &true); + } + + f.finish() + } +} diff --git a/third_party/rust/h2/src/frame/go_away.rs b/third_party/rust/h2/src/frame/go_away.rs new file mode 100644 index 000000000000..d97f04ff265b --- /dev/null +++ b/third_party/rust/h2/src/frame/go_away.rs @@ -0,0 +1,54 @@ +use frame::{self, Error, Head, Kind, Reason, StreamId}; + +use bytes::{BufMut}; + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct GoAway { + last_stream_id: StreamId, + error_code: Reason, +} + +impl GoAway { + pub fn new(last_stream_id: StreamId, reason: Reason) -> Self { + GoAway { + last_stream_id, + error_code: reason, + } + } + + pub fn last_stream_id(&self) -> 
StreamId { + self.last_stream_id + } + + pub fn reason(&self) -> Reason { + self.error_code + } + + pub fn load(payload: &[u8]) -> Result { + if payload.len() < 8 { + return Err(Error::BadFrameSize); + } + + let (last_stream_id, _) = StreamId::parse(&payload[..4]); + let error_code = unpack_octets_4!(payload, 4, u32); + + Ok(GoAway { + last_stream_id: last_stream_id, + error_code: error_code.into(), + }) + } + + pub fn encode(&self, dst: &mut B) { + trace!("encoding GO_AWAY; code={:?}", self.error_code); + let head = Head::new(Kind::GoAway, 0, StreamId::zero()); + head.encode(8, dst); + dst.put_u32_be(self.last_stream_id.into()); + dst.put_u32_be(self.error_code.into()); + } +} + +impl From for frame::Frame { + fn from(src: GoAway) -> Self { + frame::Frame::GoAway(src) + } +} diff --git a/third_party/rust/h2/src/frame/head.rs b/third_party/rust/h2/src/frame/head.rs new file mode 100644 index 000000000000..a72c5b31546e --- /dev/null +++ b/third_party/rust/h2/src/frame/head.rs @@ -0,0 +1,94 @@ +use super::StreamId; + +use bytes::{BufMut}; + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct Head { + kind: Kind, + flag: u8, + stream_id: StreamId, +} + +#[repr(u8)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Kind { + Data = 0, + Headers = 1, + Priority = 2, + Reset = 3, + Settings = 4, + PushPromise = 5, + Ping = 6, + GoAway = 7, + WindowUpdate = 8, + Continuation = 9, + Unknown, +} + +// ===== impl Head ===== + +impl Head { + pub fn new(kind: Kind, flag: u8, stream_id: StreamId) -> Head { + Head { + kind: kind, + flag: flag, + stream_id: stream_id, + } + } + + /// Parse an HTTP/2.0 frame header + pub fn parse(header: &[u8]) -> Head { + let (stream_id, _) = StreamId::parse(&header[5..]); + + Head { + kind: Kind::new(header[3]), + flag: header[4], + stream_id, + } + } + + pub fn stream_id(&self) -> StreamId { + self.stream_id + } + + pub fn kind(&self) -> Kind { + self.kind + } + + pub fn flag(&self) -> u8 { + self.flag + } + + pub fn 
encode_len(&self) -> usize { + super::HEADER_LEN + } + + pub fn encode(&self, payload_len: usize, dst: &mut T) { + debug_assert!(self.encode_len() <= dst.remaining_mut()); + + dst.put_uint_be(payload_len as u64, 3); + dst.put_u8(self.kind as u8); + dst.put_u8(self.flag); + dst.put_u32_be(self.stream_id.into()); + } +} + +// ===== impl Kind ===== + +impl Kind { + pub fn new(byte: u8) -> Kind { + match byte { + 0 => Kind::Data, + 1 => Kind::Headers, + 2 => Kind::Priority, + 3 => Kind::Reset, + 4 => Kind::Settings, + 5 => Kind::PushPromise, + 6 => Kind::Ping, + 7 => Kind::GoAway, + 8 => Kind::WindowUpdate, + 9 => Kind::Continuation, + _ => Kind::Unknown, + } + } +} diff --git a/third_party/rust/h2/src/frame/headers.rs b/third_party/rust/h2/src/frame/headers.rs new file mode 100644 index 000000000000..d30374e8db3e --- /dev/null +++ b/third_party/rust/h2/src/frame/headers.rs @@ -0,0 +1,855 @@ +use super::{StreamDependency, StreamId}; +use frame::{Error, Frame, Head, Kind}; +use hpack; + +use http::{uri, HeaderMap, Method, StatusCode, Uri}; +use http::header::{self, HeaderName, HeaderValue}; + +use byteorder::{BigEndian, ByteOrder}; +use bytes::{Bytes, BytesMut}; +use string::String; + +use std::fmt; +use std::io::Cursor; + +/// Header frame +/// +/// This could be either a request or a response. +#[derive(Eq, PartialEq)] +pub struct Headers { + /// The ID of the stream with which this frame is associated. + stream_id: StreamId, + + /// The stream dependency information, if any. + stream_dep: Option, + + /// The header block fragment + header_block: HeaderBlock, + + /// The associated flags + flags: HeadersFlag, +} + +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct HeadersFlag(u8); + +#[derive(Eq, PartialEq)] +pub struct PushPromise { + /// The ID of the stream with which this frame is associated. + stream_id: StreamId, + + /// The ID of the stream being reserved by this PushPromise. 
+ promised_id: StreamId, + + /// The header block fragment + header_block: HeaderBlock, + + /// The associated flags + flags: PushPromiseFlag, +} + +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct PushPromiseFlag(u8); + +#[derive(Debug)] +pub struct Continuation { + /// Stream ID of continuation frame + stream_id: StreamId, + + header_block: EncodingHeaderBlock, +} + +// TODO: These fields shouldn't be `pub` +#[derive(Debug, Default, Eq, PartialEq)] +pub struct Pseudo { + // Request + pub method: Option, + pub scheme: Option>, + pub authority: Option>, + pub path: Option>, + + // Response + pub status: Option, +} + +#[derive(Debug)] +pub struct Iter { + /// Pseudo headers + pseudo: Option, + + /// Header fields + fields: header::IntoIter, +} + +#[derive(Debug, PartialEq, Eq)] +struct HeaderBlock { + /// The decoded header fields + fields: HeaderMap, + + /// Set to true if decoding went over the max header list size. + is_over_size: bool, + + /// Pseudo headers, these are broken out as they must be sent as part of the + /// headers frame. 
+ pseudo: Pseudo, +} + +#[derive(Debug)] +struct EncodingHeaderBlock { + /// Argument to pass to the HPACK encoder to resume encoding + hpack: Option, + + /// remaining headers to encode + headers: Iter, +} + +const END_STREAM: u8 = 0x1; +const END_HEADERS: u8 = 0x4; +const PADDED: u8 = 0x8; +const PRIORITY: u8 = 0x20; +const ALL: u8 = END_STREAM | END_HEADERS | PADDED | PRIORITY; + +// ===== impl Headers ===== + +impl Headers { + /// Create a new HEADERS frame + pub fn new(stream_id: StreamId, pseudo: Pseudo, fields: HeaderMap) -> Self { + Headers { + stream_id: stream_id, + stream_dep: None, + header_block: HeaderBlock { + fields: fields, + is_over_size: false, + pseudo: pseudo, + }, + flags: HeadersFlag::default(), + } + } + + pub fn trailers(stream_id: StreamId, fields: HeaderMap) -> Self { + let mut flags = HeadersFlag::default(); + flags.set_end_stream(); + + Headers { + stream_id, + stream_dep: None, + header_block: HeaderBlock { + fields: fields, + is_over_size: false, + pseudo: Pseudo::default(), + }, + flags: flags, + } + } + + /// Loads the header frame but doesn't actually do HPACK decoding. + /// + /// HPACK decoding is done in the `load_hpack` step. 
+ pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> { + let flags = HeadersFlag(head.flag()); + let mut pad = 0; + + trace!("loading headers; flags={:?}", flags); + + // Read the padding length + if flags.is_padded() { + if src.len() < 1 { + return Err(Error::MalformedMessage); + } + pad = src[0] as usize; + + // Drop the padding + let _ = src.split_to(1); + } + + // Read the stream dependency + let stream_dep = if flags.is_priority() { + if src.len() < 5 { + return Err(Error::MalformedMessage); + } + let stream_dep = StreamDependency::load(&src[..5])?; + + if stream_dep.dependency_id() == head.stream_id() { + return Err(Error::InvalidDependencyId); + } + + // Drop the next 5 bytes + let _ = src.split_to(5); + + Some(stream_dep) + } else { + None + }; + + if pad > 0 { + if pad > src.len() { + return Err(Error::TooMuchPadding); + } + + let len = src.len() - pad; + src.truncate(len); + } + + let headers = Headers { + stream_id: head.stream_id(), + stream_dep: stream_dep, + header_block: HeaderBlock { + fields: HeaderMap::new(), + is_over_size: false, + pseudo: Pseudo::default(), + }, + flags: flags, + }; + + Ok((headers, src)) + } + + pub fn load_hpack(&mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder) -> Result<(), Error> { + self.header_block.load(src, max_header_list_size, decoder) + } + + pub fn stream_id(&self) -> StreamId { + self.stream_id + } + + pub fn is_end_headers(&self) -> bool { + self.flags.is_end_headers() + } + + pub fn set_end_headers(&mut self) { + self.flags.set_end_headers(); + } + + pub fn is_end_stream(&self) -> bool { + self.flags.is_end_stream() + } + + pub fn set_end_stream(&mut self) { + self.flags.set_end_stream() + } + + pub fn is_over_size(&self) -> bool { + self.header_block.is_over_size + } + + pub fn into_parts(self) -> (Pseudo, HeaderMap) { + (self.header_block.pseudo, self.header_block.fields) + } + + #[cfg(feature = "unstable")] + pub fn pseudo_mut(&mut self) -> 
&mut Pseudo { + &mut self.header_block.pseudo + } + + pub fn fields(&self) -> &HeaderMap { + &self.header_block.fields + } + + pub fn into_fields(self) -> HeaderMap { + self.header_block.fields + } + + pub fn encode(self, encoder: &mut hpack::Encoder, dst: &mut BytesMut) -> Option { + // At this point, the `is_end_headers` flag should always be set + debug_assert!(self.flags.is_end_headers()); + + // Get the HEADERS frame head + let head = self.head(); + + self.header_block.into_encoding() + .encode(&head, encoder, dst, |_| { + }) + } + + fn head(&self) -> Head { + Head::new(Kind::Headers, self.flags.into(), self.stream_id) + } +} + +impl From for Frame { + fn from(src: Headers) -> Self { + Frame::Headers(src) + } +} + +impl fmt::Debug for Headers { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Headers") + .field("stream_id", &self.stream_id) + .field("stream_dep", &self.stream_dep) + .field("flags", &self.flags) + // `fields` and `pseudo` purposefully not included + .finish() + } +} + +// ===== impl PushPromise ===== + +impl PushPromise { + /// Loads the push promise frame but doesn't actually do HPACK decoding. + /// + /// HPACK decoding is done in the `load_hpack` step. 
+ pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> { + let flags = PushPromiseFlag(head.flag()); + let mut pad = 0; + + // Read the padding length + if flags.is_padded() { + if src.len() < 1 { + return Err(Error::MalformedMessage); + } + + // TODO: Ensure payload is sized correctly + pad = src[0] as usize; + + // Drop the padding + let _ = src.split_to(1); + } + + if src.len() < 5 { + return Err(Error::MalformedMessage); + } + + let (promised_id, _) = StreamId::parse(&src[..4]); + // Drop promised_id bytes + let _ = src.split_to(5); + + if pad > 0 { + if pad > src.len() { + return Err(Error::TooMuchPadding); + } + + let len = src.len() - pad; + src.truncate(len); + } + + let frame = PushPromise { + flags: flags, + header_block: HeaderBlock { + fields: HeaderMap::new(), + is_over_size: false, + pseudo: Pseudo::default(), + }, + promised_id: promised_id, + stream_id: head.stream_id(), + }; + Ok((frame, src)) + } + + pub fn load_hpack(&mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder) -> Result<(), Error> { + self.header_block.load(src, max_header_list_size, decoder) + } + + pub fn stream_id(&self) -> StreamId { + self.stream_id + } + + pub fn promised_id(&self) -> StreamId { + self.promised_id + } + + pub fn is_end_headers(&self) -> bool { + self.flags.is_end_headers() + } + + pub fn set_end_headers(&mut self) { + self.flags.set_end_headers(); + } + + pub fn is_over_size(&self) -> bool { + self.header_block.is_over_size + } + + pub fn encode(self, encoder: &mut hpack::Encoder, dst: &mut BytesMut) -> Option { + use bytes::BufMut; + + // At this point, the `is_end_headers` flag should always be set + debug_assert!(self.flags.is_end_headers()); + + let head = self.head(); + let promised_id = self.promised_id; + + self.header_block.into_encoding() + .encode(&head, encoder, dst, |dst| { + dst.put_u32_be(promised_id.into()); + }) + } + + fn head(&self) -> Head { + Head::new(Kind::PushPromise, 
self.flags.into(), self.stream_id) + } +} + +#[cfg(feature = "unstable")] +impl PushPromise { + pub fn new( + stream_id: StreamId, + promised_id: StreamId, + pseudo: Pseudo, + fields: HeaderMap, + ) -> Self { + PushPromise { + flags: PushPromiseFlag::default(), + header_block: HeaderBlock { + fields, + is_over_size: false, + pseudo, + }, + promised_id, + stream_id, + } + } + + pub fn into_parts(self) -> (Pseudo, HeaderMap) { + (self.header_block.pseudo, self.header_block.fields) + } + + pub fn fields(&self) -> &HeaderMap { + &self.header_block.fields + } + + pub fn into_fields(self) -> HeaderMap { + self.header_block.fields + } +} + +impl From for Frame { + fn from(src: PushPromise) -> Self { + Frame::PushPromise(src) + } +} + +impl fmt::Debug for PushPromise { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PushPromise") + .field("stream_id", &self.stream_id) + .field("promised_id", &self.promised_id) + .field("flags", &self.flags) + // `fields` and `pseudo` purposefully not included + .finish() + } +} + +// ===== impl Continuation ===== + +impl Continuation { + fn head(&self) -> Head { + Head::new(Kind::Continuation, END_HEADERS, self.stream_id) + } + + pub fn encode(self, encoder: &mut hpack::Encoder, dst: &mut BytesMut) -> Option { + // Get the CONTINUATION frame head + let head = self.head(); + + self.header_block + .encode(&head, encoder, dst, |_| { + }) + } +} + +// ===== impl Pseudo ===== + +impl Pseudo { + pub fn request(method: Method, uri: Uri) -> Self { + let parts = uri::Parts::from(uri); + + let mut path = parts + .path_and_query + .map(|v| v.into()) + .unwrap_or_else(|| Bytes::new()); + + if path.is_empty() && method != Method::OPTIONS { + path = Bytes::from_static(b"/"); + } + + let mut pseudo = Pseudo { + method: Some(method), + scheme: None, + authority: None, + path: Some(to_string(path)), + status: None, + }; + + // If the URI includes a scheme component, add it to the pseudo headers + // + // TODO: Scheme must be 
set... + if let Some(scheme) = parts.scheme { + pseudo.set_scheme(scheme); + } + + // If the URI includes an authority component, add it to the pseudo + // headers + if let Some(authority) = parts.authority { + pseudo.set_authority(to_string(authority.into())); + } + + pseudo + } + + pub fn response(status: StatusCode) -> Self { + Pseudo { + method: None, + scheme: None, + authority: None, + path: None, + status: Some(status), + } + } + + pub fn set_scheme(&mut self, scheme: uri::Scheme) { + self.scheme = Some(to_string(scheme.into())); + } + + pub fn set_authority(&mut self, authority: String) { + self.authority = Some(authority); + } +} + +fn to_string(src: Bytes) -> String { + unsafe { String::from_utf8_unchecked(src) } +} + +// ===== impl EncodingHeaderBlock ===== + +impl EncodingHeaderBlock { + fn encode(mut self, + head: &Head, + encoder: &mut hpack::Encoder, + dst: &mut BytesMut, + f: F) + -> Option + where F: FnOnce(&mut BytesMut), + { + let head_pos = dst.len(); + + // At this point, we don't know how big the h2 frame will be. + // So, we write the head with length 0, then write the body, and + // finally write the length once we know the size. 
+ head.encode(0, dst); + + let payload_pos = dst.len(); + + f(dst); + + // Now, encode the header payload + let continuation = match encoder.encode(self.hpack, &mut self.headers, dst) { + hpack::Encode::Full => None, + hpack::Encode::Partial(state) => Some(Continuation { + stream_id: head.stream_id(), + header_block: EncodingHeaderBlock { + hpack: Some(state), + headers: self.headers, + }, + }), + }; + + // Compute the header block length + let payload_len = (dst.len() - payload_pos) as u64; + + // Write the frame length + BigEndian::write_uint(&mut dst[head_pos..head_pos + 3], payload_len, 3); + + if continuation.is_some() { + // There will be continuation frames, so the `is_end_headers` flag + // must be unset + debug_assert!(dst[head_pos + 4] & END_HEADERS == END_HEADERS); + + dst[head_pos + 4] -= END_HEADERS; + } + + continuation + } +} + +// ===== impl Iter ===== + +impl Iterator for Iter { + type Item = hpack::Header>; + + fn next(&mut self) -> Option { + use hpack::Header::*; + + if let Some(ref mut pseudo) = self.pseudo { + if let Some(method) = pseudo.method.take() { + return Some(Method(method)); + } + + if let Some(scheme) = pseudo.scheme.take() { + return Some(Scheme(scheme)); + } + + if let Some(authority) = pseudo.authority.take() { + return Some(Authority(authority)); + } + + if let Some(path) = pseudo.path.take() { + return Some(Path(path)); + } + + if let Some(status) = pseudo.status.take() { + return Some(Status(status)); + } + } + + self.pseudo = None; + + self.fields.next().map(|(name, value)| { + Field { + name: name, + value: value, + } + }) + } +} + +// ===== impl HeadersFlag ===== + +impl HeadersFlag { + pub fn empty() -> HeadersFlag { + HeadersFlag(0) + } + + pub fn load(bits: u8) -> HeadersFlag { + HeadersFlag(bits & ALL) + } + + pub fn is_end_stream(&self) -> bool { + self.0 & END_STREAM == END_STREAM + } + + pub fn set_end_stream(&mut self) { + self.0 |= END_STREAM; + } + + pub fn is_end_headers(&self) -> bool { + self.0 & END_HEADERS == 
END_HEADERS + } + + pub fn set_end_headers(&mut self) { + self.0 |= END_HEADERS; + } + + pub fn is_padded(&self) -> bool { + self.0 & PADDED == PADDED + } + + pub fn is_priority(&self) -> bool { + self.0 & PRIORITY == PRIORITY + } +} + +impl Default for HeadersFlag { + /// Returns a `HeadersFlag` value with `END_HEADERS` set. + fn default() -> Self { + HeadersFlag(END_HEADERS) + } +} + +impl From for u8 { + fn from(src: HeadersFlag) -> u8 { + src.0 + } +} + +impl fmt::Debug for HeadersFlag { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("HeadersFlag") + .field("end_stream", &self.is_end_stream()) + .field("end_headers", &self.is_end_headers()) + .field("padded", &self.is_padded()) + .field("priority", &self.is_priority()) + .finish() + } +} + +// ===== impl PushPromiseFlag ===== + +impl PushPromiseFlag { + pub fn empty() -> PushPromiseFlag { + PushPromiseFlag(0) + } + + pub fn load(bits: u8) -> PushPromiseFlag { + PushPromiseFlag(bits & ALL) + } + + pub fn is_end_headers(&self) -> bool { + self.0 & END_HEADERS == END_HEADERS + } + + pub fn set_end_headers(&mut self) { + self.0 |= END_HEADERS; + } + + pub fn is_padded(&self) -> bool { + self.0 & PADDED == PADDED + } +} + +impl Default for PushPromiseFlag { + /// Returns a `PushPromiseFlag` value with `END_HEADERS` set. 
+ fn default() -> Self { + PushPromiseFlag(END_HEADERS) + } +} + +impl From for u8 { + fn from(src: PushPromiseFlag) -> u8 { + src.0 + } +} + +impl fmt::Debug for PushPromiseFlag { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("PushPromiseFlag") + .field("end_headers", &self.is_end_headers()) + .field("padded", &self.is_padded()) + .finish() + } +} + +// ===== HeaderBlock ===== + + +impl HeaderBlock { + fn load(&mut self, src: &mut BytesMut, max_header_list_size: usize, decoder: &mut hpack::Decoder) -> Result<(), Error> { + let mut reg = !self.fields.is_empty(); + let mut malformed = false; + let mut headers_size = self.calculate_header_list_size(); + + macro_rules! set_pseudo { + ($field:ident, $val:expr) => {{ + if reg { + trace!("load_hpack; header malformed -- pseudo not at head of block"); + malformed = true; + } else if self.pseudo.$field.is_some() { + trace!("load_hpack; header malformed -- repeated pseudo"); + malformed = true; + } else { + let __val = $val; + headers_size += decoded_header_size(stringify!($ident).len() + 1, __val.as_str().len()); + if headers_size < max_header_list_size { + self.pseudo.$field = Some(__val); + } else if !self.is_over_size { + trace!("load_hpack; header list size over max"); + self.is_over_size = true; + } + } + }} + } + + let mut cursor = Cursor::new(src); + + // If the header frame is malformed, we still have to continue decoding + // the headers. A malformed header frame is a stream level error, but + // the hpack state is connection level. In order to maintain correct + // state for other streams, the hpack decoding process must complete. + let res = decoder.decode(&mut cursor, |header| { + use hpack::Header::*; + + match header { + Field { + name, + value, + } => { + // Connection level header fields are not supported and must + // result in a protocol error. 
+ + if name == header::CONNECTION + || name == header::TRANSFER_ENCODING + || name == header::UPGRADE + || name == "keep-alive" + || name == "proxy-connection" + { + trace!("load_hpack; connection level header"); + malformed = true; + } else if name == header::TE && value != "trailers" { + trace!("load_hpack; TE header not set to trailers; val={:?}", value); + malformed = true; + } else { + reg = true; + + headers_size += decoded_header_size(name.as_str().len(), value.len()); + if headers_size < max_header_list_size { + self.fields.append(name, value); + } else if !self.is_over_size { + trace!("load_hpack; header list size over max"); + self.is_over_size = true; + } + } + }, + Authority(v) => set_pseudo!(authority, v), + Method(v) => set_pseudo!(method, v), + Scheme(v) => set_pseudo!(scheme, v), + Path(v) => set_pseudo!(path, v), + Status(v) => set_pseudo!(status, v), + } + }); + + if let Err(e) = res { + trace!("hpack decoding error; err={:?}", e); + return Err(e.into()); + } + + if malformed { + trace!("malformed message"); + return Err(Error::MalformedMessage.into()); + } + + Ok(()) + } + + fn into_encoding(self) -> EncodingHeaderBlock { + EncodingHeaderBlock { + hpack: None, + headers: Iter { + pseudo: Some(self.pseudo), + fields: self.fields.into_iter(), + }, + } + } + + /// Calculates the size of the currently decoded header list. + /// + /// According to http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE + /// + /// > The value is based on the uncompressed size of header fields, + /// > including the length of the name and value in octets plus an + /// > overhead of 32 octets for each header field. + fn calculate_header_list_size(&self) -> usize { + macro_rules! 
pseudo_size { + ($name:ident) => ({ + self.pseudo + .$name + .as_ref() + .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len())) + .unwrap_or(0) + }); + } + + pseudo_size!(method) + + pseudo_size!(scheme) + + pseudo_size!(status) + + pseudo_size!(authority) + + pseudo_size!(path) + + self.fields.iter() + .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) + .sum::() + } +} + +fn decoded_header_size(name: usize, value: usize) -> usize { + name + value + 32 +} + +// Stupid hack to make the set_pseudo! macro happy, since all other values +// have a method `as_str` except for `String`. +trait AsStr { + fn as_str(&self) -> &str; +} + +impl AsStr for String { + fn as_str(&self) -> &str { + self + } +} diff --git a/third_party/rust/h2/src/frame/mod.rs b/third_party/rust/h2/src/frame/mod.rs new file mode 100644 index 000000000000..36fcafa0f003 --- /dev/null +++ b/third_party/rust/h2/src/frame/mod.rs @@ -0,0 +1,160 @@ +use hpack; + +use bytes::Bytes; + +use std::fmt; + +/// A helper macro that unpacks a sequence of 4 bytes found in the buffer with +/// the given identifier, starting at the given offset, into the given integer +/// type. Obviously, the integer type should be able to support at least 4 +/// bytes. +/// +/// # Examples +/// +/// ```rust +/// let buf: [u8; 4] = [0, 0, 0, 1]; +/// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); +/// ``` +#[macro_escape] +macro_rules! 
unpack_octets_4 { + // TODO: Get rid of this macro + ($buf:expr, $offset:expr, $tip:ty) => ( + (($buf[$offset + 0] as $tip) << 24) | + (($buf[$offset + 1] as $tip) << 16) | + (($buf[$offset + 2] as $tip) << 8) | + (($buf[$offset + 3] as $tip) << 0) + ); +} + +mod data; +mod go_away; +mod head; +mod headers; +mod ping; +mod priority; +mod reason; +mod reset; +mod settings; +mod stream_id; +mod util; +mod window_update; + +pub use self::data::Data; +pub use self::go_away::GoAway; +pub use self::head::{Head, Kind}; +pub use self::headers::{Continuation, Headers, Pseudo, PushPromise}; +pub use self::ping::Ping; +pub use self::priority::{Priority, StreamDependency}; +pub use self::reason::Reason; +pub use self::reset::Reset; +pub use self::settings::Settings; +pub use self::stream_id::{StreamId, StreamIdOverflow}; +pub use self::window_update::WindowUpdate; + +// Re-export some constants + +pub use self::settings::{ + DEFAULT_INITIAL_WINDOW_SIZE, + DEFAULT_MAX_FRAME_SIZE, + DEFAULT_SETTINGS_HEADER_TABLE_SIZE, + MAX_INITIAL_WINDOW_SIZE, + MAX_MAX_FRAME_SIZE, +}; + +pub type FrameSize = u32; + +pub const HEADER_LEN: usize = 9; + +#[derive(Eq, PartialEq)] +pub enum Frame { + Data(Data), + Headers(Headers), + Priority(Priority), + PushPromise(PushPromise), + Settings(Settings), + Ping(Ping), + GoAway(GoAway), + WindowUpdate(WindowUpdate), + Reset(Reset), +} + +impl Frame { + pub fn map(self, f: F) -> Frame + where + F: FnOnce(T) -> U, + { + use self::Frame::*; + + match self { + Data(frame) => frame.map(f).into(), + Headers(frame) => frame.into(), + Priority(frame) => frame.into(), + PushPromise(frame) => frame.into(), + Settings(frame) => frame.into(), + Ping(frame) => frame.into(), + GoAway(frame) => frame.into(), + WindowUpdate(frame) => frame.into(), + Reset(frame) => frame.into(), + } + } +} + +impl fmt::Debug for Frame { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::Frame::*; + + match *self { + Data(ref frame) => write!(fmt, 
"Frame::Data({:?})", frame), + Headers(ref frame) => write!(fmt, "Frame::Headers({:?})", frame), + Priority(ref frame) => write!(fmt, "Frame::Priority({:?})", frame), + PushPromise(ref frame) => write!(fmt, "Frame::PushPromise({:?})", frame), + Settings(ref frame) => write!(fmt, "Frame::Settings({:?})", frame), + Ping(ref frame) => write!(fmt, "Frame::Ping({:?})", frame), + GoAway(ref frame) => write!(fmt, "Frame::GoAway({:?})", frame), + WindowUpdate(ref frame) => write!(fmt, "Frame::WindowUpdate({:?})", frame), + Reset(ref frame) => write!(fmt, "Frame::Reset({:?})", frame), + } + } +} + +/// Errors that can occur during parsing an HTTP/2 frame. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Error { + /// A length value other than 8 was set on a PING message. + BadFrameSize, + + /// The padding length was larger than the frame-header-specified + /// length of the payload. + TooMuchPadding, + + /// An invalid setting value was provided + InvalidSettingValue, + + /// An invalid window update value + InvalidWindowUpdateValue, + + /// The payload length specified by the frame header was not the + /// value necessary for the specific frame type. + InvalidPayloadLength, + + /// Received a payload with an ACK settings frame + InvalidPayloadAckSettings, + + /// An invalid stream identifier was provided. + /// + /// This is returned if a SETTINGS or PING frame is received with a stream + /// identifier other than zero. + InvalidStreamId, + + /// A request or response is malformed. + MalformedMessage, + + /// An invalid stream dependency ID was provided + /// + /// This is returned if a HEADERS or PRIORITY frame is received with an + /// invalid stream identifier. 
+ InvalidDependencyId, + + /// Failed to perform HPACK decoding + Hpack(hpack::DecoderError), +} diff --git a/third_party/rust/h2/src/frame/ping.rs b/third_party/rust/h2/src/frame/ping.rs new file mode 100644 index 000000000000..7f2c1ba45581 --- /dev/null +++ b/third_party/rust/h2/src/frame/ping.rs @@ -0,0 +1,102 @@ +use bytes::{Buf, BufMut, IntoBuf}; +use frame::{Error, Frame, Head, Kind, StreamId}; + +const ACK_FLAG: u8 = 0x1; + +pub type Payload = [u8; 8]; + +#[derive(Debug, Eq, PartialEq)] +pub struct Ping { + ack: bool, + payload: Payload, +} + +// This was just 8 randomly generated bytes. We use something besides just +// zeroes to distinguish this specific PING from any other. +const SHUTDOWN_PAYLOAD: Payload = [0x0b, 0x7b, 0xa2, 0xf0, 0x8b, 0x9b, 0xfe, 0x54]; + +impl Ping { + + #[cfg(feature = "unstable")] + pub const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD; + + #[cfg(not(feature = "unstable"))] + pub(crate) const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD; + + pub fn new(payload: Payload) -> Ping { + Ping { + ack: false, + payload, + } + } + + pub fn pong(payload: Payload) -> Ping { + Ping { + ack: true, + payload, + } + } + + pub fn is_ack(&self) -> bool { + self.ack + } + + pub fn payload(&self) -> &Payload { + &self.payload + } + + pub fn into_payload(self) -> Payload { + self.payload + } + + /// Builds a `Ping` frame from a raw frame. + pub fn load(head: Head, bytes: &[u8]) -> Result { + debug_assert_eq!(head.kind(), ::frame::Kind::Ping); + + // PING frames are not associated with any individual stream. If a PING + // frame is received with a stream identifier field value other than + // 0x0, the recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if !head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } + + // In addition to the frame header, PING frames MUST contain 8 octets of opaque + // data in the payload. 
+ if bytes.len() != 8 { + return Err(Error::BadFrameSize); + } + + let mut payload = [0; 8]; + bytes.into_buf().copy_to_slice(&mut payload); + + // The PING frame defines the following flags: + // + // ACK (0x1): When set, bit 0 indicates that this PING frame is a PING + // response. An endpoint MUST set this flag in PING responses. An + // endpoint MUST NOT respond to PING frames containing this flag. + let ack = head.flag() & ACK_FLAG != 0; + + Ok(Ping { + ack, + payload, + }) + } + + pub fn encode(&self, dst: &mut B) { + let sz = self.payload.len(); + trace!("encoding PING; ack={} len={}", self.ack, sz); + + let flags = if self.ack { ACK_FLAG } else { 0 }; + let head = Head::new(Kind::Ping, flags, StreamId::zero()); + + head.encode(sz, dst); + dst.put_slice(&self.payload); + } +} + +impl From for Frame { + fn from(src: Ping) -> Frame { + Frame::Ping(src) + } +} diff --git a/third_party/rust/h2/src/frame/priority.rs b/third_party/rust/h2/src/frame/priority.rs new file mode 100644 index 000000000000..d0f84c0d9181 --- /dev/null +++ b/third_party/rust/h2/src/frame/priority.rs @@ -0,0 +1,72 @@ +use frame::*; + +#[derive(Debug, Eq, PartialEq)] +pub struct Priority { + stream_id: StreamId, + dependency: StreamDependency, +} + +#[derive(Debug, Eq, PartialEq)] +pub struct StreamDependency { + /// The ID of the stream dependency target + dependency_id: StreamId, + + /// The weight for the stream. The value exposed (and set) here is always in + /// the range [0, 255], instead of [1, 256] (as defined in section 5.3.2.) + /// so that the value fits into a `u8`. + weight: u8, + + /// True if the stream dependency is exclusive. 
+ is_exclusive: bool, +} + +impl Priority { + pub fn load(head: Head, payload: &[u8]) -> Result { + let dependency = StreamDependency::load(payload)?; + + if dependency.dependency_id() == head.stream_id() { + return Err(Error::InvalidDependencyId); + } + + Ok(Priority { + stream_id: head.stream_id(), + dependency: dependency, + }) + } +} + +impl From for Frame { + fn from(src: Priority) -> Self { + Frame::Priority(src) + } +} + +// ===== impl StreamDependency ===== + +impl StreamDependency { + pub fn new(dependency_id: StreamId, weight: u8, is_exclusive: bool) -> Self { + StreamDependency { + dependency_id, + weight, + is_exclusive, + } + } + + pub fn load(src: &[u8]) -> Result { + if src.len() != 5 { + return Err(Error::InvalidPayloadLength); + } + + // Parse the stream ID and exclusive flag + let (dependency_id, is_exclusive) = StreamId::parse(&src[..4]); + + // Read the weight + let weight = src[4]; + + Ok(StreamDependency::new(dependency_id, weight, is_exclusive)) + } + + pub fn dependency_id(&self) -> StreamId { + self.dependency_id + } +} diff --git a/third_party/rust/h2/src/frame/reason.rs b/third_party/rust/h2/src/frame/reason.rs new file mode 100644 index 000000000000..a33b98849ef3 --- /dev/null +++ b/third_party/rust/h2/src/frame/reason.rs @@ -0,0 +1,137 @@ +use std::fmt; + + +/// HTTP/2.0 error codes. +/// +/// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the +/// reasons for the stream or connection error. For example, +/// [`SendStream::send_reset`] takes a `Reason` argument. Also, the `Error` type +/// may contain a `Reason`. +/// +/// Error codes share a common code space. Some error codes apply only to +/// streams, others apply only to connections, and others may apply to either. +/// See [RFC 7540] for more information. +/// +/// See [Error Codes in the spec][spec]. 
+/// +/// [spec]: http://httpwg.org/specs/rfc7540.html#ErrorCodes +/// [`SendStream::send_reset`]: struct.SendStream.html#method.send_reset +#[derive(PartialEq, Eq, Clone, Copy)] +pub struct Reason(u32); + +impl Reason { + /// The associated condition is not a result of an error. + /// + /// For example, a GOAWAY might include this code to indicate graceful + /// shutdown of a connection. + pub const NO_ERROR: Reason = Reason(0); + /// The endpoint detected an unspecific protocol error. + /// + /// This error is for use when a more specific error code is not available. + pub const PROTOCOL_ERROR: Reason = Reason(1); + /// The endpoint encountered an unexpected internal error. + pub const INTERNAL_ERROR: Reason = Reason(2); + /// The endpoint detected that its peer violated the flow-control protocol. + pub const FLOW_CONTROL_ERROR: Reason = Reason(3); + /// The endpoint sent a SETTINGS frame but did not receive a response in + /// a timely manner. + pub const SETTINGS_TIMEOUT: Reason = Reason(4); + /// The endpoint received a frame after a stream was half-closed. + pub const STREAM_CLOSED: Reason = Reason(5); + /// The endpoint received a frame with an invalid size. + pub const FRAME_SIZE_ERROR: Reason = Reason(6); + /// The endpoint refused the stream prior to performing any application + /// processing. + pub const REFUSED_STREAM: Reason = Reason(7); + /// Used by the endpoint to indicate that the stream is no longer needed. + pub const CANCEL: Reason = Reason(8); + /// The endpoint is unable to maintain the header compression context for + /// the connection. + pub const COMPRESSION_ERROR: Reason = Reason(9); + /// The connection established in response to a CONNECT request was reset + /// or abnormally closed. + pub const CONNECT_ERROR: Reason = Reason(10); + /// The endpoint detected that its peer is exhibiting a behavior that might + /// be generating excessive load. 
+ pub const ENHANCE_YOUR_CALM: Reason = Reason(11); + /// The underlying transport has properties that do not meet minimum + /// security requirements. + pub const INADEQUATE_SECURITY: Reason = Reason(12); + /// The endpoint requires that HTTP/1.1 be used instead of HTTP/2. + pub const HTTP_1_1_REQUIRED: Reason = Reason(13); + + /// Get a string description of the error code. + pub fn description(&self) -> &str { + match self.0 { + 0 => "not a result of an error", + 1 => "unspecific protocol error detected", + 2 => "unexpected internal error encountered", + 3 => "flow-control protocol violated", + 4 => "settings ACK not received in timely manner", + 5 => "received frame when stream half-closed", + 6 => "frame with invalid size", + 7 => "refused stream before processing any application logic", + 8 => "stream no longer needed", + 9 => "unable to maintain the header compression context", + 10 => { + "connection established in response to a CONNECT request was reset or abnormally \ + closed" + }, + 11 => "detected excessive load generating behavior", + 12 => "security properties do not meet minimum requirements", + 13 => "endpoint requires HTTP/1.1", + _ => "unknown reason", + } + } +} + +impl From for Reason { + fn from(src: u32) -> Reason { + Reason(src) + } +} + +impl From for u32 { + fn from(src: Reason) -> u32 { + src.0 + } +} + +impl fmt::Debug for Reason { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let name = match self.0 { + 0 => "NO_ERROR", + 1 => "PROTOCOL_ERROR", + 2 => "INTERNAL_ERROR", + 3 => "FLOW_CONTROL_ERROR", + 4 => "SETTINGS_TIMEOUT", + 5 => "STREAM_CLOSED", + 6 => "FRAME_SIZE_ERROR", + 7 => "REFUSED_STREAM", + 8 => "CANCEL", + 9 => "COMPRESSION_ERROR", + 10 => "CONNECT_ERROR", + 11 => "ENHANCE_YOUR_CALM", + 12 => "INADEQUATE_SECURITY", + 13 => "HTTP_1_1_REQUIRED", + other => return f.debug_tuple("Reason") + .field(&Hex(other)) + .finish(), + }; + f.write_str(name) + } +} + +struct Hex(u32); + +impl fmt::Debug for Hex { + fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::LowerHex::fmt(&self.0, f) + } +} + +impl fmt::Display for Reason { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{}", self.description()) + } +} diff --git a/third_party/rust/h2/src/frame/reset.rs b/third_party/rust/h2/src/frame/reset.rs new file mode 100644 index 000000000000..c5e6ae28ea65 --- /dev/null +++ b/third_party/rust/h2/src/frame/reset.rs @@ -0,0 +1,56 @@ +use frame::{self, Error, Head, Kind, Reason, StreamId}; + +use bytes::{BufMut}; + +#[derive(Debug, Eq, PartialEq)] +pub struct Reset { + stream_id: StreamId, + error_code: Reason, +} + +impl Reset { + pub fn new(stream_id: StreamId, error: Reason) -> Reset { + Reset { + stream_id, + error_code: error, + } + } + + pub fn stream_id(&self) -> StreamId { + self.stream_id + } + + pub fn reason(&self) -> Reason { + self.error_code + } + + pub fn load(head: Head, payload: &[u8]) -> Result { + if payload.len() != 4 { + return Err(Error::InvalidPayloadLength); + } + + let error_code = unpack_octets_4!(payload, 0, u32); + + Ok(Reset { + stream_id: head.stream_id(), + error_code: error_code.into(), + }) + } + + pub fn encode(&self, dst: &mut B) { + trace!( + "encoding RESET; id={:?} code={:?}", + self.stream_id, + self.error_code + ); + let head = Head::new(Kind::Reset, 0, self.stream_id); + head.encode(4, dst); + dst.put_u32_be(self.error_code.into()); + } +} + +impl From for frame::Frame { + fn from(src: Reset) -> Self { + frame::Frame::Reset(src) + } +} diff --git a/third_party/rust/h2/src/frame/settings.rs b/third_party/rust/h2/src/frame/settings.rs new file mode 100644 index 000000000000..c91a7198fcd6 --- /dev/null +++ b/third_party/rust/h2/src/frame/settings.rs @@ -0,0 +1,312 @@ +use bytes::{BufMut, BytesMut}; +use frame::{Error, Frame, FrameSize, Head, Kind, StreamId}; + +#[derive(Debug, Clone, Default, Eq, PartialEq)] +pub struct Settings { + flags: SettingsFlags, + // Fields + header_table_size: Option, + enable_push: 
Option, + max_concurrent_streams: Option, + initial_window_size: Option, + max_frame_size: Option, + max_header_list_size: Option, +} + +/// An enum that lists all valid settings that can be sent in a SETTINGS +/// frame. +/// +/// Each setting has a value that is a 32 bit unsigned integer (6.5.1.). +#[derive(Debug)] +pub enum Setting { + HeaderTableSize(u32), + EnablePush(u32), + MaxConcurrentStreams(u32), + InitialWindowSize(u32), + MaxFrameSize(u32), + MaxHeaderListSize(u32), +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)] +pub struct SettingsFlags(u8); + +const ACK: u8 = 0x1; +const ALL: u8 = ACK; + +/// The default value of SETTINGS_HEADER_TABLE_SIZE +pub const DEFAULT_SETTINGS_HEADER_TABLE_SIZE: usize = 4_096; + +/// The default value of SETTINGS_INITIAL_WINDOW_SIZE +pub const DEFAULT_INITIAL_WINDOW_SIZE: u32 = 65_535; + +/// The default value of MAX_FRAME_SIZE +pub const DEFAULT_MAX_FRAME_SIZE: FrameSize = 16_384; + +/// INITIAL_WINDOW_SIZE upper bound +pub const MAX_INITIAL_WINDOW_SIZE: usize = (1 << 31) - 1; + +/// MAX_FRAME_SIZE upper bound +pub const MAX_MAX_FRAME_SIZE: FrameSize = (1 << 24) - 1; + +// ===== impl Settings ===== + +impl Settings { + pub fn ack() -> Settings { + Settings { + flags: SettingsFlags::ack(), + ..Settings::default() + } + } + + pub fn is_ack(&self) -> bool { + self.flags.is_ack() + } + + pub fn initial_window_size(&self) -> Option { + self.initial_window_size + } + + pub fn set_initial_window_size(&mut self, size: Option) { + self.initial_window_size = size; + } + + pub fn max_concurrent_streams(&self) -> Option { + self.max_concurrent_streams + } + + pub fn set_max_concurrent_streams(&mut self, max: Option) { + self.max_concurrent_streams = max; + } + + pub fn max_frame_size(&self) -> Option { + self.max_frame_size + } + + pub fn set_max_frame_size(&mut self, size: Option) { + if let Some(val) = size { + assert!(DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE); + } + self.max_frame_size = size; + } + + 
pub fn max_header_list_size(&self) -> Option { + self.max_header_list_size + } + + pub fn set_max_header_list_size(&mut self, size: Option) { + self.max_header_list_size = size; + } + + pub fn is_push_enabled(&self) -> bool { + self.enable_push.unwrap_or(1) != 0 + } + + pub fn set_enable_push(&mut self, enable: bool) { + self.enable_push = Some(enable as u32); + } + + pub fn load(head: Head, payload: &[u8]) -> Result { + use self::Setting::*; + + debug_assert_eq!(head.kind(), ::frame::Kind::Settings); + + if !head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } + + // Load the flag + let flag = SettingsFlags::load(head.flag()); + + if flag.is_ack() { + // Ensure that the payload is empty + if payload.len() > 0 { + return Err(Error::InvalidPayloadLength); + } + + // Return the ACK frame + return Ok(Settings::ack()); + } + + // Ensure the payload length is correct, each setting is 6 bytes long. + if payload.len() % 6 != 0 { + debug!("invalid settings payload length; len={:?}", payload.len()); + return Err(Error::InvalidPayloadAckSettings); + } + + let mut settings = Settings::default(); + debug_assert!(!settings.flags.is_ack()); + + for raw in payload.chunks(6) { + match Setting::load(raw) { + Some(HeaderTableSize(val)) => { + settings.header_table_size = Some(val); + }, + Some(EnablePush(val)) => match val { + 0 | 1 => { + settings.enable_push = Some(val); + }, + _ => { + return Err(Error::InvalidSettingValue); + }, + }, + Some(MaxConcurrentStreams(val)) => { + settings.max_concurrent_streams = Some(val); + }, + Some(InitialWindowSize(val)) => if val as usize > MAX_INITIAL_WINDOW_SIZE { + return Err(Error::InvalidSettingValue); + } else { + settings.initial_window_size = Some(val); + }, + Some(MaxFrameSize(val)) => { + if val < DEFAULT_MAX_FRAME_SIZE || val > MAX_MAX_FRAME_SIZE { + return Err(Error::InvalidSettingValue); + } else { + settings.max_frame_size = Some(val); + } + }, + Some(MaxHeaderListSize(val)) => { + settings.max_header_list_size = 
Some(val); + }, + None => {}, + } + } + + Ok(settings) + } + + fn payload_len(&self) -> usize { + let mut len = 0; + self.for_each(|_| len += 6); + len + } + + pub fn encode(&self, dst: &mut BytesMut) { + // Create & encode an appropriate frame head + let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); + let payload_len = self.payload_len(); + + trace!("encoding SETTINGS; len={}", payload_len); + + head.encode(payload_len, dst); + + // Encode the settings + self.for_each(|setting| { + trace!("encoding setting; val={:?}", setting); + setting.encode(dst) + }); + } + + fn for_each(&self, mut f: F) { + use self::Setting::*; + + if let Some(v) = self.header_table_size { + f(HeaderTableSize(v)); + } + + if let Some(v) = self.enable_push { + f(EnablePush(v)); + } + + if let Some(v) = self.max_concurrent_streams { + f(MaxConcurrentStreams(v)); + } + + if let Some(v) = self.initial_window_size { + f(InitialWindowSize(v)); + } + + if let Some(v) = self.max_frame_size { + f(MaxFrameSize(v)); + } + + if let Some(v) = self.max_header_list_size { + f(MaxHeaderListSize(v)); + } + } +} + +impl From for Frame { + fn from(src: Settings) -> Frame { + Frame::Settings(src) + } +} + +// ===== impl Setting ===== + +impl Setting { + /// Creates a new `Setting` with the correct variant corresponding to the + /// given setting id, based on the settings IDs defined in section + /// 6.5.2. + pub fn from_id(id: u16, val: u32) -> Option { + use self::Setting::*; + + match id { + 1 => Some(HeaderTableSize(val)), + 2 => Some(EnablePush(val)), + 3 => Some(MaxConcurrentStreams(val)), + 4 => Some(InitialWindowSize(val)), + 5 => Some(MaxFrameSize(val)), + 6 => Some(MaxHeaderListSize(val)), + _ => None, + } + } + + /// Creates a new `Setting` by parsing the given buffer of 6 bytes, which + /// contains the raw byte representation of the setting, according to the + /// "SETTINGS format" defined in section 6.5.1. 
+ /// + /// The `raw` parameter should have length at least 6 bytes, since the + /// length of the raw setting is exactly 6 bytes. + /// + /// # Panics + /// + /// If given a buffer shorter than 6 bytes, the function will panic. + fn load(raw: &[u8]) -> Option { + let id: u16 = ((raw[0] as u16) << 8) | (raw[1] as u16); + let val: u32 = unpack_octets_4!(raw, 2, u32); + + Setting::from_id(id, val) + } + + fn encode(&self, dst: &mut BytesMut) { + use self::Setting::*; + + let (kind, val) = match *self { + HeaderTableSize(v) => (1, v), + EnablePush(v) => (2, v), + MaxConcurrentStreams(v) => (3, v), + InitialWindowSize(v) => (4, v), + MaxFrameSize(v) => (5, v), + MaxHeaderListSize(v) => (6, v), + }; + + dst.put_u16_be(kind); + dst.put_u32_be(val); + } +} + +// ===== impl SettingsFlags ===== + +impl SettingsFlags { + pub fn empty() -> SettingsFlags { + SettingsFlags(0) + } + + pub fn load(bits: u8) -> SettingsFlags { + SettingsFlags(bits & ALL) + } + + pub fn ack() -> SettingsFlags { + SettingsFlags(ACK) + } + + pub fn is_ack(&self) -> bool { + self.0 & ACK == ACK + } +} + +impl From for u8 { + fn from(src: SettingsFlags) -> u8 { + src.0 + } +} diff --git a/third_party/rust/h2/src/frame/stream_id.rs b/third_party/rust/h2/src/frame/stream_id.rs new file mode 100644 index 000000000000..039936fc7145 --- /dev/null +++ b/third_party/rust/h2/src/frame/stream_id.rs @@ -0,0 +1,95 @@ +use byteorder::{BigEndian, ByteOrder}; +use std::u32; + +/// A stream identifier, as described in [Section 5.1.1] of RFC 7540. +/// +/// Streams are identified with an unsigned 31-bit integer. Streams +/// initiated by a client MUST use odd-numbered stream identifiers; those +/// initiated by the server MUST use even-numbered stream identifiers. A +/// stream identifier of zero (0x0) is used for connection control +/// messages; the stream identifier of zero cannot be used to establish a +/// new stream. 
+/// +/// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct StreamId(u32); + +#[derive(Debug, Copy, Clone)] +pub struct StreamIdOverflow; + +const STREAM_ID_MASK: u32 = 1 << 31; + +impl StreamId { + /// Stream ID 0. + pub const ZERO: StreamId = StreamId(0); + + /// The maximum allowed stream ID. + pub const MAX: StreamId = StreamId(u32::MAX >> 1); + + /// Parse the stream ID + #[inline] + pub fn parse(buf: &[u8]) -> (StreamId, bool) { + let unpacked = BigEndian::read_u32(buf); + let flag = unpacked & STREAM_ID_MASK == STREAM_ID_MASK; + + // Now clear the most significant bit, as that is reserved and MUST be + // ignored when received. + (StreamId(unpacked & !STREAM_ID_MASK), flag) + } + + /// Returns true if this stream ID corresponds to a stream that + /// was initiated by the client. + pub fn is_client_initiated(&self) -> bool { + let id = self.0; + id != 0 && id % 2 == 1 + } + + /// Returns true if this stream ID corresponds to a stream that + /// was initiated by the server. + pub fn is_server_initiated(&self) -> bool { + let id = self.0; + id != 0 && id % 2 == 0 + } + + /// Return a new `StreamId` for stream 0. + #[inline] + pub fn zero() -> StreamId { + StreamId::ZERO + } + + /// Returns true if this stream ID is zero. + pub fn is_zero(&self) -> bool { + self.0 == 0 + } + + /// Returns the next stream ID initiated by the same peer as this stream + /// ID, or an error if incrementing this stream ID would overflow the + /// maximum. 
+ pub fn next_id(&self) -> Result { + let next = self.0 + 2; + if next > StreamId::MAX.0 { + Err(StreamIdOverflow) + } else { + Ok(StreamId(next)) + } + } +} + +impl From for StreamId { + fn from(src: u32) -> Self { + assert_eq!(src & STREAM_ID_MASK, 0, "invalid stream ID -- MSB is set"); + StreamId(src) + } +} + +impl From for u32 { + fn from(src: StreamId) -> Self { + src.0 + } +} + +impl PartialEq for StreamId { + fn eq(&self, other: &u32) -> bool { + self.0 == *other + } +} diff --git a/third_party/rust/h2/src/frame/util.rs b/third_party/rust/h2/src/frame/util.rs new file mode 100644 index 000000000000..decc0306e708 --- /dev/null +++ b/third_party/rust/h2/src/frame/util.rs @@ -0,0 +1,36 @@ +use super::Error; +use bytes::Bytes; + +/// Strip padding from the given payload. +/// +/// It is assumed that the frame had the padded flag set. This means that the +/// first byte is the length of the padding with that many +/// 0 bytes expected to follow the actual payload. +/// +/// # Returns +/// +/// A slice of the given payload where the actual one is found and the length +/// of the padding. +/// +/// If the padded payload is invalid (e.g. the length of the padding is equal +/// to the total length), returns `None`. +pub fn strip_padding(payload: &mut Bytes) -> Result { + if payload.len() == 0 { + // If this is the case, the frame is invalid as no padding length can be + // extracted, even though the frame should be padded. + return Err(Error::TooMuchPadding); + } + + let pad_len = payload[0] as usize; + + if pad_len >= payload.len() { + // This is invalid: the padding length MUST be less than the + // total frame size. 
+ return Err(Error::TooMuchPadding); + } + + let _ = payload.split_to(1); + let _ = payload.split_off(pad_len); + + Ok(pad_len as u8) +} diff --git a/third_party/rust/h2/src/frame/window_update.rs b/third_party/rust/h2/src/frame/window_update.rs new file mode 100644 index 000000000000..687e079c4bba --- /dev/null +++ b/third_party/rust/h2/src/frame/window_update.rs @@ -0,0 +1,62 @@ +use frame::{self, Error, Head, Kind, StreamId}; + +use bytes::{BufMut}; + +const SIZE_INCREMENT_MASK: u32 = 1 << 31; + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct WindowUpdate { + stream_id: StreamId, + size_increment: u32, +} + +impl WindowUpdate { + pub fn new(stream_id: StreamId, size_increment: u32) -> WindowUpdate { + WindowUpdate { + stream_id, + size_increment, + } + } + + pub fn stream_id(&self) -> StreamId { + self.stream_id + } + + pub fn size_increment(&self) -> u32 { + self.size_increment + } + + /// Builds a `WindowUpdate` frame from a raw frame. + pub fn load(head: Head, payload: &[u8]) -> Result { + debug_assert_eq!(head.kind(), ::frame::Kind::WindowUpdate); + if payload.len() != 4 { + return Err(Error::BadFrameSize); + } + + // Clear the most significant bit, as that is reserved and MUST be ignored + // when received. 
+ let size_increment = unpack_octets_4!(payload, 0, u32) & !SIZE_INCREMENT_MASK; + + if size_increment == 0 { + return Err(Error::InvalidWindowUpdateValue.into()); + } + + Ok(WindowUpdate { + stream_id: head.stream_id(), + size_increment, + }) + } + + pub fn encode(&self, dst: &mut B) { + trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); + let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); + head.encode(4, dst); + dst.put_u32_be(self.size_increment); + } +} + +impl From for frame::Frame { + fn from(src: WindowUpdate) -> Self { + frame::Frame::WindowUpdate(src) + } +} diff --git a/third_party/rust/h2/src/hpack/decoder.rs b/third_party/rust/h2/src/hpack/decoder.rs new file mode 100644 index 000000000000..58ec7c709247 --- /dev/null +++ b/third_party/rust/h2/src/hpack/decoder.rs @@ -0,0 +1,852 @@ +use super::{huffman, Header}; +use frame; + +use bytes::{Buf, Bytes, BytesMut}; +use http::header; +use http::method::{self, Method}; +use http::status::{self, StatusCode}; +use string::String; + +use std::cmp; +use std::collections::VecDeque; +use std::io::Cursor; +use std::str::Utf8Error; + +/// Decodes headers using HPACK +#[derive(Debug)] +pub struct Decoder { + // Protocol indicated that the max table size will update + max_size_update: Option, + last_max_update: usize, + table: Table, + buffer: BytesMut, +} + +/// Represents all errors that can be encountered while performing the decoding +/// of an HPACK header set. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum DecoderError { + InvalidRepresentation, + InvalidIntegerPrefix, + InvalidTableIndex, + InvalidHuffmanCode, + InvalidUtf8, + InvalidStatusCode, + InvalidPseudoheader, + InvalidMaxDynamicSize, + IntegerOverflow, + NeedMore(NeedMore), +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum NeedMore { + UnexpectedEndOfStream, + IntegerUnderflow, + StringUnderflow, +} + +enum Representation { + /// Indexed header field representation + /// + /// An indexed header field representation identifies an entry in either the + /// static table or the dynamic table (see Section 2.3). + /// + /// # Header encoding + /// + /// ```text + /// 0 1 2 3 4 5 6 7 + /// +---+---+---+---+---+---+---+---+ + /// | 1 | Index (7+) | + /// +---+---------------------------+ + /// ``` + Indexed, + + /// Literal Header Field with Incremental Indexing + /// + /// A literal header field with incremental indexing representation results + /// in appending a header field to the decoded header list and inserting it + /// as a new entry into the dynamic table. + /// + /// # Header encoding + /// + /// ```text + /// 0 1 2 3 4 5 6 7 + /// +---+---+---+---+---+---+---+---+ + /// | 0 | 1 | Index (6+) | + /// +---+---+-----------------------+ + /// | H | Value Length (7+) | + /// +---+---------------------------+ + /// | Value String (Length octets) | + /// +-------------------------------+ + /// ``` + LiteralWithIndexing, + + /// Literal Header Field without Indexing + /// + /// A literal header field without indexing representation results in + /// appending a header field to the decoded header list without altering the + /// dynamic table. 
+ /// + /// # Header encoding + /// + /// ```text + /// 0 1 2 3 4 5 6 7 + /// +---+---+---+---+---+---+---+---+ + /// | 0 | 0 | 0 | 0 | Index (4+) | + /// +---+---+-----------------------+ + /// | H | Value Length (7+) | + /// +---+---------------------------+ + /// | Value String (Length octets) | + /// +-------------------------------+ + /// ``` + LiteralWithoutIndexing, + + /// Literal Header Field Never Indexed + /// + /// A literal header field never-indexed representation results in appending + /// a header field to the decoded header list without altering the dynamic + /// table. Intermediaries MUST use the same representation for encoding this + /// header field. + /// + /// ```text + /// 0 1 2 3 4 5 6 7 + /// +---+---+---+---+---+---+---+---+ + /// | 0 | 0 | 0 | 1 | Index (4+) | + /// +---+---+-----------------------+ + /// | H | Value Length (7+) | + /// +---+---------------------------+ + /// | Value String (Length octets) | + /// +-------------------------------+ + /// ``` + LiteralNeverIndexed, + + /// Dynamic Table Size Update + /// + /// A dynamic table size update signals a change to the size of the dynamic + /// table. + /// + /// # Header encoding + /// + /// ```text + /// 0 1 2 3 4 5 6 7 + /// +---+---+---+---+---+---+---+---+ + /// | 0 | 0 | 1 | Max size (5+) | + /// +---+---------------------------+ + /// ``` + SizeUpdate, +} + +#[derive(Debug)] +struct Table { + entries: VecDeque
, + size: usize, + max_size: usize, +} + +// ===== impl Decoder ===== + +impl Decoder { + /// Creates a new `Decoder` with all settings set to default values. + pub fn new(size: usize) -> Decoder { + Decoder { + max_size_update: None, + last_max_update: size, + table: Table::new(size), + buffer: BytesMut::with_capacity(4096), + } + } + + /// Queues a potential size update + #[allow(dead_code)] + pub fn queue_size_update(&mut self, size: usize) { + let size = match self.max_size_update { + Some(v) => cmp::max(v, size), + None => size, + }; + + self.max_size_update = Some(size); + } + + /// Decodes the headers found in the given buffer. + pub fn decode(&mut self, src: &mut Cursor<&mut BytesMut>, mut f: F) -> Result<(), DecoderError> + where + F: FnMut(Header), + { + use self::Representation::*; + + let mut can_resize = true; + + if let Some(size) = self.max_size_update.take() { + self.last_max_update = size; + } + + trace!("decode"); + + while let Some(ty) = peek_u8(src) { + // At this point we are always at the beginning of the next block + // within the HPACK data. The type of the block can always be + // determined from the first byte. + match Representation::load(ty)? 
{ + Indexed => { + trace!(" Indexed; rem={:?}", src.remaining()); + can_resize = false; + let entry = self.decode_indexed(src)?; + consume(src); + f(entry); + }, + LiteralWithIndexing => { + trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); + can_resize = false; + let entry = self.decode_literal(src, true)?; + + // Insert the header into the table + self.table.insert(entry.clone()); + consume(src); + + f(entry); + }, + LiteralWithoutIndexing => { + trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); + can_resize = false; + let entry = self.decode_literal(src, false)?; + consume(src); + f(entry); + }, + LiteralNeverIndexed => { + trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); + can_resize = false; + let entry = self.decode_literal(src, false)?; + consume(src); + + // TODO: Track that this should never be indexed + + f(entry); + }, + SizeUpdate => { + trace!(" SizeUpdate; rem={:?}", src.remaining()); + if !can_resize { + return Err(DecoderError::InvalidMaxDynamicSize); + } + + // Handle the dynamic table size update + self.process_size_update(src)?; + consume(src); + }, + } + } + + Ok(()) + } + + fn process_size_update(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<(), DecoderError> { + let new_size = decode_int(buf, 5)?; + + if new_size > self.last_max_update { + return Err(DecoderError::InvalidMaxDynamicSize); + } + + debug!( + "Decoder changed max table size from {} to {}", + self.table.size(), + new_size + ); + + self.table.set_max_size(new_size); + + Ok(()) + } + + fn decode_indexed(&self, buf: &mut Cursor<&mut BytesMut>) -> Result { + let index = decode_int(buf, 7)?; + self.table.get(index) + } + + fn decode_literal( + &mut self, + buf: &mut Cursor<&mut BytesMut>, + index: bool, + ) -> Result { + let prefix = if index { 6 } else { 4 }; + + // Extract the table index for the name, or 0 if not indexed + let table_idx = decode_int(buf, prefix)?; + + // First, read the header name + if table_idx == 0 { + // Read the name as a 
literal + let name = self.decode_string(buf)?; + let value = self.decode_string(buf)?; + + Header::new(name, value) + } else { + let e = self.table.get(table_idx)?; + let value = self.decode_string(buf)?; + + e.name().into_entry(value) + } + } + + fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { + const HUFF_FLAG: u8 = 0b10000000; + + // The first bit in the first byte contains the huffman encoded flag. + let huff = match peek_u8(buf) { + Some(hdr) => (hdr & HUFF_FLAG) == HUFF_FLAG, + None => return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)), + }; + + // Decode the string length using 7 bit prefix + let len = decode_int(buf, 7)?; + + if len > buf.remaining() { + trace!( + "decode_string underflow; len={}; remaining={}", + len, + buf.remaining() + ); + return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); + } + + if huff { + let ret = { + let raw = &buf.bytes()[..len]; + huffman::decode(raw, &mut self.buffer).map(Into::into) + }; + + buf.advance(len); + return ret; + } + + Ok(take(buf, len)) + } +} + +impl Default for Decoder { + fn default() -> Decoder { + Decoder::new(4096) + } +} + +// ===== impl Representation ===== + +impl Representation { + pub fn load(byte: u8) -> Result { + const INDEXED: u8 = 0b10000000; + const LITERAL_WITH_INDEXING: u8 = 0b01000000; + const LITERAL_WITHOUT_INDEXING: u8 = 0b11110000; + const LITERAL_NEVER_INDEXED: u8 = 0b00010000; + const SIZE_UPDATE_MASK: u8 = 0b11100000; + const SIZE_UPDATE: u8 = 0b00100000; + + // TODO: What did I even write here? 
+ + if byte & INDEXED == INDEXED { + Ok(Representation::Indexed) + } else if byte & LITERAL_WITH_INDEXING == LITERAL_WITH_INDEXING { + Ok(Representation::LiteralWithIndexing) + } else if byte & LITERAL_WITHOUT_INDEXING == 0 { + Ok(Representation::LiteralWithoutIndexing) + } else if byte & LITERAL_WITHOUT_INDEXING == LITERAL_NEVER_INDEXED { + Ok(Representation::LiteralNeverIndexed) + } else if byte & SIZE_UPDATE_MASK == SIZE_UPDATE { + Ok(Representation::SizeUpdate) + } else { + Err(DecoderError::InvalidRepresentation) + } + } +} + +fn decode_int(buf: &mut B, prefix_size: u8) -> Result { + // The octet limit is chosen such that the maximum allowed *value* can + // never overflow an unsigned 32-bit integer. The maximum value of any + // integer that can be encoded with 5 octets is ~2^28 + const MAX_BYTES: usize = 5; + const VARINT_MASK: u8 = 0b01111111; + const VARINT_FLAG: u8 = 0b10000000; + + if prefix_size < 1 || prefix_size > 8 { + return Err(DecoderError::InvalidIntegerPrefix); + } + + if !buf.has_remaining() { + return Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow)); + } + + let mask = if prefix_size == 8 { + 0xFF + } else { + (1u8 << prefix_size).wrapping_sub(1) + }; + + let mut ret = (buf.get_u8() & mask) as usize; + + if ret < mask as usize { + // Value fits in the prefix bits + return Ok(ret); + } + + // The int did not fit in the prefix bits, so continue reading. + // + // The total number of bytes used to represent the int. The first byte was + // the prefix, so start at 1. + let mut bytes = 1; + + // The rest of the int is stored as a varint -- 7 bits for the value and 1 + // bit to indicate if it is the last byte. 
+ let mut shift = 0; + + while buf.has_remaining() { + let b = buf.get_u8(); + + bytes += 1; + ret += ((b & VARINT_MASK) as usize) << shift; + shift += 7; + + if b & VARINT_FLAG == 0 { + return Ok(ret); + } + + if bytes == MAX_BYTES { + // The spec requires that this situation is an error + return Err(DecoderError::IntegerOverflow); + } + } + + Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow)) +} + +fn peek_u8(buf: &mut B) -> Option { + if buf.has_remaining() { + Some(buf.bytes()[0]) + } else { + None + } +} + +fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes { + let pos = buf.position() as usize; + let mut head = buf.get_mut().split_to(pos + n); + buf.set_position(0); + head.split_to(pos); + head.freeze() +} + +fn consume(buf: &mut Cursor<&mut BytesMut>) { + // remove bytes from the internal BytesMut when they have been successfully + // decoded. This is a more permanent cursor position, which will be + // used to resume if decoding was only partial. + take(buf, 0); +} + +// ===== impl Table ===== + +impl Table { + fn new(max_size: usize) -> Table { + Table { + entries: VecDeque::new(), + size: 0, + max_size: max_size, + } + } + + fn size(&self) -> usize { + self.size + } + + /// Returns the entry located at the given index. + /// + /// The table is 1-indexed and constructed in such a way that the first + /// entries belong to the static table, followed by entries in the dynamic + /// table. They are merged into a single index address space, though. + /// + /// This is according to the [HPACK spec, section 2.3.3.] + /// (http://http2.github.io/http2-spec/compression.html#index.address.space) + pub fn get(&self, index: usize) -> Result { + if index == 0 { + return Err(DecoderError::InvalidTableIndex); + } + + if index <= 61 { + return Ok(get_static(index)); + } + + // Convert the index for lookup in the entries structure. 
+ match self.entries.get(index - 62) { + Some(e) => Ok(e.clone()), + None => Err(DecoderError::InvalidTableIndex), + } + } + + fn insert(&mut self, entry: Header) { + let len = entry.len(); + + self.reserve(len); + + if self.size + len <= self.max_size { + self.size += len; + + // Track the entry + self.entries.push_front(entry); + } + } + + fn set_max_size(&mut self, size: usize) { + self.max_size = size; + // Make the table size fit within the new constraints. + self.consolidate(); + } + + fn reserve(&mut self, size: usize) { + while self.size + size > self.max_size { + match self.entries.pop_back() { + Some(last) => { + self.size -= last.len(); + } + None => return, + } + } + } + + fn consolidate(&mut self) { + while self.size > self.max_size { + { + let last = match self.entries.back() { + Some(x) => x, + None => { + // Can never happen as the size of the table must reach + // 0 by the time we've exhausted all elements. + panic!("Size of table != 0, but no headers left!"); + }, + }; + + self.size -= last.len(); + } + + self.entries.pop_back(); + } + } +} + +// ===== impl DecoderError ===== + +impl From for DecoderError { + fn from(_: Utf8Error) -> DecoderError { + // TODO: Better error? + DecoderError::InvalidUtf8 + } +} + +impl From for DecoderError { + fn from(_: header::InvalidHeaderValue) -> DecoderError { + // TODO: Better error? 
+ DecoderError::InvalidUtf8 + } +} + +impl From for DecoderError { + fn from(_: header::InvalidHeaderName) -> DecoderError { + // TODO: Better error + DecoderError::InvalidUtf8 + } +} + +impl From for DecoderError { + fn from(_: method::InvalidMethod) -> DecoderError { + // TODO: Better error + DecoderError::InvalidUtf8 + } +} + +impl From for DecoderError { + fn from(_: status::InvalidStatusCode) -> DecoderError { + // TODO: Better error + DecoderError::InvalidUtf8 + } +} + +impl From for frame::Error { + fn from(src: DecoderError) -> Self { + frame::Error::Hpack(src) + } +} + +/// Get an entry from the static table +pub fn get_static(idx: usize) -> Header { + use http::header; + use http::header::HeaderValue; + + match idx { + 1 => Header::Authority(from_static("")), + 2 => Header::Method(Method::GET), + 3 => Header::Method(Method::POST), + 4 => Header::Path(from_static("/")), + 5 => Header::Path(from_static("/index.html")), + 6 => Header::Scheme(from_static("http")), + 7 => Header::Scheme(from_static("https")), + 8 => Header::Status(StatusCode::OK), + 9 => Header::Status(StatusCode::NO_CONTENT), + 10 => Header::Status(StatusCode::PARTIAL_CONTENT), + 11 => Header::Status(StatusCode::NOT_MODIFIED), + 12 => Header::Status(StatusCode::BAD_REQUEST), + 13 => Header::Status(StatusCode::NOT_FOUND), + 14 => Header::Status(StatusCode::INTERNAL_SERVER_ERROR), + 15 => Header::Field { + name: header::ACCEPT_CHARSET, + value: HeaderValue::from_static(""), + }, + 16 => Header::Field { + name: header::ACCEPT_ENCODING, + value: HeaderValue::from_static("gzip, deflate"), + }, + 17 => Header::Field { + name: header::ACCEPT_LANGUAGE, + value: HeaderValue::from_static(""), + }, + 18 => Header::Field { + name: header::ACCEPT_RANGES, + value: HeaderValue::from_static(""), + }, + 19 => Header::Field { + name: header::ACCEPT, + value: HeaderValue::from_static(""), + }, + 20 => Header::Field { + name: header::ACCESS_CONTROL_ALLOW_ORIGIN, + value: HeaderValue::from_static(""), + }, + 21 
=> Header::Field { + name: header::AGE, + value: HeaderValue::from_static(""), + }, + 22 => Header::Field { + name: header::ALLOW, + value: HeaderValue::from_static(""), + }, + 23 => Header::Field { + name: header::AUTHORIZATION, + value: HeaderValue::from_static(""), + }, + 24 => Header::Field { + name: header::CACHE_CONTROL, + value: HeaderValue::from_static(""), + }, + 25 => Header::Field { + name: header::CONTENT_DISPOSITION, + value: HeaderValue::from_static(""), + }, + 26 => Header::Field { + name: header::CONTENT_ENCODING, + value: HeaderValue::from_static(""), + }, + 27 => Header::Field { + name: header::CONTENT_LANGUAGE, + value: HeaderValue::from_static(""), + }, + 28 => Header::Field { + name: header::CONTENT_LENGTH, + value: HeaderValue::from_static(""), + }, + 29 => Header::Field { + name: header::CONTENT_LOCATION, + value: HeaderValue::from_static(""), + }, + 30 => Header::Field { + name: header::CONTENT_RANGE, + value: HeaderValue::from_static(""), + }, + 31 => Header::Field { + name: header::CONTENT_TYPE, + value: HeaderValue::from_static(""), + }, + 32 => Header::Field { + name: header::COOKIE, + value: HeaderValue::from_static(""), + }, + 33 => Header::Field { + name: header::DATE, + value: HeaderValue::from_static(""), + }, + 34 => Header::Field { + name: header::ETAG, + value: HeaderValue::from_static(""), + }, + 35 => Header::Field { + name: header::EXPECT, + value: HeaderValue::from_static(""), + }, + 36 => Header::Field { + name: header::EXPIRES, + value: HeaderValue::from_static(""), + }, + 37 => Header::Field { + name: header::FROM, + value: HeaderValue::from_static(""), + }, + 38 => Header::Field { + name: header::HOST, + value: HeaderValue::from_static(""), + }, + 39 => Header::Field { + name: header::IF_MATCH, + value: HeaderValue::from_static(""), + }, + 40 => Header::Field { + name: header::IF_MODIFIED_SINCE, + value: HeaderValue::from_static(""), + }, + 41 => Header::Field { + name: header::IF_NONE_MATCH, + value: 
HeaderValue::from_static(""), + }, + 42 => Header::Field { + name: header::IF_RANGE, + value: HeaderValue::from_static(""), + }, + 43 => Header::Field { + name: header::IF_UNMODIFIED_SINCE, + value: HeaderValue::from_static(""), + }, + 44 => Header::Field { + name: header::LAST_MODIFIED, + value: HeaderValue::from_static(""), + }, + 45 => Header::Field { + name: header::LINK, + value: HeaderValue::from_static(""), + }, + 46 => Header::Field { + name: header::LOCATION, + value: HeaderValue::from_static(""), + }, + 47 => Header::Field { + name: header::MAX_FORWARDS, + value: HeaderValue::from_static(""), + }, + 48 => Header::Field { + name: header::PROXY_AUTHENTICATE, + value: HeaderValue::from_static(""), + }, + 49 => Header::Field { + name: header::PROXY_AUTHORIZATION, + value: HeaderValue::from_static(""), + }, + 50 => Header::Field { + name: header::RANGE, + value: HeaderValue::from_static(""), + }, + 51 => Header::Field { + name: header::REFERER, + value: HeaderValue::from_static(""), + }, + 52 => Header::Field { + name: header::REFRESH, + value: HeaderValue::from_static(""), + }, + 53 => Header::Field { + name: header::RETRY_AFTER, + value: HeaderValue::from_static(""), + }, + 54 => Header::Field { + name: header::SERVER, + value: HeaderValue::from_static(""), + }, + 55 => Header::Field { + name: header::SET_COOKIE, + value: HeaderValue::from_static(""), + }, + 56 => Header::Field { + name: header::STRICT_TRANSPORT_SECURITY, + value: HeaderValue::from_static(""), + }, + 57 => Header::Field { + name: header::TRANSFER_ENCODING, + value: HeaderValue::from_static(""), + }, + 58 => Header::Field { + name: header::USER_AGENT, + value: HeaderValue::from_static(""), + }, + 59 => Header::Field { + name: header::VARY, + value: HeaderValue::from_static(""), + }, + 60 => Header::Field { + name: header::VIA, + value: HeaderValue::from_static(""), + }, + 61 => Header::Field { + name: header::WWW_AUTHENTICATE, + value: HeaderValue::from_static(""), + }, + _ => unreachable!(), 
+ } +} + +fn from_static(s: &'static str) -> String { + unsafe { String::from_utf8_unchecked(Bytes::from_static(s.as_bytes())) } +} + +#[cfg(test)] +mod test { + use super::*; + use hpack::Header; + + #[test] + fn test_peek_u8() { + let b = 0xff; + let mut buf = Cursor::new(vec![b]); + assert_eq!(peek_u8(&mut buf), Some(b)); + assert_eq!(buf.get_u8(), b); + assert_eq!(peek_u8(&mut buf), None); + } + + #[test] + fn test_decode_string_empty() { + let mut de = Decoder::new(0); + let mut buf = BytesMut::new(); + let err = de.decode_string(&mut Cursor::new(&mut buf)).unwrap_err(); + assert_eq!(err, DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)); + } + + #[test] + fn test_decode_empty() { + let mut de = Decoder::new(0); + let mut buf = BytesMut::new(); + let empty = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); + assert_eq!(empty, ()); + } + + #[test] + fn test_decode_indexed_larger_than_table() { + let mut de = Decoder::new(0); + + let mut buf = vec![0b01000000, 0x80 | 2]; + buf.extend(huff_encode(b"foo")); + buf.extend(&[0x80 | 3]); + buf.extend(huff_encode(b"bar")); + + let mut buf = buf.into(); + + let mut res = vec![]; + let _ = de.decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }).unwrap(); + + assert_eq!(res.len(), 1); + assert_eq!(de.table.size(), 0); + + match res[0] { + Header::Field { ref name, ref value } => { + assert_eq!(name, "foo"); + assert_eq!(value, "bar"); + } + _ => panic!(), + } + } + + fn huff_encode(src: &[u8]) -> BytesMut { + let mut buf = BytesMut::new(); + huffman::encode(src, &mut buf).unwrap(); + buf + } +} diff --git a/third_party/rust/h2/src/hpack/encoder.rs b/third_party/rust/h2/src/hpack/encoder.rs new file mode 100644 index 000000000000..9c3a4c6df95b --- /dev/null +++ b/third_party/rust/h2/src/hpack/encoder.rs @@ -0,0 +1,840 @@ +use super::{huffman, Header}; +use super::table::{Index, Table}; + +use bytes::{BufMut, BytesMut}; +use http::header::{HeaderName, HeaderValue}; + +#[derive(Debug)] +pub struct 
Encoder { + table: Table, + size_update: Option, +} + +#[derive(Debug)] +pub enum Encode { + Full, + Partial(EncodeState), +} + +#[derive(Debug)] +pub struct EncodeState { + index: Index, + value: Option, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum EncoderError { + BufferOverflow, +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum SizeUpdate { + One(usize), + Two(usize, usize), // min, max +} + +impl Encoder { + pub fn new(max_size: usize, capacity: usize) -> Encoder { + Encoder { + table: Table::new(max_size, capacity), + size_update: None, + } + } + + /// Queues a max size update. + /// + /// The next call to `encode` will include a dynamic size update frame. + #[allow(dead_code)] + pub fn update_max_size(&mut self, val: usize) { + match self.size_update { + Some(SizeUpdate::One(old)) => if val > old { + if old > self.table.max_size() { + self.size_update = Some(SizeUpdate::One(val)); + } else { + self.size_update = Some(SizeUpdate::Two(old, val)); + } + } else { + self.size_update = Some(SizeUpdate::One(val)); + }, + Some(SizeUpdate::Two(min, _)) => if val < min { + self.size_update = Some(SizeUpdate::One(val)); + } else { + self.size_update = Some(SizeUpdate::Two(min, val)); + }, + None => { + if val != self.table.max_size() { + // Don't bother writing a frame if the value already matches + // the table's max size. 
+ self.size_update = Some(SizeUpdate::One(val)); + } + }, + } + } + + /// Encode a set of headers into the provide buffer + pub fn encode( + &mut self, + resume: Option, + headers: &mut I, + dst: &mut BytesMut, + ) -> Encode + where + I: Iterator>>, + { + let len = dst.len(); + + if let Err(e) = self.encode_size_updates(dst) { + if e == EncoderError::BufferOverflow { + dst.truncate(len); + } + + unreachable!(); + } + + if let Some(resume) = resume { + let len = dst.len(); + + let res = match resume.value { + Some(ref value) => self.encode_header_without_name(&resume.index, value, dst), + None => self.encode_header(&resume.index, dst), + }; + + if res.is_err() { + dst.truncate(len); + return Encode::Partial(resume); + } + } + + let mut last_index = None; + + for header in headers { + let len = dst.len(); + + match header.reify() { + // The header has an associated name. In which case, try to + // index it in the table. + Ok(header) => { + let index = self.table.index(header); + let res = self.encode_header(&index, dst); + + if res.is_err() { + dst.truncate(len); + return Encode::Partial(EncodeState { + index: index, + value: None, + }); + } + + last_index = Some(index); + }, + // The header does not have an associated name. This means that + // the name is the same as the previously yielded header. In + // which case, we skip table lookup and just use the same index + // as the previous entry. 
+ Err(value) => { + let res = + self.encode_header_without_name(last_index.as_ref().unwrap(), &value, dst); + + if res.is_err() { + dst.truncate(len); + return Encode::Partial(EncodeState { + index: last_index.unwrap(), + value: Some(value), + }); + } + }, + }; + } + + Encode::Full + } + + fn encode_size_updates(&mut self, dst: &mut BytesMut) -> Result<(), EncoderError> { + match self.size_update.take() { + Some(SizeUpdate::One(val)) => { + self.table.resize(val); + encode_size_update(val, dst)?; + }, + Some(SizeUpdate::Two(min, max)) => { + self.table.resize(min); + self.table.resize(max); + encode_size_update(min, dst)?; + encode_size_update(max, dst)?; + }, + None => {}, + } + + Ok(()) + } + + fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) -> Result<(), EncoderError> { + match *index { + Index::Indexed(idx, _) => { + encode_int(idx, 7, 0x80, dst)?; + }, + Index::Name(idx, _) => { + let header = self.table.resolve(&index); + + encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst)?; + }, + Index::Inserted(_) => { + let header = self.table.resolve(&index); + + assert!(!header.is_sensitive()); + + if !dst.has_remaining_mut() { + return Err(EncoderError::BufferOverflow); + } + + dst.put_u8(0b01000000); + + encode_str(header.name().as_slice(), dst)?; + encode_str(header.value_slice(), dst)?; + }, + Index::InsertedValue(idx, _) => { + let header = self.table.resolve(&index); + + assert!(!header.is_sensitive()); + + encode_int(idx, 6, 0b01000000, dst)?; + encode_str(header.value_slice(), dst)?; + }, + Index::NotIndexed(_) => { + let header = self.table.resolve(&index); + + encode_not_indexed2( + header.name().as_slice(), + header.value_slice(), + header.is_sensitive(), + dst, + )?; + }, + } + + Ok(()) + } + + fn encode_header_without_name( + &mut self, + last: &Index, + value: &HeaderValue, + dst: &mut BytesMut, + ) -> Result<(), EncoderError> { + match *last { + Index::Indexed(..) | + Index::Name(..) | + Index::Inserted(..) 
| + Index::InsertedValue(..) => { + let idx = self.table.resolve_idx(last); + + encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst)?; + }, + Index::NotIndexed(_) => { + let last = self.table.resolve(last); + + encode_not_indexed2( + last.name().as_slice(), + value.as_ref(), + value.is_sensitive(), + dst, + )?; + }, + } + + Ok(()) + } +} + +impl Default for Encoder { + fn default() -> Encoder { + Encoder::new(4096, 0) + } +} + +fn encode_size_update(val: usize, dst: &mut B) -> Result<(), EncoderError> { + encode_int(val, 5, 0b00100000, dst) +} + +fn encode_not_indexed( + name: usize, + value: &[u8], + sensitive: bool, + dst: &mut BytesMut, +) -> Result<(), EncoderError> { + if sensitive { + encode_int(name, 4, 0b10000, dst)?; + } else { + encode_int(name, 4, 0, dst)?; + } + + encode_str(value, dst)?; + Ok(()) +} + +fn encode_not_indexed2( + name: &[u8], + value: &[u8], + sensitive: bool, + dst: &mut BytesMut, +) -> Result<(), EncoderError> { + if !dst.has_remaining_mut() { + return Err(EncoderError::BufferOverflow); + } + + if sensitive { + dst.put_u8(0b10000); + } else { + dst.put_u8(0); + } + + encode_str(name, dst)?; + encode_str(value, dst)?; + Ok(()) +} + +fn encode_str(val: &[u8], dst: &mut BytesMut) -> Result<(), EncoderError> { + use std::io::Cursor; + + if !dst.has_remaining_mut() { + return Err(EncoderError::BufferOverflow); + } + + if val.len() != 0 { + let idx = dst.len(); + + // Push a placeholder byte for the length header + dst.put_u8(0); + + // Encode with huffman + huffman::encode(val, dst)?; + + let huff_len = dst.len() - (idx + 1); + + if encode_int_one_byte(huff_len, 7) { + // Write the string head + dst[idx] = 0x80 | huff_len as u8; + } else { + // Write the head to a placeholer + let mut buf = [0; 8]; + + let head_len = { + let mut head_dst = Cursor::new(&mut buf); + encode_int(huff_len, 7, 0x80, &mut head_dst)?; + head_dst.position() as usize + }; + + if dst.remaining_mut() < head_len { + return 
Err(EncoderError::BufferOverflow); + } + + // This is just done to reserve space in the destination + dst.put_slice(&buf[1..head_len]); + + // Shift the header forward + for i in 0..huff_len { + let src_i = idx + 1 + (huff_len - (i + 1)); + let dst_i = idx + head_len + (huff_len - (i + 1)); + dst[dst_i] = dst[src_i]; + } + + // Copy in the head + for i in 0..head_len { + dst[idx + i] = buf[i]; + } + } + } else { + // Write an empty string + dst.put_u8(0); + } + + Ok(()) +} + +/// Encode an integer into the given destination buffer +fn encode_int( + mut value: usize, // The integer to encode + prefix_bits: usize, // The number of bits in the prefix + first_byte: u8, // The base upon which to start encoding the int + dst: &mut B, +) -> Result<(), EncoderError> { + let mut rem = dst.remaining_mut(); + + if rem == 0 { + return Err(EncoderError::BufferOverflow); + } + + if encode_int_one_byte(value, prefix_bits) { + dst.put_u8(first_byte | value as u8); + return Ok(()); + } + + let low = (1 << prefix_bits) - 1; + + value -= low; + + if value > 0x0fffffff { + panic!("value out of range"); + } + + dst.put_u8(first_byte | low as u8); + rem -= 1; + + while value >= 128 { + if rem == 0 { + return Err(EncoderError::BufferOverflow); + } + + dst.put_u8(0b10000000 | value as u8); + rem -= 1; + + value = value >> 7; + } + + if rem == 0 { + return Err(EncoderError::BufferOverflow); + } + + dst.put_u8(value as u8); + + Ok(()) +} + +/// Returns true if the in the int can be fully encoded in the first byte. 
+fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool { + value < (1 << prefix_bits) - 1 +} + +#[cfg(test)] +mod test { + use super::*; + use hpack::Header; + use http::*; + + #[test] + fn test_encode_method_get() { + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![method("GET")]); + assert_eq!(*res, [0x80 | 2]); + assert_eq!(encoder.table.len(), 0); + } + + #[test] + fn test_encode_method_post() { + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![method("POST")]); + assert_eq!(*res, [0x80 | 3]); + assert_eq!(encoder.table.len(), 0); + } + + #[test] + fn test_encode_method_patch() { + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![method("PATCH")]); + + assert_eq!(res[0], 0b01000000 | 2); // Incremental indexing w/ name pulled from table + assert_eq!(res[1], 0x80 | 5); // header value w/ huffman coding + + assert_eq!("PATCH", huff_decode(&res[2..7])); + assert_eq!(encoder.table.len(), 1); + + let res = encode(&mut encoder, vec![method("PATCH")]); + + assert_eq!(1 << 7 | 62, res[0]); + assert_eq!(1, res.len()); + } + + #[test] + fn test_repeated_headers_are_indexed() { + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![header("foo", "hello")]); + + assert_eq!(&[0b01000000, 0x80 | 2], &res[0..2]); + assert_eq!("foo", huff_decode(&res[2..4])); + assert_eq!(0x80 | 4, res[4]); + assert_eq!("hello", huff_decode(&res[5..])); + assert_eq!(9, res.len()); + + assert_eq!(1, encoder.table.len()); + + let res = encode(&mut encoder, vec![header("foo", "hello")]); + assert_eq!([0x80 | 62], *res); + + assert_eq!(encoder.table.len(), 1); + } + + #[test] + fn test_evicting_headers() { + let mut encoder = Encoder::default(); + + // Fill the table + for i in 0..64 { + let key = format!("x-hello-world-{:02}", i); + let res = encode(&mut encoder, vec![header(&key, &key)]); + + assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]); + assert_eq!(key, 
huff_decode(&res[2..14])); + assert_eq!(0x80 | 12, res[14]); + assert_eq!(key, huff_decode(&res[15..])); + assert_eq!(27, res.len()); + + // Make sure the header can be found... + let res = encode(&mut encoder, vec![header(&key, &key)]); + + // Only check that it is found + assert_eq!(0x80, res[0] & 0x80); + } + + assert_eq!(4096, encoder.table.size()); + assert_eq!(64, encoder.table.len()); + + // Find existing headers + for i in 0..64 { + let key = format!("x-hello-world-{:02}", i); + let res = encode(&mut encoder, vec![header(&key, &key)]); + assert_eq!(0x80, res[0] & 0x80); + } + + // Insert a new header + let key = "x-hello-world-64"; + let res = encode(&mut encoder, vec![header(key, key)]); + + assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]); + assert_eq!(key, huff_decode(&res[2..14])); + assert_eq!(0x80 | 12, res[14]); + assert_eq!(key, huff_decode(&res[15..])); + assert_eq!(27, res.len()); + + assert_eq!(64, encoder.table.len()); + + // Now try encoding entries that should exist in the table + for i in 1..65 { + let key = format!("x-hello-world-{:02}", i); + let res = encode(&mut encoder, vec![header(&key, &key)]); + assert_eq!(0x80 | (61 + (65 - i)), res[0]); + } + } + + #[test] + fn test_large_headers_are_not_indexed() { + let mut encoder = Encoder::new(128, 0); + let key = "hello-world-hello-world-HELLO-zzz"; + + let res = encode(&mut encoder, vec![header(key, key)]); + + assert_eq!(&[0, 0x80 | 25], &res[..2]); + + assert_eq!(0, encoder.table.len()); + assert_eq!(0, encoder.table.size()); + } + + #[test] + fn test_sensitive_headers_are_never_indexed() { + use http::header::HeaderValue; + + let name = "my-password".parse().unwrap(); + let mut value = HeaderValue::from_bytes(b"12345").unwrap(); + value.set_sensitive(true); + + let header = Header::Field { + name: Some(name), + value: value, + }; + + // Now, try to encode the sensitive header + + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![header]); + + assert_eq!(&[0b10000, 
0x80 | 8], &res[..2]); + assert_eq!("my-password", huff_decode(&res[2..10])); + assert_eq!(0x80 | 4, res[10]); + assert_eq!("12345", huff_decode(&res[11..])); + + // Now, try to encode a sensitive header w/ a name in the static table + let name = "authorization".parse().unwrap(); + let mut value = HeaderValue::from_bytes(b"12345").unwrap(); + value.set_sensitive(true); + + let header = Header::Field { + name: Some(name), + value: value, + }; + + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![header]); + + assert_eq!(&[0b11111, 8], &res[..2]); + assert_eq!(0x80 | 4, res[2]); + assert_eq!("12345", huff_decode(&res[3..])); + + // Using the name component of a previously indexed header (without + // sensitive flag set) + + let _ = encode( + &mut encoder, + vec![self::header("my-password", "not-so-secret")], + ); + + let name = "my-password".parse().unwrap(); + let mut value = HeaderValue::from_bytes(b"12345").unwrap(); + value.set_sensitive(true); + + let header = Header::Field { + name: Some(name), + value: value, + }; + let res = encode(&mut encoder, vec![header]); + + assert_eq!(&[0b11111, 47], &res[..2]); + assert_eq!(0x80 | 4, res[2]); + assert_eq!("12345", huff_decode(&res[3..])); + } + + #[test] + fn test_content_length_value_not_indexed() { + let mut encoder = Encoder::default(); + let res = encode(&mut encoder, vec![header("content-length", "1234")]); + + assert_eq!(&[15, 13, 0x80 | 3], &res[0..3]); + assert_eq!("1234", huff_decode(&res[3..])); + assert_eq!(6, res.len()); + } + + #[test] + fn test_encoding_headers_with_same_name() { + let mut encoder = Encoder::default(); + let name = "hello"; + + // Encode first one + let _ = encode(&mut encoder, vec![header(name, "one")]); + + // Encode second one + let res = encode(&mut encoder, vec![header(name, "two")]); + assert_eq!(&[0x40 | 62, 0x80 | 3], &res[0..2]); + assert_eq!("two", huff_decode(&res[2..])); + assert_eq!(5, res.len()); + + // Encode the first one again + let res = 
encode(&mut encoder, vec![header(name, "one")]); + assert_eq!(&[0x80 | 63], &res[..]); + + // Now the second one + let res = encode(&mut encoder, vec![header(name, "two")]); + assert_eq!(&[0x80 | 62], &res[..]); + } + + #[test] + fn test_evicting_headers_when_multiple_of_same_name_are_in_table() { + // The encoder only has space for 2 headers + let mut encoder = Encoder::new(76, 0); + + let _ = encode(&mut encoder, vec![header("foo", "bar")]); + assert_eq!(1, encoder.table.len()); + + let _ = encode(&mut encoder, vec![header("bar", "foo")]); + assert_eq!(2, encoder.table.len()); + + // This will evict the first header, while still referencing the header + // name + let res = encode(&mut encoder, vec![header("foo", "baz")]); + assert_eq!(&[0x40 | 63, 0, 0x80 | 3], &res[..3]); + assert_eq!(2, encoder.table.len()); + + // Try adding the same header again + let res = encode(&mut encoder, vec![header("foo", "baz")]); + assert_eq!(&[0x80 | 62], &res[..]); + assert_eq!(2, encoder.table.len()); + } + + #[test] + fn test_max_size_zero() { + // Static table only + let mut encoder = Encoder::new(0, 0); + let res = encode(&mut encoder, vec![method("GET")]); + assert_eq!(*res, [0x80 | 2]); + assert_eq!(encoder.table.len(), 0); + + let res = encode(&mut encoder, vec![header("foo", "bar")]); + assert_eq!(&[0, 0x80 | 2], &res[..2]); + assert_eq!("foo", huff_decode(&res[2..4])); + assert_eq!(0x80 | 3, res[4]); + assert_eq!("bar", huff_decode(&res[5..8])); + assert_eq!(0, encoder.table.len()); + + // Encode a custom value + let res = encode(&mut encoder, vec![header("transfer-encoding", "chunked")]); + assert_eq!(&[15, 42, 0x80 | 6], &res[..3]); + assert_eq!("chunked", huff_decode(&res[3..])); + } + + #[test] + fn test_update_max_size_combos() { + let mut encoder = Encoder::default(); + assert!(encoder.size_update.is_none()); + assert_eq!(4096, encoder.table.max_size()); + + encoder.update_max_size(4096); // Default size + assert!(encoder.size_update.is_none()); + + 
encoder.update_max_size(0); + assert_eq!(Some(SizeUpdate::One(0)), encoder.size_update); + + encoder.update_max_size(100); + assert_eq!(Some(SizeUpdate::Two(0, 100)), encoder.size_update); + + let mut encoder = Encoder::default(); + encoder.update_max_size(8000); + assert_eq!(Some(SizeUpdate::One(8000)), encoder.size_update); + + encoder.update_max_size(100); + assert_eq!(Some(SizeUpdate::One(100)), encoder.size_update); + + encoder.update_max_size(8000); + assert_eq!(Some(SizeUpdate::Two(100, 8000)), encoder.size_update); + + encoder.update_max_size(4000); + assert_eq!(Some(SizeUpdate::Two(100, 4000)), encoder.size_update); + + encoder.update_max_size(50); + assert_eq!(Some(SizeUpdate::One(50)), encoder.size_update); + } + + #[test] + fn test_resizing_table() { + let mut encoder = Encoder::default(); + + // Add a header + let _ = encode(&mut encoder, vec![header("foo", "bar")]); + + encoder.update_max_size(1); + assert_eq!(1, encoder.table.len()); + + let res = encode(&mut encoder, vec![method("GET")]); + assert_eq!(&[32 | 1, 0x80 | 2], &res[..]); + assert_eq!(0, encoder.table.len()); + + let res = encode(&mut encoder, vec![header("foo", "bar")]); + assert_eq!(0, res[0]); + + encoder.update_max_size(100); + let res = encode(&mut encoder, vec![header("foo", "bar")]); + assert_eq!(&[32 | 31, 69, 64], &res[..3]); + + encoder.update_max_size(0); + let res = encode(&mut encoder, vec![header("foo", "bar")]); + assert_eq!(&[32, 0], &res[..2]); + } + + #[test] + fn test_decreasing_table_size_without_eviction() { + let mut encoder = Encoder::default(); + + // Add a header + let _ = encode(&mut encoder, vec![header("foo", "bar")]); + + encoder.update_max_size(100); + assert_eq!(1, encoder.table.len()); + + let res = encode(&mut encoder, vec![header("foo", "bar")]); + assert_eq!(&[32 | 31, 69, 0x80 | 62], &res[..]); + } + + #[test] + fn test_nameless_header() { + let mut encoder = Encoder::default(); + + let res = encode( + &mut encoder, + vec![ + Header::Field { + name: 
Some("hello".parse().unwrap()), + value: HeaderValue::from_bytes(b"world").unwrap(), + }, + Header::Field { + name: None, + value: HeaderValue::from_bytes(b"zomg").unwrap(), + }, + ], + ); + + assert_eq!(&[0x40, 0x80 | 4], &res[0..2]); + assert_eq!("hello", huff_decode(&res[2..6])); + assert_eq!(0x80 | 4, res[6]); + assert_eq!("world", huff_decode(&res[7..11])); + + // Next is not indexed + assert_eq!(&[15, 47, 0x80 | 3], &res[11..14]); + assert_eq!("zomg", huff_decode(&res[14..])); + } + + #[test] + fn test_nameless_header_at_resume() { + let mut encoder = Encoder::default(); + let mut dst = BytesMut::from(Vec::with_capacity(11)); + + let mut input = vec![ + Header::Field { + name: Some("hello".parse().unwrap()), + value: HeaderValue::from_bytes(b"world").unwrap(), + }, + Header::Field { + name: None, + value: HeaderValue::from_bytes(b"zomg").unwrap(), + }, + ].into_iter(); + + let resume = match encoder.encode(None, &mut input, &mut dst) { + Encode::Partial(r) => r, + _ => panic!(), + }; + + assert_eq!(&[0x40, 0x80 | 4], &dst[0..2]); + assert_eq!("hello", huff_decode(&dst[2..6])); + assert_eq!(0x80 | 4, dst[6]); + assert_eq!("world", huff_decode(&dst[7..11])); + + dst.clear(); + + match encoder.encode(Some(resume), &mut input, &mut dst) { + Encode::Full => {}, + _ => panic!(), + } + + // Next is not indexed + assert_eq!(&[15, 47, 0x80 | 3], &dst[0..3]); + assert_eq!("zomg", huff_decode(&dst[3..])); + } + + #[test] + #[ignore] + fn test_evicted_overflow() { + // Not sure what the best way to do this is. 
+ } + + fn encode(e: &mut Encoder, hdrs: Vec>>) -> BytesMut { + let mut dst = BytesMut::with_capacity(1024); + e.encode(None, &mut hdrs.into_iter(), &mut dst); + dst + } + + fn method(s: &str) -> Header> { + Header::Method(Method::from_bytes(s.as_bytes()).unwrap()) + } + + fn header(name: &str, val: &str) -> Header> { + use http::header::{HeaderName, HeaderValue}; + + let name = HeaderName::from_bytes(name.as_bytes()).unwrap(); + let value = HeaderValue::from_bytes(val.as_bytes()).unwrap(); + + Header::Field { + name: Some(name), + value: value, + } + } + + fn huff_decode(src: &[u8]) -> BytesMut { + let mut buf = BytesMut::new(); + huffman::decode(src, &mut buf).unwrap() + } +} diff --git a/third_party/rust/h2/src/hpack/header.rs b/third_party/rust/h2/src/hpack/header.rs new file mode 100644 index 000000000000..9034a03c5202 --- /dev/null +++ b/third_party/rust/h2/src/hpack/header.rs @@ -0,0 +1,264 @@ +use super::{DecoderError, NeedMore}; + +use bytes::Bytes; +use http::{Method, StatusCode}; +use http::header::{HeaderName, HeaderValue}; +use string::{String, TryFrom}; + +/// HTTP/2.0 Header +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum Header { + Field { name: T, value: HeaderValue }, + // TODO: Change these types to `http::uri` types. 
+ Authority(String), + Method(Method), + Scheme(String), + Path(String), + Status(StatusCode), +} + +/// The header field name +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub enum Name<'a> { + Field(&'a HeaderName), + Authority, + Method, + Scheme, + Path, + Status, +} + +pub fn len(name: &HeaderName, value: &HeaderValue) -> usize { + let n: &str = name.as_ref(); + 32 + n.len() + value.len() +} + +impl Header> { + pub fn reify(self) -> Result { + use self::Header::*; + + Ok(match self { + Field { + name: Some(n), + value, + } => Field { + name: n, + value: value, + }, + Field { + name: None, + value, + } => return Err(value), + Authority(v) => Authority(v), + Method(v) => Method(v), + Scheme(v) => Scheme(v), + Path(v) => Path(v), + Status(v) => Status(v), + }) + } +} + +impl Header { + pub fn new(name: Bytes, value: Bytes) -> Result { + if name.len() == 0 { + return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)); + } + if name[0] == b':' { + match &name[1..] { + b"authority" => { + let value = String::try_from(value)?; + Ok(Header::Authority(value)) + }, + b"method" => { + let method = Method::from_bytes(&value)?; + Ok(Header::Method(method)) + }, + b"scheme" => { + let value = String::try_from(value)?; + Ok(Header::Scheme(value)) + }, + b"path" => { + let value = String::try_from(value)?; + Ok(Header::Path(value)) + }, + b"status" => { + let status = StatusCode::from_bytes(&value)?; + Ok(Header::Status(status)) + }, + _ => Err(DecoderError::InvalidPseudoheader), + } + } else { + // HTTP/2 requires lower case header names + let name = HeaderName::from_lowercase(&name)?; + let value = HeaderValue::from_bytes(&value)?; + + Ok(Header::Field { + name: name, + value: value, + }) + } + } + + pub fn len(&self) -> usize { + match *self { + Header::Field { + ref name, + ref value, + } => len(name, value), + Header::Authority(ref v) => 32 + 10 + v.len(), + Header::Method(ref v) => 32 + 7 + v.as_ref().len(), + Header::Scheme(ref v) => 32 + 7 + v.len(), + 
Header::Path(ref v) => 32 + 5 + v.len(), + Header::Status(_) => 32 + 7 + 3, + } + } + + /// Returns the header name + pub fn name(&self) -> Name { + match *self { + Header::Field { + ref name, .. + } => Name::Field(name), + Header::Authority(..) => Name::Authority, + Header::Method(..) => Name::Method, + Header::Scheme(..) => Name::Scheme, + Header::Path(..) => Name::Path, + Header::Status(..) => Name::Status, + } + } + + pub fn value_slice(&self) -> &[u8] { + match *self { + Header::Field { + ref value, .. + } => value.as_ref(), + Header::Authority(ref v) => v.as_ref(), + Header::Method(ref v) => v.as_ref().as_ref(), + Header::Scheme(ref v) => v.as_ref(), + Header::Path(ref v) => v.as_ref(), + Header::Status(ref v) => v.as_str().as_ref(), + } + } + + pub fn value_eq(&self, other: &Header) -> bool { + match *self { + Header::Field { + ref value, .. + } => { + let a = value; + match *other { + Header::Field { + ref value, .. + } => a == value, + _ => false, + } + }, + Header::Authority(ref a) => match *other { + Header::Authority(ref b) => a == b, + _ => false, + }, + Header::Method(ref a) => match *other { + Header::Method(ref b) => a == b, + _ => false, + }, + Header::Scheme(ref a) => match *other { + Header::Scheme(ref b) => a == b, + _ => false, + }, + Header::Path(ref a) => match *other { + Header::Path(ref b) => a == b, + _ => false, + }, + Header::Status(ref a) => match *other { + Header::Status(ref b) => a == b, + _ => false, + }, + } + } + + pub fn is_sensitive(&self) -> bool { + match *self { + Header::Field { + ref value, .. + } => value.is_sensitive(), + // TODO: Technically these other header values can be sensitive too. + _ => false, + } + } + + pub fn skip_value_index(&self) -> bool { + use http::header; + + match *self { + Header::Field { + ref name, .. 
+ } => match *name { + header::AGE | + header::AUTHORIZATION | + header::CONTENT_LENGTH | + header::ETAG | + header::IF_MODIFIED_SINCE | + header::IF_NONE_MATCH | + header::LOCATION | + header::COOKIE | + header::SET_COOKIE => true, + _ => false, + }, + Header::Path(..) => true, + _ => false, + } + } +} + +// Mostly for tests +impl From
for Header> { + fn from(src: Header) -> Self { + match src { + Header::Field { + name, + value, + } => Header::Field { + name: Some(name), + value, + }, + Header::Authority(v) => Header::Authority(v), + Header::Method(v) => Header::Method(v), + Header::Scheme(v) => Header::Scheme(v), + Header::Path(v) => Header::Path(v), + Header::Status(v) => Header::Status(v), + } + } +} + +impl<'a> Name<'a> { + pub fn into_entry(self, value: Bytes) -> Result { + match self { + Name::Field(name) => Ok(Header::Field { + name: name.clone(), + value: HeaderValue::from_bytes(&*value)?, + }), + Name::Authority => Ok(Header::Authority(String::try_from(value)?)), + Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), + Name::Scheme => Ok(Header::Scheme(String::try_from(value)?)), + Name::Path => Ok(Header::Path(String::try_from(value)?)), + Name::Status => { + match StatusCode::from_bytes(&value) { + Ok(status) => Ok(Header::Status(status)), + // TODO: better error handling + Err(_) => Err(DecoderError::InvalidStatusCode), + } + }, + } + } + + pub fn as_slice(&self) -> &[u8] { + match *self { + Name::Field(ref name) => name.as_ref(), + Name::Authority => b":authority", + Name::Method => b":method", + Name::Scheme => b":scheme", + Name::Path => b":path", + Name::Status => b":status", + } + } +} diff --git a/third_party/rust/h2/src/hpack/huffman/mod.rs b/third_party/rust/h2/src/hpack/huffman/mod.rs new file mode 100644 index 000000000000..c0d5df0d4765 --- /dev/null +++ b/third_party/rust/h2/src/hpack/huffman/mod.rs @@ -0,0 +1,213 @@ +mod table; + +use self::table::{DECODE_TABLE, ENCODE_TABLE}; +use hpack::{DecoderError, EncoderError}; + +use bytes::{BufMut, BytesMut}; + +// Constructed in the generated `table.rs` file +struct Decoder { + state: usize, + maybe_eos: bool, +} + +// These flags must match the ones in genhuff.rs + +const MAYBE_EOS: u8 = 1; +const DECODED: u8 = 2; +const ERROR: u8 = 4; + +pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result { + let mut decoder = 
Decoder::new(); + + // Max compression ratio is >= 0.5 + buf.reserve(src.len() << 1); + + for b in src { + if let Some(b) = decoder.decode4(b >> 4)? { + buf.put_u8(b); + } + + if let Some(b) = decoder.decode4(b & 0xf)? { + buf.put_u8(b); + } + } + + if !decoder.is_final() { + return Err(DecoderError::InvalidHuffmanCode); + } + + Ok(buf.take()) +} + +// TODO: return error when there is not enough room to encode the value +pub fn encode(src: &[u8], dst: &mut B) -> Result<(), EncoderError> { + let mut bits: u64 = 0; + let mut bits_left = 40; + let mut rem = dst.remaining_mut(); + + for &b in src { + let (nbits, code) = ENCODE_TABLE[b as usize]; + + bits |= code << (bits_left - nbits); + bits_left -= nbits; + + while bits_left <= 32 { + if rem == 0 { + return Err(EncoderError::BufferOverflow); + } + + dst.put_u8((bits >> 32) as u8); + + bits <<= 8; + bits_left += 8; + rem -= 1; + } + } + + if bits_left != 40 { + if rem == 0 { + return Err(EncoderError::BufferOverflow); + } + + // This writes the EOS token + bits |= (1 << bits_left) - 1; + dst.put_u8((bits >> 32) as u8); + } + + Ok(()) +} + +impl Decoder { + fn new() -> Decoder { + Decoder { + state: 0, + maybe_eos: false, + } + } + + // Decodes 4 bits + fn decode4(&mut self, input: u8) -> Result, DecoderError> { + // (next-state, byte, flags) + let (next, byte, flags) = DECODE_TABLE[self.state][input as usize]; + + if flags & ERROR == ERROR { + // Data followed the EOS marker + return Err(DecoderError::InvalidHuffmanCode); + } + + let mut ret = None; + + if flags & DECODED == DECODED { + ret = Some(byte); + } + + self.state = next; + self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS; + + Ok(ret) + } + + fn is_final(&self) -> bool { + self.state == 0 || self.maybe_eos + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn decode(src: &[u8]) -> Result { + let mut buf = BytesMut::new(); + super::decode(src, &mut buf) + } + + #[test] + fn decode_single_byte() { + assert_eq!("o", decode(&[0b00111111]).unwrap()); + 
assert_eq!("0", decode(&[0x0 + 7]).unwrap()); + assert_eq!("A", decode(&[(0x21 << 2) + 3]).unwrap()); + } + + #[test] + fn single_char_multi_byte() { + assert_eq!("#", decode(&[255, 160 + 15]).unwrap()); + assert_eq!("$", decode(&[255, 200 + 7]).unwrap()); + assert_eq!("\x0a", decode(&[255, 255, 255, 240 + 3]).unwrap()); + } + + #[test] + fn multi_char() { + assert_eq!("!0", decode(&[254, 1]).unwrap()); + assert_eq!(" !", decode(&[0b01010011, 0b11111000]).unwrap()); + } + + #[test] + fn encode_single_byte() { + let mut dst = Vec::with_capacity(1); + + encode(b"o", &mut dst).unwrap(); + assert_eq!(&dst[..], &[0b00111111]); + + dst.clear(); + encode(b"0", &mut dst).unwrap(); + assert_eq!(&dst[..], &[0x0 + 7]); + + dst.clear(); + encode(b"A", &mut dst).unwrap(); + assert_eq!(&dst[..], &[(0x21 << 2) + 3]); + } + + #[test] + fn encode_decode_str() { + const DATA: &'static [&'static str] = &[ + "hello world", + ":method", + ":scheme", + ":authority", + "yahoo.co.jp", + "GET", + "http", + ":path", + "/images/top/sp2/cmn/logo-ns-130528.png", + "example.com", + "hpack-test", + "xxxxxxx1", + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20100101 Firefox/16.0", + "accept", + "Accept", + "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "cookie", + "B=76j09a189a6h4&b=3&s=0b", + "TE", + "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi non bibendum libero. 
\ + Etiam ultrices lorem ut.", + ]; + + for s in DATA { + let mut dst = Vec::with_capacity(s.len()); + + encode(s.as_bytes(), &mut dst).unwrap(); + + let decoded = decode(&dst).unwrap(); + + assert_eq!(&decoded[..], s.as_bytes()); + } + } + + #[test] + fn encode_decode_u8() { + const DATA: &'static [&'static [u8]] = + &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; + + for s in DATA { + let mut dst = Vec::with_capacity(s.len()); + + encode(s, &mut dst).unwrap(); + + let decoded = decode(&dst).unwrap(); + + assert_eq!(&decoded[..], &s[..]); + } + } +} diff --git a/third_party/rust/h2/src/hpack/huffman/table.rs b/third_party/rust/h2/src/hpack/huffman/table.rs new file mode 100644 index 000000000000..8fe1a7ac8c93 --- /dev/null +++ b/third_party/rust/h2/src/hpack/huffman/table.rs @@ -0,0 +1,5130 @@ +// !!! DO NOT EDIT !!! Generated by util/genhuff/src/main.rs + +// (num-bits, bits) +pub const ENCODE_TABLE: [(usize, u64); 257] = [ + (13, 0x1ff8), + (23, 0x7fffd8), + (28, 0xfffffe2), + (28, 0xfffffe3), + (28, 0xfffffe4), + (28, 0xfffffe5), + (28, 0xfffffe6), + (28, 0xfffffe7), + (28, 0xfffffe8), + (24, 0xffffea), + (30, 0x3ffffffc), + (28, 0xfffffe9), + (28, 0xfffffea), + (30, 0x3ffffffd), + (28, 0xfffffeb), + (28, 0xfffffec), + (28, 0xfffffed), + (28, 0xfffffee), + (28, 0xfffffef), + (28, 0xffffff0), + (28, 0xffffff1), + (28, 0xffffff2), + (30, 0x3ffffffe), + (28, 0xffffff3), + (28, 0xffffff4), + (28, 0xffffff5), + (28, 0xffffff6), + (28, 0xffffff7), + (28, 0xffffff8), + (28, 0xffffff9), + (28, 0xffffffa), + (28, 0xffffffb), + (6, 0x14), + (10, 0x3f8), + (10, 0x3f9), + (12, 0xffa), + (13, 0x1ff9), + (6, 0x15), + (8, 0xf8), + (11, 0x7fa), + (10, 0x3fa), + (10, 0x3fb), + (8, 0xf9), + (11, 0x7fb), + (8, 0xfa), + (6, 0x16), + (6, 0x17), + (6, 0x18), + (5, 0x0), + (5, 0x1), + (5, 0x2), + (6, 0x19), + (6, 0x1a), + (6, 0x1b), + (6, 0x1c), + (6, 0x1d), + (6, 0x1e), + (6, 0x1f), + (7, 0x5c), + (8, 0xfb), + (15, 0x7ffc), + (6, 0x20), + (12, 0xffb), + (10, 0x3fc), + 
(13, 0x1ffa), + (6, 0x21), + (7, 0x5d), + (7, 0x5e), + (7, 0x5f), + (7, 0x60), + (7, 0x61), + (7, 0x62), + (7, 0x63), + (7, 0x64), + (7, 0x65), + (7, 0x66), + (7, 0x67), + (7, 0x68), + (7, 0x69), + (7, 0x6a), + (7, 0x6b), + (7, 0x6c), + (7, 0x6d), + (7, 0x6e), + (7, 0x6f), + (7, 0x70), + (7, 0x71), + (7, 0x72), + (8, 0xfc), + (7, 0x73), + (8, 0xfd), + (13, 0x1ffb), + (19, 0x7fff0), + (13, 0x1ffc), + (14, 0x3ffc), + (6, 0x22), + (15, 0x7ffd), + (5, 0x3), + (6, 0x23), + (5, 0x4), + (6, 0x24), + (5, 0x5), + (6, 0x25), + (6, 0x26), + (6, 0x27), + (5, 0x6), + (7, 0x74), + (7, 0x75), + (6, 0x28), + (6, 0x29), + (6, 0x2a), + (5, 0x7), + (6, 0x2b), + (7, 0x76), + (6, 0x2c), + (5, 0x8), + (5, 0x9), + (6, 0x2d), + (7, 0x77), + (7, 0x78), + (7, 0x79), + (7, 0x7a), + (7, 0x7b), + (15, 0x7ffe), + (11, 0x7fc), + (14, 0x3ffd), + (13, 0x1ffd), + (28, 0xffffffc), + (20, 0xfffe6), + (22, 0x3fffd2), + (20, 0xfffe7), + (20, 0xfffe8), + (22, 0x3fffd3), + (22, 0x3fffd4), + (22, 0x3fffd5), + (23, 0x7fffd9), + (22, 0x3fffd6), + (23, 0x7fffda), + (23, 0x7fffdb), + (23, 0x7fffdc), + (23, 0x7fffdd), + (23, 0x7fffde), + (24, 0xffffeb), + (23, 0x7fffdf), + (24, 0xffffec), + (24, 0xffffed), + (22, 0x3fffd7), + (23, 0x7fffe0), + (24, 0xffffee), + (23, 0x7fffe1), + (23, 0x7fffe2), + (23, 0x7fffe3), + (23, 0x7fffe4), + (21, 0x1fffdc), + (22, 0x3fffd8), + (23, 0x7fffe5), + (22, 0x3fffd9), + (23, 0x7fffe6), + (23, 0x7fffe7), + (24, 0xffffef), + (22, 0x3fffda), + (21, 0x1fffdd), + (20, 0xfffe9), + (22, 0x3fffdb), + (22, 0x3fffdc), + (23, 0x7fffe8), + (23, 0x7fffe9), + (21, 0x1fffde), + (23, 0x7fffea), + (22, 0x3fffdd), + (22, 0x3fffde), + (24, 0xfffff0), + (21, 0x1fffdf), + (22, 0x3fffdf), + (23, 0x7fffeb), + (23, 0x7fffec), + (21, 0x1fffe0), + (21, 0x1fffe1), + (22, 0x3fffe0), + (21, 0x1fffe2), + (23, 0x7fffed), + (22, 0x3fffe1), + (23, 0x7fffee), + (23, 0x7fffef), + (20, 0xfffea), + (22, 0x3fffe2), + (22, 0x3fffe3), + (22, 0x3fffe4), + (23, 0x7ffff0), + (22, 0x3fffe5), + (22, 0x3fffe6), + (23, 
0x7ffff1), + (26, 0x3ffffe0), + (26, 0x3ffffe1), + (20, 0xfffeb), + (19, 0x7fff1), + (22, 0x3fffe7), + (23, 0x7ffff2), + (22, 0x3fffe8), + (25, 0x1ffffec), + (26, 0x3ffffe2), + (26, 0x3ffffe3), + (26, 0x3ffffe4), + (27, 0x7ffffde), + (27, 0x7ffffdf), + (26, 0x3ffffe5), + (24, 0xfffff1), + (25, 0x1ffffed), + (19, 0x7fff2), + (21, 0x1fffe3), + (26, 0x3ffffe6), + (27, 0x7ffffe0), + (27, 0x7ffffe1), + (26, 0x3ffffe7), + (27, 0x7ffffe2), + (24, 0xfffff2), + (21, 0x1fffe4), + (21, 0x1fffe5), + (26, 0x3ffffe8), + (26, 0x3ffffe9), + (28, 0xffffffd), + (27, 0x7ffffe3), + (27, 0x7ffffe4), + (27, 0x7ffffe5), + (20, 0xfffec), + (24, 0xfffff3), + (20, 0xfffed), + (21, 0x1fffe6), + (22, 0x3fffe9), + (21, 0x1fffe7), + (21, 0x1fffe8), + (23, 0x7ffff3), + (22, 0x3fffea), + (22, 0x3fffeb), + (25, 0x1ffffee), + (25, 0x1ffffef), + (24, 0xfffff4), + (24, 0xfffff5), + (26, 0x3ffffea), + (23, 0x7ffff4), + (26, 0x3ffffeb), + (27, 0x7ffffe6), + (26, 0x3ffffec), + (26, 0x3ffffed), + (27, 0x7ffffe7), + (27, 0x7ffffe8), + (27, 0x7ffffe9), + (27, 0x7ffffea), + (27, 0x7ffffeb), + (28, 0xffffffe), + (27, 0x7ffffec), + (27, 0x7ffffed), + (27, 0x7ffffee), + (27, 0x7ffffef), + (27, 0x7fffff0), + (26, 0x3ffffee), + (30, 0x3fffffff), +]; + +// (next-state, byte, flags) +pub const DECODE_TABLE: [[(usize, u8, u8); 16]; 256] = [ + // 0 + [ + (4, 0, 0x00), + (5, 0, 0x00), + (7, 0, 0x00), + (8, 0, 0x00), + (11, 0, 0x00), + (12, 0, 0x00), + (16, 0, 0x00), + (19, 0, 0x00), + (25, 0, 0x00), + (28, 0, 0x00), + (32, 0, 0x00), + (35, 0, 0x00), + (42, 0, 0x00), + (49, 0, 0x00), + (57, 0, 0x00), + (64, 0, 0x01), + ], + // 1 + [ + (0, 48, 0x02), + (0, 49, 0x02), + (0, 50, 0x02), + (0, 97, 0x02), + (0, 99, 0x02), + (0, 101, 0x02), + (0, 105, 0x02), + (0, 111, 0x02), + (0, 115, 0x02), + (0, 116, 0x02), + (13, 0, 0x00), + (14, 0, 0x00), + (17, 0, 0x00), + (18, 0, 0x00), + (20, 0, 0x00), + (21, 0, 0x00), + ], + // 2 + [ + (1, 48, 0x02), + (22, 48, 0x03), + (1, 49, 0x02), + (22, 49, 0x03), + (1, 50, 0x02), + (22, 50, 
0x03), + (1, 97, 0x02), + (22, 97, 0x03), + (1, 99, 0x02), + (22, 99, 0x03), + (1, 101, 0x02), + (22, 101, 0x03), + (1, 105, 0x02), + (22, 105, 0x03), + (1, 111, 0x02), + (22, 111, 0x03), + ], + // 3 + [ + (2, 48, 0x02), + (9, 48, 0x02), + (23, 48, 0x02), + (40, 48, 0x03), + (2, 49, 0x02), + (9, 49, 0x02), + (23, 49, 0x02), + (40, 49, 0x03), + (2, 50, 0x02), + (9, 50, 0x02), + (23, 50, 0x02), + (40, 50, 0x03), + (2, 97, 0x02), + (9, 97, 0x02), + (23, 97, 0x02), + (40, 97, 0x03), + ], + // 4 + [ + (3, 48, 0x02), + (6, 48, 0x02), + (10, 48, 0x02), + (15, 48, 0x02), + (24, 48, 0x02), + (31, 48, 0x02), + (41, 48, 0x02), + (56, 48, 0x03), + (3, 49, 0x02), + (6, 49, 0x02), + (10, 49, 0x02), + (15, 49, 0x02), + (24, 49, 0x02), + (31, 49, 0x02), + (41, 49, 0x02), + (56, 49, 0x03), + ], + // 5 + [ + (3, 50, 0x02), + (6, 50, 0x02), + (10, 50, 0x02), + (15, 50, 0x02), + (24, 50, 0x02), + (31, 50, 0x02), + (41, 50, 0x02), + (56, 50, 0x03), + (3, 97, 0x02), + (6, 97, 0x02), + (10, 97, 0x02), + (15, 97, 0x02), + (24, 97, 0x02), + (31, 97, 0x02), + (41, 97, 0x02), + (56, 97, 0x03), + ], + // 6 + [ + (2, 99, 0x02), + (9, 99, 0x02), + (23, 99, 0x02), + (40, 99, 0x03), + (2, 101, 0x02), + (9, 101, 0x02), + (23, 101, 0x02), + (40, 101, 0x03), + (2, 105, 0x02), + (9, 105, 0x02), + (23, 105, 0x02), + (40, 105, 0x03), + (2, 111, 0x02), + (9, 111, 0x02), + (23, 111, 0x02), + (40, 111, 0x03), + ], + // 7 + [ + (3, 99, 0x02), + (6, 99, 0x02), + (10, 99, 0x02), + (15, 99, 0x02), + (24, 99, 0x02), + (31, 99, 0x02), + (41, 99, 0x02), + (56, 99, 0x03), + (3, 101, 0x02), + (6, 101, 0x02), + (10, 101, 0x02), + (15, 101, 0x02), + (24, 101, 0x02), + (31, 101, 0x02), + (41, 101, 0x02), + (56, 101, 0x03), + ], + // 8 + [ + (3, 105, 0x02), + (6, 105, 0x02), + (10, 105, 0x02), + (15, 105, 0x02), + (24, 105, 0x02), + (31, 105, 0x02), + (41, 105, 0x02), + (56, 105, 0x03), + (3, 111, 0x02), + (6, 111, 0x02), + (10, 111, 0x02), + (15, 111, 0x02), + (24, 111, 0x02), + (31, 111, 0x02), + (41, 111, 0x02), + 
(56, 111, 0x03), + ], + // 9 + [ + (1, 115, 0x02), + (22, 115, 0x03), + (1, 116, 0x02), + (22, 116, 0x03), + (0, 32, 0x02), + (0, 37, 0x02), + (0, 45, 0x02), + (0, 46, 0x02), + (0, 47, 0x02), + (0, 51, 0x02), + (0, 52, 0x02), + (0, 53, 0x02), + (0, 54, 0x02), + (0, 55, 0x02), + (0, 56, 0x02), + (0, 57, 0x02), + ], + // 10 + [ + (2, 115, 0x02), + (9, 115, 0x02), + (23, 115, 0x02), + (40, 115, 0x03), + (2, 116, 0x02), + (9, 116, 0x02), + (23, 116, 0x02), + (40, 116, 0x03), + (1, 32, 0x02), + (22, 32, 0x03), + (1, 37, 0x02), + (22, 37, 0x03), + (1, 45, 0x02), + (22, 45, 0x03), + (1, 46, 0x02), + (22, 46, 0x03), + ], + // 11 + [ + (3, 115, 0x02), + (6, 115, 0x02), + (10, 115, 0x02), + (15, 115, 0x02), + (24, 115, 0x02), + (31, 115, 0x02), + (41, 115, 0x02), + (56, 115, 0x03), + (3, 116, 0x02), + (6, 116, 0x02), + (10, 116, 0x02), + (15, 116, 0x02), + (24, 116, 0x02), + (31, 116, 0x02), + (41, 116, 0x02), + (56, 116, 0x03), + ], + // 12 + [ + (2, 32, 0x02), + (9, 32, 0x02), + (23, 32, 0x02), + (40, 32, 0x03), + (2, 37, 0x02), + (9, 37, 0x02), + (23, 37, 0x02), + (40, 37, 0x03), + (2, 45, 0x02), + (9, 45, 0x02), + (23, 45, 0x02), + (40, 45, 0x03), + (2, 46, 0x02), + (9, 46, 0x02), + (23, 46, 0x02), + (40, 46, 0x03), + ], + // 13 + [ + (3, 32, 0x02), + (6, 32, 0x02), + (10, 32, 0x02), + (15, 32, 0x02), + (24, 32, 0x02), + (31, 32, 0x02), + (41, 32, 0x02), + (56, 32, 0x03), + (3, 37, 0x02), + (6, 37, 0x02), + (10, 37, 0x02), + (15, 37, 0x02), + (24, 37, 0x02), + (31, 37, 0x02), + (41, 37, 0x02), + (56, 37, 0x03), + ], + // 14 + [ + (3, 45, 0x02), + (6, 45, 0x02), + (10, 45, 0x02), + (15, 45, 0x02), + (24, 45, 0x02), + (31, 45, 0x02), + (41, 45, 0x02), + (56, 45, 0x03), + (3, 46, 0x02), + (6, 46, 0x02), + (10, 46, 0x02), + (15, 46, 0x02), + (24, 46, 0x02), + (31, 46, 0x02), + (41, 46, 0x02), + (56, 46, 0x03), + ], + // 15 + [ + (1, 47, 0x02), + (22, 47, 0x03), + (1, 51, 0x02), + (22, 51, 0x03), + (1, 52, 0x02), + (22, 52, 0x03), + (1, 53, 0x02), + (22, 53, 0x03), + (1, 54, 
0x02), + (22, 54, 0x03), + (1, 55, 0x02), + (22, 55, 0x03), + (1, 56, 0x02), + (22, 56, 0x03), + (1, 57, 0x02), + (22, 57, 0x03), + ], + // 16 + [ + (2, 47, 0x02), + (9, 47, 0x02), + (23, 47, 0x02), + (40, 47, 0x03), + (2, 51, 0x02), + (9, 51, 0x02), + (23, 51, 0x02), + (40, 51, 0x03), + (2, 52, 0x02), + (9, 52, 0x02), + (23, 52, 0x02), + (40, 52, 0x03), + (2, 53, 0x02), + (9, 53, 0x02), + (23, 53, 0x02), + (40, 53, 0x03), + ], + // 17 + [ + (3, 47, 0x02), + (6, 47, 0x02), + (10, 47, 0x02), + (15, 47, 0x02), + (24, 47, 0x02), + (31, 47, 0x02), + (41, 47, 0x02), + (56, 47, 0x03), + (3, 51, 0x02), + (6, 51, 0x02), + (10, 51, 0x02), + (15, 51, 0x02), + (24, 51, 0x02), + (31, 51, 0x02), + (41, 51, 0x02), + (56, 51, 0x03), + ], + // 18 + [ + (3, 52, 0x02), + (6, 52, 0x02), + (10, 52, 0x02), + (15, 52, 0x02), + (24, 52, 0x02), + (31, 52, 0x02), + (41, 52, 0x02), + (56, 52, 0x03), + (3, 53, 0x02), + (6, 53, 0x02), + (10, 53, 0x02), + (15, 53, 0x02), + (24, 53, 0x02), + (31, 53, 0x02), + (41, 53, 0x02), + (56, 53, 0x03), + ], + // 19 + [ + (2, 54, 0x02), + (9, 54, 0x02), + (23, 54, 0x02), + (40, 54, 0x03), + (2, 55, 0x02), + (9, 55, 0x02), + (23, 55, 0x02), + (40, 55, 0x03), + (2, 56, 0x02), + (9, 56, 0x02), + (23, 56, 0x02), + (40, 56, 0x03), + (2, 57, 0x02), + (9, 57, 0x02), + (23, 57, 0x02), + (40, 57, 0x03), + ], + // 20 + [ + (3, 54, 0x02), + (6, 54, 0x02), + (10, 54, 0x02), + (15, 54, 0x02), + (24, 54, 0x02), + (31, 54, 0x02), + (41, 54, 0x02), + (56, 54, 0x03), + (3, 55, 0x02), + (6, 55, 0x02), + (10, 55, 0x02), + (15, 55, 0x02), + (24, 55, 0x02), + (31, 55, 0x02), + (41, 55, 0x02), + (56, 55, 0x03), + ], + // 21 + [ + (3, 56, 0x02), + (6, 56, 0x02), + (10, 56, 0x02), + (15, 56, 0x02), + (24, 56, 0x02), + (31, 56, 0x02), + (41, 56, 0x02), + (56, 56, 0x03), + (3, 57, 0x02), + (6, 57, 0x02), + (10, 57, 0x02), + (15, 57, 0x02), + (24, 57, 0x02), + (31, 57, 0x02), + (41, 57, 0x02), + (56, 57, 0x03), + ], + // 22 + [ + (26, 0, 0x00), + (27, 0, 0x00), + (29, 0, 0x00), + 
(30, 0, 0x00), + (33, 0, 0x00), + (34, 0, 0x00), + (36, 0, 0x00), + (37, 0, 0x00), + (43, 0, 0x00), + (46, 0, 0x00), + (50, 0, 0x00), + (53, 0, 0x00), + (58, 0, 0x00), + (61, 0, 0x00), + (65, 0, 0x00), + (68, 0, 0x01), + ], + // 23 + [ + (0, 61, 0x02), + (0, 65, 0x02), + (0, 95, 0x02), + (0, 98, 0x02), + (0, 100, 0x02), + (0, 102, 0x02), + (0, 103, 0x02), + (0, 104, 0x02), + (0, 108, 0x02), + (0, 109, 0x02), + (0, 110, 0x02), + (0, 112, 0x02), + (0, 114, 0x02), + (0, 117, 0x02), + (38, 0, 0x00), + (39, 0, 0x00), + ], + // 24 + [ + (1, 61, 0x02), + (22, 61, 0x03), + (1, 65, 0x02), + (22, 65, 0x03), + (1, 95, 0x02), + (22, 95, 0x03), + (1, 98, 0x02), + (22, 98, 0x03), + (1, 100, 0x02), + (22, 100, 0x03), + (1, 102, 0x02), + (22, 102, 0x03), + (1, 103, 0x02), + (22, 103, 0x03), + (1, 104, 0x02), + (22, 104, 0x03), + ], + // 25 + [ + (2, 61, 0x02), + (9, 61, 0x02), + (23, 61, 0x02), + (40, 61, 0x03), + (2, 65, 0x02), + (9, 65, 0x02), + (23, 65, 0x02), + (40, 65, 0x03), + (2, 95, 0x02), + (9, 95, 0x02), + (23, 95, 0x02), + (40, 95, 0x03), + (2, 98, 0x02), + (9, 98, 0x02), + (23, 98, 0x02), + (40, 98, 0x03), + ], + // 26 + [ + (3, 61, 0x02), + (6, 61, 0x02), + (10, 61, 0x02), + (15, 61, 0x02), + (24, 61, 0x02), + (31, 61, 0x02), + (41, 61, 0x02), + (56, 61, 0x03), + (3, 65, 0x02), + (6, 65, 0x02), + (10, 65, 0x02), + (15, 65, 0x02), + (24, 65, 0x02), + (31, 65, 0x02), + (41, 65, 0x02), + (56, 65, 0x03), + ], + // 27 + [ + (3, 95, 0x02), + (6, 95, 0x02), + (10, 95, 0x02), + (15, 95, 0x02), + (24, 95, 0x02), + (31, 95, 0x02), + (41, 95, 0x02), + (56, 95, 0x03), + (3, 98, 0x02), + (6, 98, 0x02), + (10, 98, 0x02), + (15, 98, 0x02), + (24, 98, 0x02), + (31, 98, 0x02), + (41, 98, 0x02), + (56, 98, 0x03), + ], + // 28 + [ + (2, 100, 0x02), + (9, 100, 0x02), + (23, 100, 0x02), + (40, 100, 0x03), + (2, 102, 0x02), + (9, 102, 0x02), + (23, 102, 0x02), + (40, 102, 0x03), + (2, 103, 0x02), + (9, 103, 0x02), + (23, 103, 0x02), + (40, 103, 0x03), + (2, 104, 0x02), + (9, 104, 0x02), + 
(23, 104, 0x02), + (40, 104, 0x03), + ], + // 29 + [ + (3, 100, 0x02), + (6, 100, 0x02), + (10, 100, 0x02), + (15, 100, 0x02), + (24, 100, 0x02), + (31, 100, 0x02), + (41, 100, 0x02), + (56, 100, 0x03), + (3, 102, 0x02), + (6, 102, 0x02), + (10, 102, 0x02), + (15, 102, 0x02), + (24, 102, 0x02), + (31, 102, 0x02), + (41, 102, 0x02), + (56, 102, 0x03), + ], + // 30 + [ + (3, 103, 0x02), + (6, 103, 0x02), + (10, 103, 0x02), + (15, 103, 0x02), + (24, 103, 0x02), + (31, 103, 0x02), + (41, 103, 0x02), + (56, 103, 0x03), + (3, 104, 0x02), + (6, 104, 0x02), + (10, 104, 0x02), + (15, 104, 0x02), + (24, 104, 0x02), + (31, 104, 0x02), + (41, 104, 0x02), + (56, 104, 0x03), + ], + // 31 + [ + (1, 108, 0x02), + (22, 108, 0x03), + (1, 109, 0x02), + (22, 109, 0x03), + (1, 110, 0x02), + (22, 110, 0x03), + (1, 112, 0x02), + (22, 112, 0x03), + (1, 114, 0x02), + (22, 114, 0x03), + (1, 117, 0x02), + (22, 117, 0x03), + (0, 58, 0x02), + (0, 66, 0x02), + (0, 67, 0x02), + (0, 68, 0x02), + ], + // 32 + [ + (2, 108, 0x02), + (9, 108, 0x02), + (23, 108, 0x02), + (40, 108, 0x03), + (2, 109, 0x02), + (9, 109, 0x02), + (23, 109, 0x02), + (40, 109, 0x03), + (2, 110, 0x02), + (9, 110, 0x02), + (23, 110, 0x02), + (40, 110, 0x03), + (2, 112, 0x02), + (9, 112, 0x02), + (23, 112, 0x02), + (40, 112, 0x03), + ], + // 33 + [ + (3, 108, 0x02), + (6, 108, 0x02), + (10, 108, 0x02), + (15, 108, 0x02), + (24, 108, 0x02), + (31, 108, 0x02), + (41, 108, 0x02), + (56, 108, 0x03), + (3, 109, 0x02), + (6, 109, 0x02), + (10, 109, 0x02), + (15, 109, 0x02), + (24, 109, 0x02), + (31, 109, 0x02), + (41, 109, 0x02), + (56, 109, 0x03), + ], + // 34 + [ + (3, 110, 0x02), + (6, 110, 0x02), + (10, 110, 0x02), + (15, 110, 0x02), + (24, 110, 0x02), + (31, 110, 0x02), + (41, 110, 0x02), + (56, 110, 0x03), + (3, 112, 0x02), + (6, 112, 0x02), + (10, 112, 0x02), + (15, 112, 0x02), + (24, 112, 0x02), + (31, 112, 0x02), + (41, 112, 0x02), + (56, 112, 0x03), + ], + // 35 + [ + (2, 114, 0x02), + (9, 114, 0x02), + (23, 114, 0x02), + 
(40, 114, 0x03), + (2, 117, 0x02), + (9, 117, 0x02), + (23, 117, 0x02), + (40, 117, 0x03), + (1, 58, 0x02), + (22, 58, 0x03), + (1, 66, 0x02), + (22, 66, 0x03), + (1, 67, 0x02), + (22, 67, 0x03), + (1, 68, 0x02), + (22, 68, 0x03), + ], + // 36 + [ + (3, 114, 0x02), + (6, 114, 0x02), + (10, 114, 0x02), + (15, 114, 0x02), + (24, 114, 0x02), + (31, 114, 0x02), + (41, 114, 0x02), + (56, 114, 0x03), + (3, 117, 0x02), + (6, 117, 0x02), + (10, 117, 0x02), + (15, 117, 0x02), + (24, 117, 0x02), + (31, 117, 0x02), + (41, 117, 0x02), + (56, 117, 0x03), + ], + // 37 + [ + (2, 58, 0x02), + (9, 58, 0x02), + (23, 58, 0x02), + (40, 58, 0x03), + (2, 66, 0x02), + (9, 66, 0x02), + (23, 66, 0x02), + (40, 66, 0x03), + (2, 67, 0x02), + (9, 67, 0x02), + (23, 67, 0x02), + (40, 67, 0x03), + (2, 68, 0x02), + (9, 68, 0x02), + (23, 68, 0x02), + (40, 68, 0x03), + ], + // 38 + [ + (3, 58, 0x02), + (6, 58, 0x02), + (10, 58, 0x02), + (15, 58, 0x02), + (24, 58, 0x02), + (31, 58, 0x02), + (41, 58, 0x02), + (56, 58, 0x03), + (3, 66, 0x02), + (6, 66, 0x02), + (10, 66, 0x02), + (15, 66, 0x02), + (24, 66, 0x02), + (31, 66, 0x02), + (41, 66, 0x02), + (56, 66, 0x03), + ], + // 39 + [ + (3, 67, 0x02), + (6, 67, 0x02), + (10, 67, 0x02), + (15, 67, 0x02), + (24, 67, 0x02), + (31, 67, 0x02), + (41, 67, 0x02), + (56, 67, 0x03), + (3, 68, 0x02), + (6, 68, 0x02), + (10, 68, 0x02), + (15, 68, 0x02), + (24, 68, 0x02), + (31, 68, 0x02), + (41, 68, 0x02), + (56, 68, 0x03), + ], + // 40 + [ + (44, 0, 0x00), + (45, 0, 0x00), + (47, 0, 0x00), + (48, 0, 0x00), + (51, 0, 0x00), + (52, 0, 0x00), + (54, 0, 0x00), + (55, 0, 0x00), + (59, 0, 0x00), + (60, 0, 0x00), + (62, 0, 0x00), + (63, 0, 0x00), + (66, 0, 0x00), + (67, 0, 0x00), + (69, 0, 0x00), + (72, 0, 0x01), + ], + // 41 + [ + (0, 69, 0x02), + (0, 70, 0x02), + (0, 71, 0x02), + (0, 72, 0x02), + (0, 73, 0x02), + (0, 74, 0x02), + (0, 75, 0x02), + (0, 76, 0x02), + (0, 77, 0x02), + (0, 78, 0x02), + (0, 79, 0x02), + (0, 80, 0x02), + (0, 81, 0x02), + (0, 82, 0x02), + (0, 
83, 0x02), + (0, 84, 0x02), + ], + // 42 + [ + (1, 69, 0x02), + (22, 69, 0x03), + (1, 70, 0x02), + (22, 70, 0x03), + (1, 71, 0x02), + (22, 71, 0x03), + (1, 72, 0x02), + (22, 72, 0x03), + (1, 73, 0x02), + (22, 73, 0x03), + (1, 74, 0x02), + (22, 74, 0x03), + (1, 75, 0x02), + (22, 75, 0x03), + (1, 76, 0x02), + (22, 76, 0x03), + ], + // 43 + [ + (2, 69, 0x02), + (9, 69, 0x02), + (23, 69, 0x02), + (40, 69, 0x03), + (2, 70, 0x02), + (9, 70, 0x02), + (23, 70, 0x02), + (40, 70, 0x03), + (2, 71, 0x02), + (9, 71, 0x02), + (23, 71, 0x02), + (40, 71, 0x03), + (2, 72, 0x02), + (9, 72, 0x02), + (23, 72, 0x02), + (40, 72, 0x03), + ], + // 44 + [ + (3, 69, 0x02), + (6, 69, 0x02), + (10, 69, 0x02), + (15, 69, 0x02), + (24, 69, 0x02), + (31, 69, 0x02), + (41, 69, 0x02), + (56, 69, 0x03), + (3, 70, 0x02), + (6, 70, 0x02), + (10, 70, 0x02), + (15, 70, 0x02), + (24, 70, 0x02), + (31, 70, 0x02), + (41, 70, 0x02), + (56, 70, 0x03), + ], + // 45 + [ + (3, 71, 0x02), + (6, 71, 0x02), + (10, 71, 0x02), + (15, 71, 0x02), + (24, 71, 0x02), + (31, 71, 0x02), + (41, 71, 0x02), + (56, 71, 0x03), + (3, 72, 0x02), + (6, 72, 0x02), + (10, 72, 0x02), + (15, 72, 0x02), + (24, 72, 0x02), + (31, 72, 0x02), + (41, 72, 0x02), + (56, 72, 0x03), + ], + // 46 + [ + (2, 73, 0x02), + (9, 73, 0x02), + (23, 73, 0x02), + (40, 73, 0x03), + (2, 74, 0x02), + (9, 74, 0x02), + (23, 74, 0x02), + (40, 74, 0x03), + (2, 75, 0x02), + (9, 75, 0x02), + (23, 75, 0x02), + (40, 75, 0x03), + (2, 76, 0x02), + (9, 76, 0x02), + (23, 76, 0x02), + (40, 76, 0x03), + ], + // 47 + [ + (3, 73, 0x02), + (6, 73, 0x02), + (10, 73, 0x02), + (15, 73, 0x02), + (24, 73, 0x02), + (31, 73, 0x02), + (41, 73, 0x02), + (56, 73, 0x03), + (3, 74, 0x02), + (6, 74, 0x02), + (10, 74, 0x02), + (15, 74, 0x02), + (24, 74, 0x02), + (31, 74, 0x02), + (41, 74, 0x02), + (56, 74, 0x03), + ], + // 48 + [ + (3, 75, 0x02), + (6, 75, 0x02), + (10, 75, 0x02), + (15, 75, 0x02), + (24, 75, 0x02), + (31, 75, 0x02), + (41, 75, 0x02), + (56, 75, 0x03), + (3, 76, 0x02), + 
(6, 76, 0x02), + (10, 76, 0x02), + (15, 76, 0x02), + (24, 76, 0x02), + (31, 76, 0x02), + (41, 76, 0x02), + (56, 76, 0x03), + ], + // 49 + [ + (1, 77, 0x02), + (22, 77, 0x03), + (1, 78, 0x02), + (22, 78, 0x03), + (1, 79, 0x02), + (22, 79, 0x03), + (1, 80, 0x02), + (22, 80, 0x03), + (1, 81, 0x02), + (22, 81, 0x03), + (1, 82, 0x02), + (22, 82, 0x03), + (1, 83, 0x02), + (22, 83, 0x03), + (1, 84, 0x02), + (22, 84, 0x03), + ], + // 50 + [ + (2, 77, 0x02), + (9, 77, 0x02), + (23, 77, 0x02), + (40, 77, 0x03), + (2, 78, 0x02), + (9, 78, 0x02), + (23, 78, 0x02), + (40, 78, 0x03), + (2, 79, 0x02), + (9, 79, 0x02), + (23, 79, 0x02), + (40, 79, 0x03), + (2, 80, 0x02), + (9, 80, 0x02), + (23, 80, 0x02), + (40, 80, 0x03), + ], + // 51 + [ + (3, 77, 0x02), + (6, 77, 0x02), + (10, 77, 0x02), + (15, 77, 0x02), + (24, 77, 0x02), + (31, 77, 0x02), + (41, 77, 0x02), + (56, 77, 0x03), + (3, 78, 0x02), + (6, 78, 0x02), + (10, 78, 0x02), + (15, 78, 0x02), + (24, 78, 0x02), + (31, 78, 0x02), + (41, 78, 0x02), + (56, 78, 0x03), + ], + // 52 + [ + (3, 79, 0x02), + (6, 79, 0x02), + (10, 79, 0x02), + (15, 79, 0x02), + (24, 79, 0x02), + (31, 79, 0x02), + (41, 79, 0x02), + (56, 79, 0x03), + (3, 80, 0x02), + (6, 80, 0x02), + (10, 80, 0x02), + (15, 80, 0x02), + (24, 80, 0x02), + (31, 80, 0x02), + (41, 80, 0x02), + (56, 80, 0x03), + ], + // 53 + [ + (2, 81, 0x02), + (9, 81, 0x02), + (23, 81, 0x02), + (40, 81, 0x03), + (2, 82, 0x02), + (9, 82, 0x02), + (23, 82, 0x02), + (40, 82, 0x03), + (2, 83, 0x02), + (9, 83, 0x02), + (23, 83, 0x02), + (40, 83, 0x03), + (2, 84, 0x02), + (9, 84, 0x02), + (23, 84, 0x02), + (40, 84, 0x03), + ], + // 54 + [ + (3, 81, 0x02), + (6, 81, 0x02), + (10, 81, 0x02), + (15, 81, 0x02), + (24, 81, 0x02), + (31, 81, 0x02), + (41, 81, 0x02), + (56, 81, 0x03), + (3, 82, 0x02), + (6, 82, 0x02), + (10, 82, 0x02), + (15, 82, 0x02), + (24, 82, 0x02), + (31, 82, 0x02), + (41, 82, 0x02), + (56, 82, 0x03), + ], + // 55 + [ + (3, 83, 0x02), + (6, 83, 0x02), + (10, 83, 0x02), + (15, 83, 
0x02), + (24, 83, 0x02), + (31, 83, 0x02), + (41, 83, 0x02), + (56, 83, 0x03), + (3, 84, 0x02), + (6, 84, 0x02), + (10, 84, 0x02), + (15, 84, 0x02), + (24, 84, 0x02), + (31, 84, 0x02), + (41, 84, 0x02), + (56, 84, 0x03), + ], + // 56 + [ + (0, 85, 0x02), + (0, 86, 0x02), + (0, 87, 0x02), + (0, 89, 0x02), + (0, 106, 0x02), + (0, 107, 0x02), + (0, 113, 0x02), + (0, 118, 0x02), + (0, 119, 0x02), + (0, 120, 0x02), + (0, 121, 0x02), + (0, 122, 0x02), + (70, 0, 0x00), + (71, 0, 0x00), + (73, 0, 0x00), + (74, 0, 0x01), + ], + // 57 + [ + (1, 85, 0x02), + (22, 85, 0x03), + (1, 86, 0x02), + (22, 86, 0x03), + (1, 87, 0x02), + (22, 87, 0x03), + (1, 89, 0x02), + (22, 89, 0x03), + (1, 106, 0x02), + (22, 106, 0x03), + (1, 107, 0x02), + (22, 107, 0x03), + (1, 113, 0x02), + (22, 113, 0x03), + (1, 118, 0x02), + (22, 118, 0x03), + ], + // 58 + [ + (2, 85, 0x02), + (9, 85, 0x02), + (23, 85, 0x02), + (40, 85, 0x03), + (2, 86, 0x02), + (9, 86, 0x02), + (23, 86, 0x02), + (40, 86, 0x03), + (2, 87, 0x02), + (9, 87, 0x02), + (23, 87, 0x02), + (40, 87, 0x03), + (2, 89, 0x02), + (9, 89, 0x02), + (23, 89, 0x02), + (40, 89, 0x03), + ], + // 59 + [ + (3, 85, 0x02), + (6, 85, 0x02), + (10, 85, 0x02), + (15, 85, 0x02), + (24, 85, 0x02), + (31, 85, 0x02), + (41, 85, 0x02), + (56, 85, 0x03), + (3, 86, 0x02), + (6, 86, 0x02), + (10, 86, 0x02), + (15, 86, 0x02), + (24, 86, 0x02), + (31, 86, 0x02), + (41, 86, 0x02), + (56, 86, 0x03), + ], + // 60 + [ + (3, 87, 0x02), + (6, 87, 0x02), + (10, 87, 0x02), + (15, 87, 0x02), + (24, 87, 0x02), + (31, 87, 0x02), + (41, 87, 0x02), + (56, 87, 0x03), + (3, 89, 0x02), + (6, 89, 0x02), + (10, 89, 0x02), + (15, 89, 0x02), + (24, 89, 0x02), + (31, 89, 0x02), + (41, 89, 0x02), + (56, 89, 0x03), + ], + // 61 + [ + (2, 106, 0x02), + (9, 106, 0x02), + (23, 106, 0x02), + (40, 106, 0x03), + (2, 107, 0x02), + (9, 107, 0x02), + (23, 107, 0x02), + (40, 107, 0x03), + (2, 113, 0x02), + (9, 113, 0x02), + (23, 113, 0x02), + (40, 113, 0x03), + (2, 118, 0x02), + (9, 118, 0x02), + 
(23, 118, 0x02), + (40, 118, 0x03), + ], + // 62 + [ + (3, 106, 0x02), + (6, 106, 0x02), + (10, 106, 0x02), + (15, 106, 0x02), + (24, 106, 0x02), + (31, 106, 0x02), + (41, 106, 0x02), + (56, 106, 0x03), + (3, 107, 0x02), + (6, 107, 0x02), + (10, 107, 0x02), + (15, 107, 0x02), + (24, 107, 0x02), + (31, 107, 0x02), + (41, 107, 0x02), + (56, 107, 0x03), + ], + // 63 + [ + (3, 113, 0x02), + (6, 113, 0x02), + (10, 113, 0x02), + (15, 113, 0x02), + (24, 113, 0x02), + (31, 113, 0x02), + (41, 113, 0x02), + (56, 113, 0x03), + (3, 118, 0x02), + (6, 118, 0x02), + (10, 118, 0x02), + (15, 118, 0x02), + (24, 118, 0x02), + (31, 118, 0x02), + (41, 118, 0x02), + (56, 118, 0x03), + ], + // 64 + [ + (1, 119, 0x02), + (22, 119, 0x03), + (1, 120, 0x02), + (22, 120, 0x03), + (1, 121, 0x02), + (22, 121, 0x03), + (1, 122, 0x02), + (22, 122, 0x03), + (0, 38, 0x02), + (0, 42, 0x02), + (0, 44, 0x02), + (0, 59, 0x02), + (0, 88, 0x02), + (0, 90, 0x02), + (75, 0, 0x00), + (78, 0, 0x00), + ], + // 65 + [ + (2, 119, 0x02), + (9, 119, 0x02), + (23, 119, 0x02), + (40, 119, 0x03), + (2, 120, 0x02), + (9, 120, 0x02), + (23, 120, 0x02), + (40, 120, 0x03), + (2, 121, 0x02), + (9, 121, 0x02), + (23, 121, 0x02), + (40, 121, 0x03), + (2, 122, 0x02), + (9, 122, 0x02), + (23, 122, 0x02), + (40, 122, 0x03), + ], + // 66 + [ + (3, 119, 0x02), + (6, 119, 0x02), + (10, 119, 0x02), + (15, 119, 0x02), + (24, 119, 0x02), + (31, 119, 0x02), + (41, 119, 0x02), + (56, 119, 0x03), + (3, 120, 0x02), + (6, 120, 0x02), + (10, 120, 0x02), + (15, 120, 0x02), + (24, 120, 0x02), + (31, 120, 0x02), + (41, 120, 0x02), + (56, 120, 0x03), + ], + // 67 + [ + (3, 121, 0x02), + (6, 121, 0x02), + (10, 121, 0x02), + (15, 121, 0x02), + (24, 121, 0x02), + (31, 121, 0x02), + (41, 121, 0x02), + (56, 121, 0x03), + (3, 122, 0x02), + (6, 122, 0x02), + (10, 122, 0x02), + (15, 122, 0x02), + (24, 122, 0x02), + (31, 122, 0x02), + (41, 122, 0x02), + (56, 122, 0x03), + ], + // 68 + [ + (1, 38, 0x02), + (22, 38, 0x03), + (1, 42, 0x02), + (22, 42, 
0x03), + (1, 44, 0x02), + (22, 44, 0x03), + (1, 59, 0x02), + (22, 59, 0x03), + (1, 88, 0x02), + (22, 88, 0x03), + (1, 90, 0x02), + (22, 90, 0x03), + (76, 0, 0x00), + (77, 0, 0x00), + (79, 0, 0x00), + (81, 0, 0x00), + ], + // 69 + [ + (2, 38, 0x02), + (9, 38, 0x02), + (23, 38, 0x02), + (40, 38, 0x03), + (2, 42, 0x02), + (9, 42, 0x02), + (23, 42, 0x02), + (40, 42, 0x03), + (2, 44, 0x02), + (9, 44, 0x02), + (23, 44, 0x02), + (40, 44, 0x03), + (2, 59, 0x02), + (9, 59, 0x02), + (23, 59, 0x02), + (40, 59, 0x03), + ], + // 70 + [ + (3, 38, 0x02), + (6, 38, 0x02), + (10, 38, 0x02), + (15, 38, 0x02), + (24, 38, 0x02), + (31, 38, 0x02), + (41, 38, 0x02), + (56, 38, 0x03), + (3, 42, 0x02), + (6, 42, 0x02), + (10, 42, 0x02), + (15, 42, 0x02), + (24, 42, 0x02), + (31, 42, 0x02), + (41, 42, 0x02), + (56, 42, 0x03), + ], + // 71 + [ + (3, 44, 0x02), + (6, 44, 0x02), + (10, 44, 0x02), + (15, 44, 0x02), + (24, 44, 0x02), + (31, 44, 0x02), + (41, 44, 0x02), + (56, 44, 0x03), + (3, 59, 0x02), + (6, 59, 0x02), + (10, 59, 0x02), + (15, 59, 0x02), + (24, 59, 0x02), + (31, 59, 0x02), + (41, 59, 0x02), + (56, 59, 0x03), + ], + // 72 + [ + (2, 88, 0x02), + (9, 88, 0x02), + (23, 88, 0x02), + (40, 88, 0x03), + (2, 90, 0x02), + (9, 90, 0x02), + (23, 90, 0x02), + (40, 90, 0x03), + (0, 33, 0x02), + (0, 34, 0x02), + (0, 40, 0x02), + (0, 41, 0x02), + (0, 63, 0x02), + (80, 0, 0x00), + (82, 0, 0x00), + (84, 0, 0x00), + ], + // 73 + [ + (3, 88, 0x02), + (6, 88, 0x02), + (10, 88, 0x02), + (15, 88, 0x02), + (24, 88, 0x02), + (31, 88, 0x02), + (41, 88, 0x02), + (56, 88, 0x03), + (3, 90, 0x02), + (6, 90, 0x02), + (10, 90, 0x02), + (15, 90, 0x02), + (24, 90, 0x02), + (31, 90, 0x02), + (41, 90, 0x02), + (56, 90, 0x03), + ], + // 74 + [ + (1, 33, 0x02), + (22, 33, 0x03), + (1, 34, 0x02), + (22, 34, 0x03), + (1, 40, 0x02), + (22, 40, 0x03), + (1, 41, 0x02), + (22, 41, 0x03), + (1, 63, 0x02), + (22, 63, 0x03), + (0, 39, 0x02), + (0, 43, 0x02), + (0, 124, 0x02), + (83, 0, 0x00), + (85, 0, 0x00), + (88, 0, 
0x00), + ], + // 75 + [ + (2, 33, 0x02), + (9, 33, 0x02), + (23, 33, 0x02), + (40, 33, 0x03), + (2, 34, 0x02), + (9, 34, 0x02), + (23, 34, 0x02), + (40, 34, 0x03), + (2, 40, 0x02), + (9, 40, 0x02), + (23, 40, 0x02), + (40, 40, 0x03), + (2, 41, 0x02), + (9, 41, 0x02), + (23, 41, 0x02), + (40, 41, 0x03), + ], + // 76 + [ + (3, 33, 0x02), + (6, 33, 0x02), + (10, 33, 0x02), + (15, 33, 0x02), + (24, 33, 0x02), + (31, 33, 0x02), + (41, 33, 0x02), + (56, 33, 0x03), + (3, 34, 0x02), + (6, 34, 0x02), + (10, 34, 0x02), + (15, 34, 0x02), + (24, 34, 0x02), + (31, 34, 0x02), + (41, 34, 0x02), + (56, 34, 0x03), + ], + // 77 + [ + (3, 40, 0x02), + (6, 40, 0x02), + (10, 40, 0x02), + (15, 40, 0x02), + (24, 40, 0x02), + (31, 40, 0x02), + (41, 40, 0x02), + (56, 40, 0x03), + (3, 41, 0x02), + (6, 41, 0x02), + (10, 41, 0x02), + (15, 41, 0x02), + (24, 41, 0x02), + (31, 41, 0x02), + (41, 41, 0x02), + (56, 41, 0x03), + ], + // 78 + [ + (2, 63, 0x02), + (9, 63, 0x02), + (23, 63, 0x02), + (40, 63, 0x03), + (1, 39, 0x02), + (22, 39, 0x03), + (1, 43, 0x02), + (22, 43, 0x03), + (1, 124, 0x02), + (22, 124, 0x03), + (0, 35, 0x02), + (0, 62, 0x02), + (86, 0, 0x00), + (87, 0, 0x00), + (89, 0, 0x00), + (90, 0, 0x00), + ], + // 79 + [ + (3, 63, 0x02), + (6, 63, 0x02), + (10, 63, 0x02), + (15, 63, 0x02), + (24, 63, 0x02), + (31, 63, 0x02), + (41, 63, 0x02), + (56, 63, 0x03), + (2, 39, 0x02), + (9, 39, 0x02), + (23, 39, 0x02), + (40, 39, 0x03), + (2, 43, 0x02), + (9, 43, 0x02), + (23, 43, 0x02), + (40, 43, 0x03), + ], + // 80 + [ + (3, 39, 0x02), + (6, 39, 0x02), + (10, 39, 0x02), + (15, 39, 0x02), + (24, 39, 0x02), + (31, 39, 0x02), + (41, 39, 0x02), + (56, 39, 0x03), + (3, 43, 0x02), + (6, 43, 0x02), + (10, 43, 0x02), + (15, 43, 0x02), + (24, 43, 0x02), + (31, 43, 0x02), + (41, 43, 0x02), + (56, 43, 0x03), + ], + // 81 + [ + (2, 124, 0x02), + (9, 124, 0x02), + (23, 124, 0x02), + (40, 124, 0x03), + (1, 35, 0x02), + (22, 35, 0x03), + (1, 62, 0x02), + (22, 62, 0x03), + (0, 0, 0x02), + (0, 36, 0x02), + 
(0, 64, 0x02), + (0, 91, 0x02), + (0, 93, 0x02), + (0, 126, 0x02), + (91, 0, 0x00), + (92, 0, 0x00), + ], + // 82 + [ + (3, 124, 0x02), + (6, 124, 0x02), + (10, 124, 0x02), + (15, 124, 0x02), + (24, 124, 0x02), + (31, 124, 0x02), + (41, 124, 0x02), + (56, 124, 0x03), + (2, 35, 0x02), + (9, 35, 0x02), + (23, 35, 0x02), + (40, 35, 0x03), + (2, 62, 0x02), + (9, 62, 0x02), + (23, 62, 0x02), + (40, 62, 0x03), + ], + // 83 + [ + (3, 35, 0x02), + (6, 35, 0x02), + (10, 35, 0x02), + (15, 35, 0x02), + (24, 35, 0x02), + (31, 35, 0x02), + (41, 35, 0x02), + (56, 35, 0x03), + (3, 62, 0x02), + (6, 62, 0x02), + (10, 62, 0x02), + (15, 62, 0x02), + (24, 62, 0x02), + (31, 62, 0x02), + (41, 62, 0x02), + (56, 62, 0x03), + ], + // 84 + [ + (1, 0, 0x02), + (22, 0, 0x03), + (1, 36, 0x02), + (22, 36, 0x03), + (1, 64, 0x02), + (22, 64, 0x03), + (1, 91, 0x02), + (22, 91, 0x03), + (1, 93, 0x02), + (22, 93, 0x03), + (1, 126, 0x02), + (22, 126, 0x03), + (0, 94, 0x02), + (0, 125, 0x02), + (93, 0, 0x00), + (94, 0, 0x00), + ], + // 85 + [ + (2, 0, 0x02), + (9, 0, 0x02), + (23, 0, 0x02), + (40, 0, 0x03), + (2, 36, 0x02), + (9, 36, 0x02), + (23, 36, 0x02), + (40, 36, 0x03), + (2, 64, 0x02), + (9, 64, 0x02), + (23, 64, 0x02), + (40, 64, 0x03), + (2, 91, 0x02), + (9, 91, 0x02), + (23, 91, 0x02), + (40, 91, 0x03), + ], + // 86 + [ + (3, 0, 0x02), + (6, 0, 0x02), + (10, 0, 0x02), + (15, 0, 0x02), + (24, 0, 0x02), + (31, 0, 0x02), + (41, 0, 0x02), + (56, 0, 0x03), + (3, 36, 0x02), + (6, 36, 0x02), + (10, 36, 0x02), + (15, 36, 0x02), + (24, 36, 0x02), + (31, 36, 0x02), + (41, 36, 0x02), + (56, 36, 0x03), + ], + // 87 + [ + (3, 64, 0x02), + (6, 64, 0x02), + (10, 64, 0x02), + (15, 64, 0x02), + (24, 64, 0x02), + (31, 64, 0x02), + (41, 64, 0x02), + (56, 64, 0x03), + (3, 91, 0x02), + (6, 91, 0x02), + (10, 91, 0x02), + (15, 91, 0x02), + (24, 91, 0x02), + (31, 91, 0x02), + (41, 91, 0x02), + (56, 91, 0x03), + ], + // 88 + [ + (2, 93, 0x02), + (9, 93, 0x02), + (23, 93, 0x02), + (40, 93, 0x03), + (2, 126, 0x02), + 
(9, 126, 0x02), + (23, 126, 0x02), + (40, 126, 0x03), + (1, 94, 0x02), + (22, 94, 0x03), + (1, 125, 0x02), + (22, 125, 0x03), + (0, 60, 0x02), + (0, 96, 0x02), + (0, 123, 0x02), + (95, 0, 0x00), + ], + // 89 + [ + (3, 93, 0x02), + (6, 93, 0x02), + (10, 93, 0x02), + (15, 93, 0x02), + (24, 93, 0x02), + (31, 93, 0x02), + (41, 93, 0x02), + (56, 93, 0x03), + (3, 126, 0x02), + (6, 126, 0x02), + (10, 126, 0x02), + (15, 126, 0x02), + (24, 126, 0x02), + (31, 126, 0x02), + (41, 126, 0x02), + (56, 126, 0x03), + ], + // 90 + [ + (2, 94, 0x02), + (9, 94, 0x02), + (23, 94, 0x02), + (40, 94, 0x03), + (2, 125, 0x02), + (9, 125, 0x02), + (23, 125, 0x02), + (40, 125, 0x03), + (1, 60, 0x02), + (22, 60, 0x03), + (1, 96, 0x02), + (22, 96, 0x03), + (1, 123, 0x02), + (22, 123, 0x03), + (96, 0, 0x00), + (110, 0, 0x00), + ], + // 91 + [ + (3, 94, 0x02), + (6, 94, 0x02), + (10, 94, 0x02), + (15, 94, 0x02), + (24, 94, 0x02), + (31, 94, 0x02), + (41, 94, 0x02), + (56, 94, 0x03), + (3, 125, 0x02), + (6, 125, 0x02), + (10, 125, 0x02), + (15, 125, 0x02), + (24, 125, 0x02), + (31, 125, 0x02), + (41, 125, 0x02), + (56, 125, 0x03), + ], + // 92 + [ + (2, 60, 0x02), + (9, 60, 0x02), + (23, 60, 0x02), + (40, 60, 0x03), + (2, 96, 0x02), + (9, 96, 0x02), + (23, 96, 0x02), + (40, 96, 0x03), + (2, 123, 0x02), + (9, 123, 0x02), + (23, 123, 0x02), + (40, 123, 0x03), + (97, 0, 0x00), + (101, 0, 0x00), + (111, 0, 0x00), + (133, 0, 0x00), + ], + // 93 + [ + (3, 60, 0x02), + (6, 60, 0x02), + (10, 60, 0x02), + (15, 60, 0x02), + (24, 60, 0x02), + (31, 60, 0x02), + (41, 60, 0x02), + (56, 60, 0x03), + (3, 96, 0x02), + (6, 96, 0x02), + (10, 96, 0x02), + (15, 96, 0x02), + (24, 96, 0x02), + (31, 96, 0x02), + (41, 96, 0x02), + (56, 96, 0x03), + ], + // 94 + [ + (3, 123, 0x02), + (6, 123, 0x02), + (10, 123, 0x02), + (15, 123, 0x02), + (24, 123, 0x02), + (31, 123, 0x02), + (41, 123, 0x02), + (56, 123, 0x03), + (98, 0, 0x00), + (99, 0, 0x00), + (102, 0, 0x00), + (105, 0, 0x00), + (112, 0, 0x00), + (119, 0, 0x00), + (134, 
0, 0x00), + (153, 0, 0x00), + ], + // 95 + [ + (0, 92, 0x02), + (0, 195, 0x02), + (0, 208, 0x02), + (100, 0, 0x00), + (103, 0, 0x00), + (104, 0, 0x00), + (106, 0, 0x00), + (107, 0, 0x00), + (113, 0, 0x00), + (116, 0, 0x00), + (120, 0, 0x00), + (126, 0, 0x00), + (135, 0, 0x00), + (142, 0, 0x00), + (154, 0, 0x00), + (169, 0, 0x00), + ], + // 96 + [ + (1, 92, 0x02), + (22, 92, 0x03), + (1, 195, 0x02), + (22, 195, 0x03), + (1, 208, 0x02), + (22, 208, 0x03), + (0, 128, 0x02), + (0, 130, 0x02), + (0, 131, 0x02), + (0, 162, 0x02), + (0, 184, 0x02), + (0, 194, 0x02), + (0, 224, 0x02), + (0, 226, 0x02), + (108, 0, 0x00), + (109, 0, 0x00), + ], + // 97 + [ + (2, 92, 0x02), + (9, 92, 0x02), + (23, 92, 0x02), + (40, 92, 0x03), + (2, 195, 0x02), + (9, 195, 0x02), + (23, 195, 0x02), + (40, 195, 0x03), + (2, 208, 0x02), + (9, 208, 0x02), + (23, 208, 0x02), + (40, 208, 0x03), + (1, 128, 0x02), + (22, 128, 0x03), + (1, 130, 0x02), + (22, 130, 0x03), + ], + // 98 + [ + (3, 92, 0x02), + (6, 92, 0x02), + (10, 92, 0x02), + (15, 92, 0x02), + (24, 92, 0x02), + (31, 92, 0x02), + (41, 92, 0x02), + (56, 92, 0x03), + (3, 195, 0x02), + (6, 195, 0x02), + (10, 195, 0x02), + (15, 195, 0x02), + (24, 195, 0x02), + (31, 195, 0x02), + (41, 195, 0x02), + (56, 195, 0x03), + ], + // 99 + [ + (3, 208, 0x02), + (6, 208, 0x02), + (10, 208, 0x02), + (15, 208, 0x02), + (24, 208, 0x02), + (31, 208, 0x02), + (41, 208, 0x02), + (56, 208, 0x03), + (2, 128, 0x02), + (9, 128, 0x02), + (23, 128, 0x02), + (40, 128, 0x03), + (2, 130, 0x02), + (9, 130, 0x02), + (23, 130, 0x02), + (40, 130, 0x03), + ], + // 100 + [ + (3, 128, 0x02), + (6, 128, 0x02), + (10, 128, 0x02), + (15, 128, 0x02), + (24, 128, 0x02), + (31, 128, 0x02), + (41, 128, 0x02), + (56, 128, 0x03), + (3, 130, 0x02), + (6, 130, 0x02), + (10, 130, 0x02), + (15, 130, 0x02), + (24, 130, 0x02), + (31, 130, 0x02), + (41, 130, 0x02), + (56, 130, 0x03), + ], + // 101 + [ + (1, 131, 0x02), + (22, 131, 0x03), + (1, 162, 0x02), + (22, 162, 0x03), + (1, 184, 0x02), 
+ (22, 184, 0x03), + (1, 194, 0x02), + (22, 194, 0x03), + (1, 224, 0x02), + (22, 224, 0x03), + (1, 226, 0x02), + (22, 226, 0x03), + (0, 153, 0x02), + (0, 161, 0x02), + (0, 167, 0x02), + (0, 172, 0x02), + ], + // 102 + [ + (2, 131, 0x02), + (9, 131, 0x02), + (23, 131, 0x02), + (40, 131, 0x03), + (2, 162, 0x02), + (9, 162, 0x02), + (23, 162, 0x02), + (40, 162, 0x03), + (2, 184, 0x02), + (9, 184, 0x02), + (23, 184, 0x02), + (40, 184, 0x03), + (2, 194, 0x02), + (9, 194, 0x02), + (23, 194, 0x02), + (40, 194, 0x03), + ], + // 103 + [ + (3, 131, 0x02), + (6, 131, 0x02), + (10, 131, 0x02), + (15, 131, 0x02), + (24, 131, 0x02), + (31, 131, 0x02), + (41, 131, 0x02), + (56, 131, 0x03), + (3, 162, 0x02), + (6, 162, 0x02), + (10, 162, 0x02), + (15, 162, 0x02), + (24, 162, 0x02), + (31, 162, 0x02), + (41, 162, 0x02), + (56, 162, 0x03), + ], + // 104 + [ + (3, 184, 0x02), + (6, 184, 0x02), + (10, 184, 0x02), + (15, 184, 0x02), + (24, 184, 0x02), + (31, 184, 0x02), + (41, 184, 0x02), + (56, 184, 0x03), + (3, 194, 0x02), + (6, 194, 0x02), + (10, 194, 0x02), + (15, 194, 0x02), + (24, 194, 0x02), + (31, 194, 0x02), + (41, 194, 0x02), + (56, 194, 0x03), + ], + // 105 + [ + (2, 224, 0x02), + (9, 224, 0x02), + (23, 224, 0x02), + (40, 224, 0x03), + (2, 226, 0x02), + (9, 226, 0x02), + (23, 226, 0x02), + (40, 226, 0x03), + (1, 153, 0x02), + (22, 153, 0x03), + (1, 161, 0x02), + (22, 161, 0x03), + (1, 167, 0x02), + (22, 167, 0x03), + (1, 172, 0x02), + (22, 172, 0x03), + ], + // 106 + [ + (3, 224, 0x02), + (6, 224, 0x02), + (10, 224, 0x02), + (15, 224, 0x02), + (24, 224, 0x02), + (31, 224, 0x02), + (41, 224, 0x02), + (56, 224, 0x03), + (3, 226, 0x02), + (6, 226, 0x02), + (10, 226, 0x02), + (15, 226, 0x02), + (24, 226, 0x02), + (31, 226, 0x02), + (41, 226, 0x02), + (56, 226, 0x03), + ], + // 107 + [ + (2, 153, 0x02), + (9, 153, 0x02), + (23, 153, 0x02), + (40, 153, 0x03), + (2, 161, 0x02), + (9, 161, 0x02), + (23, 161, 0x02), + (40, 161, 0x03), + (2, 167, 0x02), + (9, 167, 0x02), + (23, 167, 
0x02), + (40, 167, 0x03), + (2, 172, 0x02), + (9, 172, 0x02), + (23, 172, 0x02), + (40, 172, 0x03), + ], + // 108 + [ + (3, 153, 0x02), + (6, 153, 0x02), + (10, 153, 0x02), + (15, 153, 0x02), + (24, 153, 0x02), + (31, 153, 0x02), + (41, 153, 0x02), + (56, 153, 0x03), + (3, 161, 0x02), + (6, 161, 0x02), + (10, 161, 0x02), + (15, 161, 0x02), + (24, 161, 0x02), + (31, 161, 0x02), + (41, 161, 0x02), + (56, 161, 0x03), + ], + // 109 + [ + (3, 167, 0x02), + (6, 167, 0x02), + (10, 167, 0x02), + (15, 167, 0x02), + (24, 167, 0x02), + (31, 167, 0x02), + (41, 167, 0x02), + (56, 167, 0x03), + (3, 172, 0x02), + (6, 172, 0x02), + (10, 172, 0x02), + (15, 172, 0x02), + (24, 172, 0x02), + (31, 172, 0x02), + (41, 172, 0x02), + (56, 172, 0x03), + ], + // 110 + [ + (114, 0, 0x00), + (115, 0, 0x00), + (117, 0, 0x00), + (118, 0, 0x00), + (121, 0, 0x00), + (123, 0, 0x00), + (127, 0, 0x00), + (130, 0, 0x00), + (136, 0, 0x00), + (139, 0, 0x00), + (143, 0, 0x00), + (146, 0, 0x00), + (155, 0, 0x00), + (162, 0, 0x00), + (170, 0, 0x00), + (180, 0, 0x00), + ], + // 111 + [ + (0, 176, 0x02), + (0, 177, 0x02), + (0, 179, 0x02), + (0, 209, 0x02), + (0, 216, 0x02), + (0, 217, 0x02), + (0, 227, 0x02), + (0, 229, 0x02), + (0, 230, 0x02), + (122, 0, 0x00), + (124, 0, 0x00), + (125, 0, 0x00), + (128, 0, 0x00), + (129, 0, 0x00), + (131, 0, 0x00), + (132, 0, 0x00), + ], + // 112 + [ + (1, 176, 0x02), + (22, 176, 0x03), + (1, 177, 0x02), + (22, 177, 0x03), + (1, 179, 0x02), + (22, 179, 0x03), + (1, 209, 0x02), + (22, 209, 0x03), + (1, 216, 0x02), + (22, 216, 0x03), + (1, 217, 0x02), + (22, 217, 0x03), + (1, 227, 0x02), + (22, 227, 0x03), + (1, 229, 0x02), + (22, 229, 0x03), + ], + // 113 + [ + (2, 176, 0x02), + (9, 176, 0x02), + (23, 176, 0x02), + (40, 176, 0x03), + (2, 177, 0x02), + (9, 177, 0x02), + (23, 177, 0x02), + (40, 177, 0x03), + (2, 179, 0x02), + (9, 179, 0x02), + (23, 179, 0x02), + (40, 179, 0x03), + (2, 209, 0x02), + (9, 209, 0x02), + (23, 209, 0x02), + (40, 209, 0x03), + ], + // 114 + [ + (3, 
176, 0x02), + (6, 176, 0x02), + (10, 176, 0x02), + (15, 176, 0x02), + (24, 176, 0x02), + (31, 176, 0x02), + (41, 176, 0x02), + (56, 176, 0x03), + (3, 177, 0x02), + (6, 177, 0x02), + (10, 177, 0x02), + (15, 177, 0x02), + (24, 177, 0x02), + (31, 177, 0x02), + (41, 177, 0x02), + (56, 177, 0x03), + ], + // 115 + [ + (3, 179, 0x02), + (6, 179, 0x02), + (10, 179, 0x02), + (15, 179, 0x02), + (24, 179, 0x02), + (31, 179, 0x02), + (41, 179, 0x02), + (56, 179, 0x03), + (3, 209, 0x02), + (6, 209, 0x02), + (10, 209, 0x02), + (15, 209, 0x02), + (24, 209, 0x02), + (31, 209, 0x02), + (41, 209, 0x02), + (56, 209, 0x03), + ], + // 116 + [ + (2, 216, 0x02), + (9, 216, 0x02), + (23, 216, 0x02), + (40, 216, 0x03), + (2, 217, 0x02), + (9, 217, 0x02), + (23, 217, 0x02), + (40, 217, 0x03), + (2, 227, 0x02), + (9, 227, 0x02), + (23, 227, 0x02), + (40, 227, 0x03), + (2, 229, 0x02), + (9, 229, 0x02), + (23, 229, 0x02), + (40, 229, 0x03), + ], + // 117 + [ + (3, 216, 0x02), + (6, 216, 0x02), + (10, 216, 0x02), + (15, 216, 0x02), + (24, 216, 0x02), + (31, 216, 0x02), + (41, 216, 0x02), + (56, 216, 0x03), + (3, 217, 0x02), + (6, 217, 0x02), + (10, 217, 0x02), + (15, 217, 0x02), + (24, 217, 0x02), + (31, 217, 0x02), + (41, 217, 0x02), + (56, 217, 0x03), + ], + // 118 + [ + (3, 227, 0x02), + (6, 227, 0x02), + (10, 227, 0x02), + (15, 227, 0x02), + (24, 227, 0x02), + (31, 227, 0x02), + (41, 227, 0x02), + (56, 227, 0x03), + (3, 229, 0x02), + (6, 229, 0x02), + (10, 229, 0x02), + (15, 229, 0x02), + (24, 229, 0x02), + (31, 229, 0x02), + (41, 229, 0x02), + (56, 229, 0x03), + ], + // 119 + [ + (1, 230, 0x02), + (22, 230, 0x03), + (0, 129, 0x02), + (0, 132, 0x02), + (0, 133, 0x02), + (0, 134, 0x02), + (0, 136, 0x02), + (0, 146, 0x02), + (0, 154, 0x02), + (0, 156, 0x02), + (0, 160, 0x02), + (0, 163, 0x02), + (0, 164, 0x02), + (0, 169, 0x02), + (0, 170, 0x02), + (0, 173, 0x02), + ], + // 120 + [ + (2, 230, 0x02), + (9, 230, 0x02), + (23, 230, 0x02), + (40, 230, 0x03), + (1, 129, 0x02), + (22, 129, 0x03), + 
(1, 132, 0x02), + (22, 132, 0x03), + (1, 133, 0x02), + (22, 133, 0x03), + (1, 134, 0x02), + (22, 134, 0x03), + (1, 136, 0x02), + (22, 136, 0x03), + (1, 146, 0x02), + (22, 146, 0x03), + ], + // 121 + [ + (3, 230, 0x02), + (6, 230, 0x02), + (10, 230, 0x02), + (15, 230, 0x02), + (24, 230, 0x02), + (31, 230, 0x02), + (41, 230, 0x02), + (56, 230, 0x03), + (2, 129, 0x02), + (9, 129, 0x02), + (23, 129, 0x02), + (40, 129, 0x03), + (2, 132, 0x02), + (9, 132, 0x02), + (23, 132, 0x02), + (40, 132, 0x03), + ], + // 122 + [ + (3, 129, 0x02), + (6, 129, 0x02), + (10, 129, 0x02), + (15, 129, 0x02), + (24, 129, 0x02), + (31, 129, 0x02), + (41, 129, 0x02), + (56, 129, 0x03), + (3, 132, 0x02), + (6, 132, 0x02), + (10, 132, 0x02), + (15, 132, 0x02), + (24, 132, 0x02), + (31, 132, 0x02), + (41, 132, 0x02), + (56, 132, 0x03), + ], + // 123 + [ + (2, 133, 0x02), + (9, 133, 0x02), + (23, 133, 0x02), + (40, 133, 0x03), + (2, 134, 0x02), + (9, 134, 0x02), + (23, 134, 0x02), + (40, 134, 0x03), + (2, 136, 0x02), + (9, 136, 0x02), + (23, 136, 0x02), + (40, 136, 0x03), + (2, 146, 0x02), + (9, 146, 0x02), + (23, 146, 0x02), + (40, 146, 0x03), + ], + // 124 + [ + (3, 133, 0x02), + (6, 133, 0x02), + (10, 133, 0x02), + (15, 133, 0x02), + (24, 133, 0x02), + (31, 133, 0x02), + (41, 133, 0x02), + (56, 133, 0x03), + (3, 134, 0x02), + (6, 134, 0x02), + (10, 134, 0x02), + (15, 134, 0x02), + (24, 134, 0x02), + (31, 134, 0x02), + (41, 134, 0x02), + (56, 134, 0x03), + ], + // 125 + [ + (3, 136, 0x02), + (6, 136, 0x02), + (10, 136, 0x02), + (15, 136, 0x02), + (24, 136, 0x02), + (31, 136, 0x02), + (41, 136, 0x02), + (56, 136, 0x03), + (3, 146, 0x02), + (6, 146, 0x02), + (10, 146, 0x02), + (15, 146, 0x02), + (24, 146, 0x02), + (31, 146, 0x02), + (41, 146, 0x02), + (56, 146, 0x03), + ], + // 126 + [ + (1, 154, 0x02), + (22, 154, 0x03), + (1, 156, 0x02), + (22, 156, 0x03), + (1, 160, 0x02), + (22, 160, 0x03), + (1, 163, 0x02), + (22, 163, 0x03), + (1, 164, 0x02), + (22, 164, 0x03), + (1, 169, 0x02), + (22, 169, 
0x03), + (1, 170, 0x02), + (22, 170, 0x03), + (1, 173, 0x02), + (22, 173, 0x03), + ], + // 127 + [ + (2, 154, 0x02), + (9, 154, 0x02), + (23, 154, 0x02), + (40, 154, 0x03), + (2, 156, 0x02), + (9, 156, 0x02), + (23, 156, 0x02), + (40, 156, 0x03), + (2, 160, 0x02), + (9, 160, 0x02), + (23, 160, 0x02), + (40, 160, 0x03), + (2, 163, 0x02), + (9, 163, 0x02), + (23, 163, 0x02), + (40, 163, 0x03), + ], + // 128 + [ + (3, 154, 0x02), + (6, 154, 0x02), + (10, 154, 0x02), + (15, 154, 0x02), + (24, 154, 0x02), + (31, 154, 0x02), + (41, 154, 0x02), + (56, 154, 0x03), + (3, 156, 0x02), + (6, 156, 0x02), + (10, 156, 0x02), + (15, 156, 0x02), + (24, 156, 0x02), + (31, 156, 0x02), + (41, 156, 0x02), + (56, 156, 0x03), + ], + // 129 + [ + (3, 160, 0x02), + (6, 160, 0x02), + (10, 160, 0x02), + (15, 160, 0x02), + (24, 160, 0x02), + (31, 160, 0x02), + (41, 160, 0x02), + (56, 160, 0x03), + (3, 163, 0x02), + (6, 163, 0x02), + (10, 163, 0x02), + (15, 163, 0x02), + (24, 163, 0x02), + (31, 163, 0x02), + (41, 163, 0x02), + (56, 163, 0x03), + ], + // 130 + [ + (2, 164, 0x02), + (9, 164, 0x02), + (23, 164, 0x02), + (40, 164, 0x03), + (2, 169, 0x02), + (9, 169, 0x02), + (23, 169, 0x02), + (40, 169, 0x03), + (2, 170, 0x02), + (9, 170, 0x02), + (23, 170, 0x02), + (40, 170, 0x03), + (2, 173, 0x02), + (9, 173, 0x02), + (23, 173, 0x02), + (40, 173, 0x03), + ], + // 131 + [ + (3, 164, 0x02), + (6, 164, 0x02), + (10, 164, 0x02), + (15, 164, 0x02), + (24, 164, 0x02), + (31, 164, 0x02), + (41, 164, 0x02), + (56, 164, 0x03), + (3, 169, 0x02), + (6, 169, 0x02), + (10, 169, 0x02), + (15, 169, 0x02), + (24, 169, 0x02), + (31, 169, 0x02), + (41, 169, 0x02), + (56, 169, 0x03), + ], + // 132 + [ + (3, 170, 0x02), + (6, 170, 0x02), + (10, 170, 0x02), + (15, 170, 0x02), + (24, 170, 0x02), + (31, 170, 0x02), + (41, 170, 0x02), + (56, 170, 0x03), + (3, 173, 0x02), + (6, 173, 0x02), + (10, 173, 0x02), + (15, 173, 0x02), + (24, 173, 0x02), + (31, 173, 0x02), + (41, 173, 0x02), + (56, 173, 0x03), + ], + // 133 + [ 
+ (137, 0, 0x00), + (138, 0, 0x00), + (140, 0, 0x00), + (141, 0, 0x00), + (144, 0, 0x00), + (145, 0, 0x00), + (147, 0, 0x00), + (150, 0, 0x00), + (156, 0, 0x00), + (159, 0, 0x00), + (163, 0, 0x00), + (166, 0, 0x00), + (171, 0, 0x00), + (174, 0, 0x00), + (181, 0, 0x00), + (190, 0, 0x00), + ], + // 134 + [ + (0, 178, 0x02), + (0, 181, 0x02), + (0, 185, 0x02), + (0, 186, 0x02), + (0, 187, 0x02), + (0, 189, 0x02), + (0, 190, 0x02), + (0, 196, 0x02), + (0, 198, 0x02), + (0, 228, 0x02), + (0, 232, 0x02), + (0, 233, 0x02), + (148, 0, 0x00), + (149, 0, 0x00), + (151, 0, 0x00), + (152, 0, 0x00), + ], + // 135 + [ + (1, 178, 0x02), + (22, 178, 0x03), + (1, 181, 0x02), + (22, 181, 0x03), + (1, 185, 0x02), + (22, 185, 0x03), + (1, 186, 0x02), + (22, 186, 0x03), + (1, 187, 0x02), + (22, 187, 0x03), + (1, 189, 0x02), + (22, 189, 0x03), + (1, 190, 0x02), + (22, 190, 0x03), + (1, 196, 0x02), + (22, 196, 0x03), + ], + // 136 + [ + (2, 178, 0x02), + (9, 178, 0x02), + (23, 178, 0x02), + (40, 178, 0x03), + (2, 181, 0x02), + (9, 181, 0x02), + (23, 181, 0x02), + (40, 181, 0x03), + (2, 185, 0x02), + (9, 185, 0x02), + (23, 185, 0x02), + (40, 185, 0x03), + (2, 186, 0x02), + (9, 186, 0x02), + (23, 186, 0x02), + (40, 186, 0x03), + ], + // 137 + [ + (3, 178, 0x02), + (6, 178, 0x02), + (10, 178, 0x02), + (15, 178, 0x02), + (24, 178, 0x02), + (31, 178, 0x02), + (41, 178, 0x02), + (56, 178, 0x03), + (3, 181, 0x02), + (6, 181, 0x02), + (10, 181, 0x02), + (15, 181, 0x02), + (24, 181, 0x02), + (31, 181, 0x02), + (41, 181, 0x02), + (56, 181, 0x03), + ], + // 138 + [ + (3, 185, 0x02), + (6, 185, 0x02), + (10, 185, 0x02), + (15, 185, 0x02), + (24, 185, 0x02), + (31, 185, 0x02), + (41, 185, 0x02), + (56, 185, 0x03), + (3, 186, 0x02), + (6, 186, 0x02), + (10, 186, 0x02), + (15, 186, 0x02), + (24, 186, 0x02), + (31, 186, 0x02), + (41, 186, 0x02), + (56, 186, 0x03), + ], + // 139 + [ + (2, 187, 0x02), + (9, 187, 0x02), + (23, 187, 0x02), + (40, 187, 0x03), + (2, 189, 0x02), + (9, 189, 0x02), + (23, 189, 
0x02), + (40, 189, 0x03), + (2, 190, 0x02), + (9, 190, 0x02), + (23, 190, 0x02), + (40, 190, 0x03), + (2, 196, 0x02), + (9, 196, 0x02), + (23, 196, 0x02), + (40, 196, 0x03), + ], + // 140 + [ + (3, 187, 0x02), + (6, 187, 0x02), + (10, 187, 0x02), + (15, 187, 0x02), + (24, 187, 0x02), + (31, 187, 0x02), + (41, 187, 0x02), + (56, 187, 0x03), + (3, 189, 0x02), + (6, 189, 0x02), + (10, 189, 0x02), + (15, 189, 0x02), + (24, 189, 0x02), + (31, 189, 0x02), + (41, 189, 0x02), + (56, 189, 0x03), + ], + // 141 + [ + (3, 190, 0x02), + (6, 190, 0x02), + (10, 190, 0x02), + (15, 190, 0x02), + (24, 190, 0x02), + (31, 190, 0x02), + (41, 190, 0x02), + (56, 190, 0x03), + (3, 196, 0x02), + (6, 196, 0x02), + (10, 196, 0x02), + (15, 196, 0x02), + (24, 196, 0x02), + (31, 196, 0x02), + (41, 196, 0x02), + (56, 196, 0x03), + ], + // 142 + [ + (1, 198, 0x02), + (22, 198, 0x03), + (1, 228, 0x02), + (22, 228, 0x03), + (1, 232, 0x02), + (22, 232, 0x03), + (1, 233, 0x02), + (22, 233, 0x03), + (0, 1, 0x02), + (0, 135, 0x02), + (0, 137, 0x02), + (0, 138, 0x02), + (0, 139, 0x02), + (0, 140, 0x02), + (0, 141, 0x02), + (0, 143, 0x02), + ], + // 143 + [ + (2, 198, 0x02), + (9, 198, 0x02), + (23, 198, 0x02), + (40, 198, 0x03), + (2, 228, 0x02), + (9, 228, 0x02), + (23, 228, 0x02), + (40, 228, 0x03), + (2, 232, 0x02), + (9, 232, 0x02), + (23, 232, 0x02), + (40, 232, 0x03), + (2, 233, 0x02), + (9, 233, 0x02), + (23, 233, 0x02), + (40, 233, 0x03), + ], + // 144 + [ + (3, 198, 0x02), + (6, 198, 0x02), + (10, 198, 0x02), + (15, 198, 0x02), + (24, 198, 0x02), + (31, 198, 0x02), + (41, 198, 0x02), + (56, 198, 0x03), + (3, 228, 0x02), + (6, 228, 0x02), + (10, 228, 0x02), + (15, 228, 0x02), + (24, 228, 0x02), + (31, 228, 0x02), + (41, 228, 0x02), + (56, 228, 0x03), + ], + // 145 + [ + (3, 232, 0x02), + (6, 232, 0x02), + (10, 232, 0x02), + (15, 232, 0x02), + (24, 232, 0x02), + (31, 232, 0x02), + (41, 232, 0x02), + (56, 232, 0x03), + (3, 233, 0x02), + (6, 233, 0x02), + (10, 233, 0x02), + (15, 233, 0x02), + (24, 
233, 0x02), + (31, 233, 0x02), + (41, 233, 0x02), + (56, 233, 0x03), + ], + // 146 + [ + (1, 1, 0x02), + (22, 1, 0x03), + (1, 135, 0x02), + (22, 135, 0x03), + (1, 137, 0x02), + (22, 137, 0x03), + (1, 138, 0x02), + (22, 138, 0x03), + (1, 139, 0x02), + (22, 139, 0x03), + (1, 140, 0x02), + (22, 140, 0x03), + (1, 141, 0x02), + (22, 141, 0x03), + (1, 143, 0x02), + (22, 143, 0x03), + ], + // 147 + [ + (2, 1, 0x02), + (9, 1, 0x02), + (23, 1, 0x02), + (40, 1, 0x03), + (2, 135, 0x02), + (9, 135, 0x02), + (23, 135, 0x02), + (40, 135, 0x03), + (2, 137, 0x02), + (9, 137, 0x02), + (23, 137, 0x02), + (40, 137, 0x03), + (2, 138, 0x02), + (9, 138, 0x02), + (23, 138, 0x02), + (40, 138, 0x03), + ], + // 148 + [ + (3, 1, 0x02), + (6, 1, 0x02), + (10, 1, 0x02), + (15, 1, 0x02), + (24, 1, 0x02), + (31, 1, 0x02), + (41, 1, 0x02), + (56, 1, 0x03), + (3, 135, 0x02), + (6, 135, 0x02), + (10, 135, 0x02), + (15, 135, 0x02), + (24, 135, 0x02), + (31, 135, 0x02), + (41, 135, 0x02), + (56, 135, 0x03), + ], + // 149 + [ + (3, 137, 0x02), + (6, 137, 0x02), + (10, 137, 0x02), + (15, 137, 0x02), + (24, 137, 0x02), + (31, 137, 0x02), + (41, 137, 0x02), + (56, 137, 0x03), + (3, 138, 0x02), + (6, 138, 0x02), + (10, 138, 0x02), + (15, 138, 0x02), + (24, 138, 0x02), + (31, 138, 0x02), + (41, 138, 0x02), + (56, 138, 0x03), + ], + // 150 + [ + (2, 139, 0x02), + (9, 139, 0x02), + (23, 139, 0x02), + (40, 139, 0x03), + (2, 140, 0x02), + (9, 140, 0x02), + (23, 140, 0x02), + (40, 140, 0x03), + (2, 141, 0x02), + (9, 141, 0x02), + (23, 141, 0x02), + (40, 141, 0x03), + (2, 143, 0x02), + (9, 143, 0x02), + (23, 143, 0x02), + (40, 143, 0x03), + ], + // 151 + [ + (3, 139, 0x02), + (6, 139, 0x02), + (10, 139, 0x02), + (15, 139, 0x02), + (24, 139, 0x02), + (31, 139, 0x02), + (41, 139, 0x02), + (56, 139, 0x03), + (3, 140, 0x02), + (6, 140, 0x02), + (10, 140, 0x02), + (15, 140, 0x02), + (24, 140, 0x02), + (31, 140, 0x02), + (41, 140, 0x02), + (56, 140, 0x03), + ], + // 152 + [ + (3, 141, 0x02), + (6, 141, 0x02), + (10, 
141, 0x02), + (15, 141, 0x02), + (24, 141, 0x02), + (31, 141, 0x02), + (41, 141, 0x02), + (56, 141, 0x03), + (3, 143, 0x02), + (6, 143, 0x02), + (10, 143, 0x02), + (15, 143, 0x02), + (24, 143, 0x02), + (31, 143, 0x02), + (41, 143, 0x02), + (56, 143, 0x03), + ], + // 153 + [ + (157, 0, 0x00), + (158, 0, 0x00), + (160, 0, 0x00), + (161, 0, 0x00), + (164, 0, 0x00), + (165, 0, 0x00), + (167, 0, 0x00), + (168, 0, 0x00), + (172, 0, 0x00), + (173, 0, 0x00), + (175, 0, 0x00), + (177, 0, 0x00), + (182, 0, 0x00), + (185, 0, 0x00), + (191, 0, 0x00), + (207, 0, 0x00), + ], + // 154 + [ + (0, 147, 0x02), + (0, 149, 0x02), + (0, 150, 0x02), + (0, 151, 0x02), + (0, 152, 0x02), + (0, 155, 0x02), + (0, 157, 0x02), + (0, 158, 0x02), + (0, 165, 0x02), + (0, 166, 0x02), + (0, 168, 0x02), + (0, 174, 0x02), + (0, 175, 0x02), + (0, 180, 0x02), + (0, 182, 0x02), + (0, 183, 0x02), + ], + // 155 + [ + (1, 147, 0x02), + (22, 147, 0x03), + (1, 149, 0x02), + (22, 149, 0x03), + (1, 150, 0x02), + (22, 150, 0x03), + (1, 151, 0x02), + (22, 151, 0x03), + (1, 152, 0x02), + (22, 152, 0x03), + (1, 155, 0x02), + (22, 155, 0x03), + (1, 157, 0x02), + (22, 157, 0x03), + (1, 158, 0x02), + (22, 158, 0x03), + ], + // 156 + [ + (2, 147, 0x02), + (9, 147, 0x02), + (23, 147, 0x02), + (40, 147, 0x03), + (2, 149, 0x02), + (9, 149, 0x02), + (23, 149, 0x02), + (40, 149, 0x03), + (2, 150, 0x02), + (9, 150, 0x02), + (23, 150, 0x02), + (40, 150, 0x03), + (2, 151, 0x02), + (9, 151, 0x02), + (23, 151, 0x02), + (40, 151, 0x03), + ], + // 157 + [ + (3, 147, 0x02), + (6, 147, 0x02), + (10, 147, 0x02), + (15, 147, 0x02), + (24, 147, 0x02), + (31, 147, 0x02), + (41, 147, 0x02), + (56, 147, 0x03), + (3, 149, 0x02), + (6, 149, 0x02), + (10, 149, 0x02), + (15, 149, 0x02), + (24, 149, 0x02), + (31, 149, 0x02), + (41, 149, 0x02), + (56, 149, 0x03), + ], + // 158 + [ + (3, 150, 0x02), + (6, 150, 0x02), + (10, 150, 0x02), + (15, 150, 0x02), + (24, 150, 0x02), + (31, 150, 0x02), + (41, 150, 0x02), + (56, 150, 0x03), + (3, 151, 
0x02), + (6, 151, 0x02), + (10, 151, 0x02), + (15, 151, 0x02), + (24, 151, 0x02), + (31, 151, 0x02), + (41, 151, 0x02), + (56, 151, 0x03), + ], + // 159 + [ + (2, 152, 0x02), + (9, 152, 0x02), + (23, 152, 0x02), + (40, 152, 0x03), + (2, 155, 0x02), + (9, 155, 0x02), + (23, 155, 0x02), + (40, 155, 0x03), + (2, 157, 0x02), + (9, 157, 0x02), + (23, 157, 0x02), + (40, 157, 0x03), + (2, 158, 0x02), + (9, 158, 0x02), + (23, 158, 0x02), + (40, 158, 0x03), + ], + // 160 + [ + (3, 152, 0x02), + (6, 152, 0x02), + (10, 152, 0x02), + (15, 152, 0x02), + (24, 152, 0x02), + (31, 152, 0x02), + (41, 152, 0x02), + (56, 152, 0x03), + (3, 155, 0x02), + (6, 155, 0x02), + (10, 155, 0x02), + (15, 155, 0x02), + (24, 155, 0x02), + (31, 155, 0x02), + (41, 155, 0x02), + (56, 155, 0x03), + ], + // 161 + [ + (3, 157, 0x02), + (6, 157, 0x02), + (10, 157, 0x02), + (15, 157, 0x02), + (24, 157, 0x02), + (31, 157, 0x02), + (41, 157, 0x02), + (56, 157, 0x03), + (3, 158, 0x02), + (6, 158, 0x02), + (10, 158, 0x02), + (15, 158, 0x02), + (24, 158, 0x02), + (31, 158, 0x02), + (41, 158, 0x02), + (56, 158, 0x03), + ], + // 162 + [ + (1, 165, 0x02), + (22, 165, 0x03), + (1, 166, 0x02), + (22, 166, 0x03), + (1, 168, 0x02), + (22, 168, 0x03), + (1, 174, 0x02), + (22, 174, 0x03), + (1, 175, 0x02), + (22, 175, 0x03), + (1, 180, 0x02), + (22, 180, 0x03), + (1, 182, 0x02), + (22, 182, 0x03), + (1, 183, 0x02), + (22, 183, 0x03), + ], + // 163 + [ + (2, 165, 0x02), + (9, 165, 0x02), + (23, 165, 0x02), + (40, 165, 0x03), + (2, 166, 0x02), + (9, 166, 0x02), + (23, 166, 0x02), + (40, 166, 0x03), + (2, 168, 0x02), + (9, 168, 0x02), + (23, 168, 0x02), + (40, 168, 0x03), + (2, 174, 0x02), + (9, 174, 0x02), + (23, 174, 0x02), + (40, 174, 0x03), + ], + // 164 + [ + (3, 165, 0x02), + (6, 165, 0x02), + (10, 165, 0x02), + (15, 165, 0x02), + (24, 165, 0x02), + (31, 165, 0x02), + (41, 165, 0x02), + (56, 165, 0x03), + (3, 166, 0x02), + (6, 166, 0x02), + (10, 166, 0x02), + (15, 166, 0x02), + (24, 166, 0x02), + (31, 166, 0x02), + 
(41, 166, 0x02), + (56, 166, 0x03), + ], + // 165 + [ + (3, 168, 0x02), + (6, 168, 0x02), + (10, 168, 0x02), + (15, 168, 0x02), + (24, 168, 0x02), + (31, 168, 0x02), + (41, 168, 0x02), + (56, 168, 0x03), + (3, 174, 0x02), + (6, 174, 0x02), + (10, 174, 0x02), + (15, 174, 0x02), + (24, 174, 0x02), + (31, 174, 0x02), + (41, 174, 0x02), + (56, 174, 0x03), + ], + // 166 + [ + (2, 175, 0x02), + (9, 175, 0x02), + (23, 175, 0x02), + (40, 175, 0x03), + (2, 180, 0x02), + (9, 180, 0x02), + (23, 180, 0x02), + (40, 180, 0x03), + (2, 182, 0x02), + (9, 182, 0x02), + (23, 182, 0x02), + (40, 182, 0x03), + (2, 183, 0x02), + (9, 183, 0x02), + (23, 183, 0x02), + (40, 183, 0x03), + ], + // 167 + [ + (3, 175, 0x02), + (6, 175, 0x02), + (10, 175, 0x02), + (15, 175, 0x02), + (24, 175, 0x02), + (31, 175, 0x02), + (41, 175, 0x02), + (56, 175, 0x03), + (3, 180, 0x02), + (6, 180, 0x02), + (10, 180, 0x02), + (15, 180, 0x02), + (24, 180, 0x02), + (31, 180, 0x02), + (41, 180, 0x02), + (56, 180, 0x03), + ], + // 168 + [ + (3, 182, 0x02), + (6, 182, 0x02), + (10, 182, 0x02), + (15, 182, 0x02), + (24, 182, 0x02), + (31, 182, 0x02), + (41, 182, 0x02), + (56, 182, 0x03), + (3, 183, 0x02), + (6, 183, 0x02), + (10, 183, 0x02), + (15, 183, 0x02), + (24, 183, 0x02), + (31, 183, 0x02), + (41, 183, 0x02), + (56, 183, 0x03), + ], + // 169 + [ + (0, 188, 0x02), + (0, 191, 0x02), + (0, 197, 0x02), + (0, 231, 0x02), + (0, 239, 0x02), + (176, 0, 0x00), + (178, 0, 0x00), + (179, 0, 0x00), + (183, 0, 0x00), + (184, 0, 0x00), + (186, 0, 0x00), + (187, 0, 0x00), + (192, 0, 0x00), + (199, 0, 0x00), + (208, 0, 0x00), + (223, 0, 0x00), + ], + // 170 + [ + (1, 188, 0x02), + (22, 188, 0x03), + (1, 191, 0x02), + (22, 191, 0x03), + (1, 197, 0x02), + (22, 197, 0x03), + (1, 231, 0x02), + (22, 231, 0x03), + (1, 239, 0x02), + (22, 239, 0x03), + (0, 9, 0x02), + (0, 142, 0x02), + (0, 144, 0x02), + (0, 145, 0x02), + (0, 148, 0x02), + (0, 159, 0x02), + ], + // 171 + [ + (2, 188, 0x02), + (9, 188, 0x02), + (23, 188, 0x02), + (40, 
188, 0x03), + (2, 191, 0x02), + (9, 191, 0x02), + (23, 191, 0x02), + (40, 191, 0x03), + (2, 197, 0x02), + (9, 197, 0x02), + (23, 197, 0x02), + (40, 197, 0x03), + (2, 231, 0x02), + (9, 231, 0x02), + (23, 231, 0x02), + (40, 231, 0x03), + ], + // 172 + [ + (3, 188, 0x02), + (6, 188, 0x02), + (10, 188, 0x02), + (15, 188, 0x02), + (24, 188, 0x02), + (31, 188, 0x02), + (41, 188, 0x02), + (56, 188, 0x03), + (3, 191, 0x02), + (6, 191, 0x02), + (10, 191, 0x02), + (15, 191, 0x02), + (24, 191, 0x02), + (31, 191, 0x02), + (41, 191, 0x02), + (56, 191, 0x03), + ], + // 173 + [ + (3, 197, 0x02), + (6, 197, 0x02), + (10, 197, 0x02), + (15, 197, 0x02), + (24, 197, 0x02), + (31, 197, 0x02), + (41, 197, 0x02), + (56, 197, 0x03), + (3, 231, 0x02), + (6, 231, 0x02), + (10, 231, 0x02), + (15, 231, 0x02), + (24, 231, 0x02), + (31, 231, 0x02), + (41, 231, 0x02), + (56, 231, 0x03), + ], + // 174 + [ + (2, 239, 0x02), + (9, 239, 0x02), + (23, 239, 0x02), + (40, 239, 0x03), + (1, 9, 0x02), + (22, 9, 0x03), + (1, 142, 0x02), + (22, 142, 0x03), + (1, 144, 0x02), + (22, 144, 0x03), + (1, 145, 0x02), + (22, 145, 0x03), + (1, 148, 0x02), + (22, 148, 0x03), + (1, 159, 0x02), + (22, 159, 0x03), + ], + // 175 + [ + (3, 239, 0x02), + (6, 239, 0x02), + (10, 239, 0x02), + (15, 239, 0x02), + (24, 239, 0x02), + (31, 239, 0x02), + (41, 239, 0x02), + (56, 239, 0x03), + (2, 9, 0x02), + (9, 9, 0x02), + (23, 9, 0x02), + (40, 9, 0x03), + (2, 142, 0x02), + (9, 142, 0x02), + (23, 142, 0x02), + (40, 142, 0x03), + ], + // 176 + [ + (3, 9, 0x02), + (6, 9, 0x02), + (10, 9, 0x02), + (15, 9, 0x02), + (24, 9, 0x02), + (31, 9, 0x02), + (41, 9, 0x02), + (56, 9, 0x03), + (3, 142, 0x02), + (6, 142, 0x02), + (10, 142, 0x02), + (15, 142, 0x02), + (24, 142, 0x02), + (31, 142, 0x02), + (41, 142, 0x02), + (56, 142, 0x03), + ], + // 177 + [ + (2, 144, 0x02), + (9, 144, 0x02), + (23, 144, 0x02), + (40, 144, 0x03), + (2, 145, 0x02), + (9, 145, 0x02), + (23, 145, 0x02), + (40, 145, 0x03), + (2, 148, 0x02), + (9, 148, 0x02), + (23, 
148, 0x02), + (40, 148, 0x03), + (2, 159, 0x02), + (9, 159, 0x02), + (23, 159, 0x02), + (40, 159, 0x03), + ], + // 178 + [ + (3, 144, 0x02), + (6, 144, 0x02), + (10, 144, 0x02), + (15, 144, 0x02), + (24, 144, 0x02), + (31, 144, 0x02), + (41, 144, 0x02), + (56, 144, 0x03), + (3, 145, 0x02), + (6, 145, 0x02), + (10, 145, 0x02), + (15, 145, 0x02), + (24, 145, 0x02), + (31, 145, 0x02), + (41, 145, 0x02), + (56, 145, 0x03), + ], + // 179 + [ + (3, 148, 0x02), + (6, 148, 0x02), + (10, 148, 0x02), + (15, 148, 0x02), + (24, 148, 0x02), + (31, 148, 0x02), + (41, 148, 0x02), + (56, 148, 0x03), + (3, 159, 0x02), + (6, 159, 0x02), + (10, 159, 0x02), + (15, 159, 0x02), + (24, 159, 0x02), + (31, 159, 0x02), + (41, 159, 0x02), + (56, 159, 0x03), + ], + // 180 + [ + (0, 171, 0x02), + (0, 206, 0x02), + (0, 215, 0x02), + (0, 225, 0x02), + (0, 236, 0x02), + (0, 237, 0x02), + (188, 0, 0x00), + (189, 0, 0x00), + (193, 0, 0x00), + (196, 0, 0x00), + (200, 0, 0x00), + (203, 0, 0x00), + (209, 0, 0x00), + (216, 0, 0x00), + (224, 0, 0x00), + (238, 0, 0x00), + ], + // 181 + [ + (1, 171, 0x02), + (22, 171, 0x03), + (1, 206, 0x02), + (22, 206, 0x03), + (1, 215, 0x02), + (22, 215, 0x03), + (1, 225, 0x02), + (22, 225, 0x03), + (1, 236, 0x02), + (22, 236, 0x03), + (1, 237, 0x02), + (22, 237, 0x03), + (0, 199, 0x02), + (0, 207, 0x02), + (0, 234, 0x02), + (0, 235, 0x02), + ], + // 182 + [ + (2, 171, 0x02), + (9, 171, 0x02), + (23, 171, 0x02), + (40, 171, 0x03), + (2, 206, 0x02), + (9, 206, 0x02), + (23, 206, 0x02), + (40, 206, 0x03), + (2, 215, 0x02), + (9, 215, 0x02), + (23, 215, 0x02), + (40, 215, 0x03), + (2, 225, 0x02), + (9, 225, 0x02), + (23, 225, 0x02), + (40, 225, 0x03), + ], + // 183 + [ + (3, 171, 0x02), + (6, 171, 0x02), + (10, 171, 0x02), + (15, 171, 0x02), + (24, 171, 0x02), + (31, 171, 0x02), + (41, 171, 0x02), + (56, 171, 0x03), + (3, 206, 0x02), + (6, 206, 0x02), + (10, 206, 0x02), + (15, 206, 0x02), + (24, 206, 0x02), + (31, 206, 0x02), + (41, 206, 0x02), + (56, 206, 0x03), + ], + 
// 184 + [ + (3, 215, 0x02), + (6, 215, 0x02), + (10, 215, 0x02), + (15, 215, 0x02), + (24, 215, 0x02), + (31, 215, 0x02), + (41, 215, 0x02), + (56, 215, 0x03), + (3, 225, 0x02), + (6, 225, 0x02), + (10, 225, 0x02), + (15, 225, 0x02), + (24, 225, 0x02), + (31, 225, 0x02), + (41, 225, 0x02), + (56, 225, 0x03), + ], + // 185 + [ + (2, 236, 0x02), + (9, 236, 0x02), + (23, 236, 0x02), + (40, 236, 0x03), + (2, 237, 0x02), + (9, 237, 0x02), + (23, 237, 0x02), + (40, 237, 0x03), + (1, 199, 0x02), + (22, 199, 0x03), + (1, 207, 0x02), + (22, 207, 0x03), + (1, 234, 0x02), + (22, 234, 0x03), + (1, 235, 0x02), + (22, 235, 0x03), + ], + // 186 + [ + (3, 236, 0x02), + (6, 236, 0x02), + (10, 236, 0x02), + (15, 236, 0x02), + (24, 236, 0x02), + (31, 236, 0x02), + (41, 236, 0x02), + (56, 236, 0x03), + (3, 237, 0x02), + (6, 237, 0x02), + (10, 237, 0x02), + (15, 237, 0x02), + (24, 237, 0x02), + (31, 237, 0x02), + (41, 237, 0x02), + (56, 237, 0x03), + ], + // 187 + [ + (2, 199, 0x02), + (9, 199, 0x02), + (23, 199, 0x02), + (40, 199, 0x03), + (2, 207, 0x02), + (9, 207, 0x02), + (23, 207, 0x02), + (40, 207, 0x03), + (2, 234, 0x02), + (9, 234, 0x02), + (23, 234, 0x02), + (40, 234, 0x03), + (2, 235, 0x02), + (9, 235, 0x02), + (23, 235, 0x02), + (40, 235, 0x03), + ], + // 188 + [ + (3, 199, 0x02), + (6, 199, 0x02), + (10, 199, 0x02), + (15, 199, 0x02), + (24, 199, 0x02), + (31, 199, 0x02), + (41, 199, 0x02), + (56, 199, 0x03), + (3, 207, 0x02), + (6, 207, 0x02), + (10, 207, 0x02), + (15, 207, 0x02), + (24, 207, 0x02), + (31, 207, 0x02), + (41, 207, 0x02), + (56, 207, 0x03), + ], + // 189 + [ + (3, 234, 0x02), + (6, 234, 0x02), + (10, 234, 0x02), + (15, 234, 0x02), + (24, 234, 0x02), + (31, 234, 0x02), + (41, 234, 0x02), + (56, 234, 0x03), + (3, 235, 0x02), + (6, 235, 0x02), + (10, 235, 0x02), + (15, 235, 0x02), + (24, 235, 0x02), + (31, 235, 0x02), + (41, 235, 0x02), + (56, 235, 0x03), + ], + // 190 + [ + (194, 0, 0x00), + (195, 0, 0x00), + (197, 0, 0x00), + (198, 0, 0x00), + (201, 0, 
0x00), + (202, 0, 0x00), + (204, 0, 0x00), + (205, 0, 0x00), + (210, 0, 0x00), + (213, 0, 0x00), + (217, 0, 0x00), + (220, 0, 0x00), + (225, 0, 0x00), + (231, 0, 0x00), + (239, 0, 0x00), + (246, 0, 0x00), + ], + // 191 + [ + (0, 192, 0x02), + (0, 193, 0x02), + (0, 200, 0x02), + (0, 201, 0x02), + (0, 202, 0x02), + (0, 205, 0x02), + (0, 210, 0x02), + (0, 213, 0x02), + (0, 218, 0x02), + (0, 219, 0x02), + (0, 238, 0x02), + (0, 240, 0x02), + (0, 242, 0x02), + (0, 243, 0x02), + (0, 255, 0x02), + (206, 0, 0x00), + ], + // 192 + [ + (1, 192, 0x02), + (22, 192, 0x03), + (1, 193, 0x02), + (22, 193, 0x03), + (1, 200, 0x02), + (22, 200, 0x03), + (1, 201, 0x02), + (22, 201, 0x03), + (1, 202, 0x02), + (22, 202, 0x03), + (1, 205, 0x02), + (22, 205, 0x03), + (1, 210, 0x02), + (22, 210, 0x03), + (1, 213, 0x02), + (22, 213, 0x03), + ], + // 193 + [ + (2, 192, 0x02), + (9, 192, 0x02), + (23, 192, 0x02), + (40, 192, 0x03), + (2, 193, 0x02), + (9, 193, 0x02), + (23, 193, 0x02), + (40, 193, 0x03), + (2, 200, 0x02), + (9, 200, 0x02), + (23, 200, 0x02), + (40, 200, 0x03), + (2, 201, 0x02), + (9, 201, 0x02), + (23, 201, 0x02), + (40, 201, 0x03), + ], + // 194 + [ + (3, 192, 0x02), + (6, 192, 0x02), + (10, 192, 0x02), + (15, 192, 0x02), + (24, 192, 0x02), + (31, 192, 0x02), + (41, 192, 0x02), + (56, 192, 0x03), + (3, 193, 0x02), + (6, 193, 0x02), + (10, 193, 0x02), + (15, 193, 0x02), + (24, 193, 0x02), + (31, 193, 0x02), + (41, 193, 0x02), + (56, 193, 0x03), + ], + // 195 + [ + (3, 200, 0x02), + (6, 200, 0x02), + (10, 200, 0x02), + (15, 200, 0x02), + (24, 200, 0x02), + (31, 200, 0x02), + (41, 200, 0x02), + (56, 200, 0x03), + (3, 201, 0x02), + (6, 201, 0x02), + (10, 201, 0x02), + (15, 201, 0x02), + (24, 201, 0x02), + (31, 201, 0x02), + (41, 201, 0x02), + (56, 201, 0x03), + ], + // 196 + [ + (2, 202, 0x02), + (9, 202, 0x02), + (23, 202, 0x02), + (40, 202, 0x03), + (2, 205, 0x02), + (9, 205, 0x02), + (23, 205, 0x02), + (40, 205, 0x03), + (2, 210, 0x02), + (9, 210, 0x02), + (23, 210, 0x02), + 
(40, 210, 0x03), + (2, 213, 0x02), + (9, 213, 0x02), + (23, 213, 0x02), + (40, 213, 0x03), + ], + // 197 + [ + (3, 202, 0x02), + (6, 202, 0x02), + (10, 202, 0x02), + (15, 202, 0x02), + (24, 202, 0x02), + (31, 202, 0x02), + (41, 202, 0x02), + (56, 202, 0x03), + (3, 205, 0x02), + (6, 205, 0x02), + (10, 205, 0x02), + (15, 205, 0x02), + (24, 205, 0x02), + (31, 205, 0x02), + (41, 205, 0x02), + (56, 205, 0x03), + ], + // 198 + [ + (3, 210, 0x02), + (6, 210, 0x02), + (10, 210, 0x02), + (15, 210, 0x02), + (24, 210, 0x02), + (31, 210, 0x02), + (41, 210, 0x02), + (56, 210, 0x03), + (3, 213, 0x02), + (6, 213, 0x02), + (10, 213, 0x02), + (15, 213, 0x02), + (24, 213, 0x02), + (31, 213, 0x02), + (41, 213, 0x02), + (56, 213, 0x03), + ], + // 199 + [ + (1, 218, 0x02), + (22, 218, 0x03), + (1, 219, 0x02), + (22, 219, 0x03), + (1, 238, 0x02), + (22, 238, 0x03), + (1, 240, 0x02), + (22, 240, 0x03), + (1, 242, 0x02), + (22, 242, 0x03), + (1, 243, 0x02), + (22, 243, 0x03), + (1, 255, 0x02), + (22, 255, 0x03), + (0, 203, 0x02), + (0, 204, 0x02), + ], + // 200 + [ + (2, 218, 0x02), + (9, 218, 0x02), + (23, 218, 0x02), + (40, 218, 0x03), + (2, 219, 0x02), + (9, 219, 0x02), + (23, 219, 0x02), + (40, 219, 0x03), + (2, 238, 0x02), + (9, 238, 0x02), + (23, 238, 0x02), + (40, 238, 0x03), + (2, 240, 0x02), + (9, 240, 0x02), + (23, 240, 0x02), + (40, 240, 0x03), + ], + // 201 + [ + (3, 218, 0x02), + (6, 218, 0x02), + (10, 218, 0x02), + (15, 218, 0x02), + (24, 218, 0x02), + (31, 218, 0x02), + (41, 218, 0x02), + (56, 218, 0x03), + (3, 219, 0x02), + (6, 219, 0x02), + (10, 219, 0x02), + (15, 219, 0x02), + (24, 219, 0x02), + (31, 219, 0x02), + (41, 219, 0x02), + (56, 219, 0x03), + ], + // 202 + [ + (3, 238, 0x02), + (6, 238, 0x02), + (10, 238, 0x02), + (15, 238, 0x02), + (24, 238, 0x02), + (31, 238, 0x02), + (41, 238, 0x02), + (56, 238, 0x03), + (3, 240, 0x02), + (6, 240, 0x02), + (10, 240, 0x02), + (15, 240, 0x02), + (24, 240, 0x02), + (31, 240, 0x02), + (41, 240, 0x02), + (56, 240, 0x03), + ], + // 
203 + [ + (2, 242, 0x02), + (9, 242, 0x02), + (23, 242, 0x02), + (40, 242, 0x03), + (2, 243, 0x02), + (9, 243, 0x02), + (23, 243, 0x02), + (40, 243, 0x03), + (2, 255, 0x02), + (9, 255, 0x02), + (23, 255, 0x02), + (40, 255, 0x03), + (1, 203, 0x02), + (22, 203, 0x03), + (1, 204, 0x02), + (22, 204, 0x03), + ], + // 204 + [ + (3, 242, 0x02), + (6, 242, 0x02), + (10, 242, 0x02), + (15, 242, 0x02), + (24, 242, 0x02), + (31, 242, 0x02), + (41, 242, 0x02), + (56, 242, 0x03), + (3, 243, 0x02), + (6, 243, 0x02), + (10, 243, 0x02), + (15, 243, 0x02), + (24, 243, 0x02), + (31, 243, 0x02), + (41, 243, 0x02), + (56, 243, 0x03), + ], + // 205 + [ + (3, 255, 0x02), + (6, 255, 0x02), + (10, 255, 0x02), + (15, 255, 0x02), + (24, 255, 0x02), + (31, 255, 0x02), + (41, 255, 0x02), + (56, 255, 0x03), + (2, 203, 0x02), + (9, 203, 0x02), + (23, 203, 0x02), + (40, 203, 0x03), + (2, 204, 0x02), + (9, 204, 0x02), + (23, 204, 0x02), + (40, 204, 0x03), + ], + // 206 + [ + (3, 203, 0x02), + (6, 203, 0x02), + (10, 203, 0x02), + (15, 203, 0x02), + (24, 203, 0x02), + (31, 203, 0x02), + (41, 203, 0x02), + (56, 203, 0x03), + (3, 204, 0x02), + (6, 204, 0x02), + (10, 204, 0x02), + (15, 204, 0x02), + (24, 204, 0x02), + (31, 204, 0x02), + (41, 204, 0x02), + (56, 204, 0x03), + ], + // 207 + [ + (211, 0, 0x00), + (212, 0, 0x00), + (214, 0, 0x00), + (215, 0, 0x00), + (218, 0, 0x00), + (219, 0, 0x00), + (221, 0, 0x00), + (222, 0, 0x00), + (226, 0, 0x00), + (228, 0, 0x00), + (232, 0, 0x00), + (235, 0, 0x00), + (240, 0, 0x00), + (243, 0, 0x00), + (247, 0, 0x00), + (250, 0, 0x00), + ], + // 208 + [ + (0, 211, 0x02), + (0, 212, 0x02), + (0, 214, 0x02), + (0, 221, 0x02), + (0, 222, 0x02), + (0, 223, 0x02), + (0, 241, 0x02), + (0, 244, 0x02), + (0, 245, 0x02), + (0, 246, 0x02), + (0, 247, 0x02), + (0, 248, 0x02), + (0, 250, 0x02), + (0, 251, 0x02), + (0, 252, 0x02), + (0, 253, 0x02), + ], + // 209 + [ + (1, 211, 0x02), + (22, 211, 0x03), + (1, 212, 0x02), + (22, 212, 0x03), + (1, 214, 0x02), + (22, 214, 0x03), + 
(1, 221, 0x02), + (22, 221, 0x03), + (1, 222, 0x02), + (22, 222, 0x03), + (1, 223, 0x02), + (22, 223, 0x03), + (1, 241, 0x02), + (22, 241, 0x03), + (1, 244, 0x02), + (22, 244, 0x03), + ], + // 210 + [ + (2, 211, 0x02), + (9, 211, 0x02), + (23, 211, 0x02), + (40, 211, 0x03), + (2, 212, 0x02), + (9, 212, 0x02), + (23, 212, 0x02), + (40, 212, 0x03), + (2, 214, 0x02), + (9, 214, 0x02), + (23, 214, 0x02), + (40, 214, 0x03), + (2, 221, 0x02), + (9, 221, 0x02), + (23, 221, 0x02), + (40, 221, 0x03), + ], + // 211 + [ + (3, 211, 0x02), + (6, 211, 0x02), + (10, 211, 0x02), + (15, 211, 0x02), + (24, 211, 0x02), + (31, 211, 0x02), + (41, 211, 0x02), + (56, 211, 0x03), + (3, 212, 0x02), + (6, 212, 0x02), + (10, 212, 0x02), + (15, 212, 0x02), + (24, 212, 0x02), + (31, 212, 0x02), + (41, 212, 0x02), + (56, 212, 0x03), + ], + // 212 + [ + (3, 214, 0x02), + (6, 214, 0x02), + (10, 214, 0x02), + (15, 214, 0x02), + (24, 214, 0x02), + (31, 214, 0x02), + (41, 214, 0x02), + (56, 214, 0x03), + (3, 221, 0x02), + (6, 221, 0x02), + (10, 221, 0x02), + (15, 221, 0x02), + (24, 221, 0x02), + (31, 221, 0x02), + (41, 221, 0x02), + (56, 221, 0x03), + ], + // 213 + [ + (2, 222, 0x02), + (9, 222, 0x02), + (23, 222, 0x02), + (40, 222, 0x03), + (2, 223, 0x02), + (9, 223, 0x02), + (23, 223, 0x02), + (40, 223, 0x03), + (2, 241, 0x02), + (9, 241, 0x02), + (23, 241, 0x02), + (40, 241, 0x03), + (2, 244, 0x02), + (9, 244, 0x02), + (23, 244, 0x02), + (40, 244, 0x03), + ], + // 214 + [ + (3, 222, 0x02), + (6, 222, 0x02), + (10, 222, 0x02), + (15, 222, 0x02), + (24, 222, 0x02), + (31, 222, 0x02), + (41, 222, 0x02), + (56, 222, 0x03), + (3, 223, 0x02), + (6, 223, 0x02), + (10, 223, 0x02), + (15, 223, 0x02), + (24, 223, 0x02), + (31, 223, 0x02), + (41, 223, 0x02), + (56, 223, 0x03), + ], + // 215 + [ + (3, 241, 0x02), + (6, 241, 0x02), + (10, 241, 0x02), + (15, 241, 0x02), + (24, 241, 0x02), + (31, 241, 0x02), + (41, 241, 0x02), + (56, 241, 0x03), + (3, 244, 0x02), + (6, 244, 0x02), + (10, 244, 0x02), + (15, 244, 
0x02), + (24, 244, 0x02), + (31, 244, 0x02), + (41, 244, 0x02), + (56, 244, 0x03), + ], + // 216 + [ + (1, 245, 0x02), + (22, 245, 0x03), + (1, 246, 0x02), + (22, 246, 0x03), + (1, 247, 0x02), + (22, 247, 0x03), + (1, 248, 0x02), + (22, 248, 0x03), + (1, 250, 0x02), + (22, 250, 0x03), + (1, 251, 0x02), + (22, 251, 0x03), + (1, 252, 0x02), + (22, 252, 0x03), + (1, 253, 0x02), + (22, 253, 0x03), + ], + // 217 + [ + (2, 245, 0x02), + (9, 245, 0x02), + (23, 245, 0x02), + (40, 245, 0x03), + (2, 246, 0x02), + (9, 246, 0x02), + (23, 246, 0x02), + (40, 246, 0x03), + (2, 247, 0x02), + (9, 247, 0x02), + (23, 247, 0x02), + (40, 247, 0x03), + (2, 248, 0x02), + (9, 248, 0x02), + (23, 248, 0x02), + (40, 248, 0x03), + ], + // 218 + [ + (3, 245, 0x02), + (6, 245, 0x02), + (10, 245, 0x02), + (15, 245, 0x02), + (24, 245, 0x02), + (31, 245, 0x02), + (41, 245, 0x02), + (56, 245, 0x03), + (3, 246, 0x02), + (6, 246, 0x02), + (10, 246, 0x02), + (15, 246, 0x02), + (24, 246, 0x02), + (31, 246, 0x02), + (41, 246, 0x02), + (56, 246, 0x03), + ], + // 219 + [ + (3, 247, 0x02), + (6, 247, 0x02), + (10, 247, 0x02), + (15, 247, 0x02), + (24, 247, 0x02), + (31, 247, 0x02), + (41, 247, 0x02), + (56, 247, 0x03), + (3, 248, 0x02), + (6, 248, 0x02), + (10, 248, 0x02), + (15, 248, 0x02), + (24, 248, 0x02), + (31, 248, 0x02), + (41, 248, 0x02), + (56, 248, 0x03), + ], + // 220 + [ + (2, 250, 0x02), + (9, 250, 0x02), + (23, 250, 0x02), + (40, 250, 0x03), + (2, 251, 0x02), + (9, 251, 0x02), + (23, 251, 0x02), + (40, 251, 0x03), + (2, 252, 0x02), + (9, 252, 0x02), + (23, 252, 0x02), + (40, 252, 0x03), + (2, 253, 0x02), + (9, 253, 0x02), + (23, 253, 0x02), + (40, 253, 0x03), + ], + // 221 + [ + (3, 250, 0x02), + (6, 250, 0x02), + (10, 250, 0x02), + (15, 250, 0x02), + (24, 250, 0x02), + (31, 250, 0x02), + (41, 250, 0x02), + (56, 250, 0x03), + (3, 251, 0x02), + (6, 251, 0x02), + (10, 251, 0x02), + (15, 251, 0x02), + (24, 251, 0x02), + (31, 251, 0x02), + (41, 251, 0x02), + (56, 251, 0x03), + ], + // 222 + [ + 
(3, 252, 0x02), + (6, 252, 0x02), + (10, 252, 0x02), + (15, 252, 0x02), + (24, 252, 0x02), + (31, 252, 0x02), + (41, 252, 0x02), + (56, 252, 0x03), + (3, 253, 0x02), + (6, 253, 0x02), + (10, 253, 0x02), + (15, 253, 0x02), + (24, 253, 0x02), + (31, 253, 0x02), + (41, 253, 0x02), + (56, 253, 0x03), + ], + // 223 + [ + (0, 254, 0x02), + (227, 0, 0x00), + (229, 0, 0x00), + (230, 0, 0x00), + (233, 0, 0x00), + (234, 0, 0x00), + (236, 0, 0x00), + (237, 0, 0x00), + (241, 0, 0x00), + (242, 0, 0x00), + (244, 0, 0x00), + (245, 0, 0x00), + (248, 0, 0x00), + (249, 0, 0x00), + (251, 0, 0x00), + (252, 0, 0x00), + ], + // 224 + [ + (1, 254, 0x02), + (22, 254, 0x03), + (0, 2, 0x02), + (0, 3, 0x02), + (0, 4, 0x02), + (0, 5, 0x02), + (0, 6, 0x02), + (0, 7, 0x02), + (0, 8, 0x02), + (0, 11, 0x02), + (0, 12, 0x02), + (0, 14, 0x02), + (0, 15, 0x02), + (0, 16, 0x02), + (0, 17, 0x02), + (0, 18, 0x02), + ], + // 225 + [ + (2, 254, 0x02), + (9, 254, 0x02), + (23, 254, 0x02), + (40, 254, 0x03), + (1, 2, 0x02), + (22, 2, 0x03), + (1, 3, 0x02), + (22, 3, 0x03), + (1, 4, 0x02), + (22, 4, 0x03), + (1, 5, 0x02), + (22, 5, 0x03), + (1, 6, 0x02), + (22, 6, 0x03), + (1, 7, 0x02), + (22, 7, 0x03), + ], + // 226 + [ + (3, 254, 0x02), + (6, 254, 0x02), + (10, 254, 0x02), + (15, 254, 0x02), + (24, 254, 0x02), + (31, 254, 0x02), + (41, 254, 0x02), + (56, 254, 0x03), + (2, 2, 0x02), + (9, 2, 0x02), + (23, 2, 0x02), + (40, 2, 0x03), + (2, 3, 0x02), + (9, 3, 0x02), + (23, 3, 0x02), + (40, 3, 0x03), + ], + // 227 + [ + (3, 2, 0x02), + (6, 2, 0x02), + (10, 2, 0x02), + (15, 2, 0x02), + (24, 2, 0x02), + (31, 2, 0x02), + (41, 2, 0x02), + (56, 2, 0x03), + (3, 3, 0x02), + (6, 3, 0x02), + (10, 3, 0x02), + (15, 3, 0x02), + (24, 3, 0x02), + (31, 3, 0x02), + (41, 3, 0x02), + (56, 3, 0x03), + ], + // 228 + [ + (2, 4, 0x02), + (9, 4, 0x02), + (23, 4, 0x02), + (40, 4, 0x03), + (2, 5, 0x02), + (9, 5, 0x02), + (23, 5, 0x02), + (40, 5, 0x03), + (2, 6, 0x02), + (9, 6, 0x02), + (23, 6, 0x02), + (40, 6, 0x03), + (2, 7, 0x02), + 
(9, 7, 0x02), + (23, 7, 0x02), + (40, 7, 0x03), + ], + // 229 + [ + (3, 4, 0x02), + (6, 4, 0x02), + (10, 4, 0x02), + (15, 4, 0x02), + (24, 4, 0x02), + (31, 4, 0x02), + (41, 4, 0x02), + (56, 4, 0x03), + (3, 5, 0x02), + (6, 5, 0x02), + (10, 5, 0x02), + (15, 5, 0x02), + (24, 5, 0x02), + (31, 5, 0x02), + (41, 5, 0x02), + (56, 5, 0x03), + ], + // 230 + [ + (3, 6, 0x02), + (6, 6, 0x02), + (10, 6, 0x02), + (15, 6, 0x02), + (24, 6, 0x02), + (31, 6, 0x02), + (41, 6, 0x02), + (56, 6, 0x03), + (3, 7, 0x02), + (6, 7, 0x02), + (10, 7, 0x02), + (15, 7, 0x02), + (24, 7, 0x02), + (31, 7, 0x02), + (41, 7, 0x02), + (56, 7, 0x03), + ], + // 231 + [ + (1, 8, 0x02), + (22, 8, 0x03), + (1, 11, 0x02), + (22, 11, 0x03), + (1, 12, 0x02), + (22, 12, 0x03), + (1, 14, 0x02), + (22, 14, 0x03), + (1, 15, 0x02), + (22, 15, 0x03), + (1, 16, 0x02), + (22, 16, 0x03), + (1, 17, 0x02), + (22, 17, 0x03), + (1, 18, 0x02), + (22, 18, 0x03), + ], + // 232 + [ + (2, 8, 0x02), + (9, 8, 0x02), + (23, 8, 0x02), + (40, 8, 0x03), + (2, 11, 0x02), + (9, 11, 0x02), + (23, 11, 0x02), + (40, 11, 0x03), + (2, 12, 0x02), + (9, 12, 0x02), + (23, 12, 0x02), + (40, 12, 0x03), + (2, 14, 0x02), + (9, 14, 0x02), + (23, 14, 0x02), + (40, 14, 0x03), + ], + // 233 + [ + (3, 8, 0x02), + (6, 8, 0x02), + (10, 8, 0x02), + (15, 8, 0x02), + (24, 8, 0x02), + (31, 8, 0x02), + (41, 8, 0x02), + (56, 8, 0x03), + (3, 11, 0x02), + (6, 11, 0x02), + (10, 11, 0x02), + (15, 11, 0x02), + (24, 11, 0x02), + (31, 11, 0x02), + (41, 11, 0x02), + (56, 11, 0x03), + ], + // 234 + [ + (3, 12, 0x02), + (6, 12, 0x02), + (10, 12, 0x02), + (15, 12, 0x02), + (24, 12, 0x02), + (31, 12, 0x02), + (41, 12, 0x02), + (56, 12, 0x03), + (3, 14, 0x02), + (6, 14, 0x02), + (10, 14, 0x02), + (15, 14, 0x02), + (24, 14, 0x02), + (31, 14, 0x02), + (41, 14, 0x02), + (56, 14, 0x03), + ], + // 235 + [ + (2, 15, 0x02), + (9, 15, 0x02), + (23, 15, 0x02), + (40, 15, 0x03), + (2, 16, 0x02), + (9, 16, 0x02), + (23, 16, 0x02), + (40, 16, 0x03), + (2, 17, 0x02), + (9, 17, 0x02), + 
(23, 17, 0x02), + (40, 17, 0x03), + (2, 18, 0x02), + (9, 18, 0x02), + (23, 18, 0x02), + (40, 18, 0x03), + ], + // 236 + [ + (3, 15, 0x02), + (6, 15, 0x02), + (10, 15, 0x02), + (15, 15, 0x02), + (24, 15, 0x02), + (31, 15, 0x02), + (41, 15, 0x02), + (56, 15, 0x03), + (3, 16, 0x02), + (6, 16, 0x02), + (10, 16, 0x02), + (15, 16, 0x02), + (24, 16, 0x02), + (31, 16, 0x02), + (41, 16, 0x02), + (56, 16, 0x03), + ], + // 237 + [ + (3, 17, 0x02), + (6, 17, 0x02), + (10, 17, 0x02), + (15, 17, 0x02), + (24, 17, 0x02), + (31, 17, 0x02), + (41, 17, 0x02), + (56, 17, 0x03), + (3, 18, 0x02), + (6, 18, 0x02), + (10, 18, 0x02), + (15, 18, 0x02), + (24, 18, 0x02), + (31, 18, 0x02), + (41, 18, 0x02), + (56, 18, 0x03), + ], + // 238 + [ + (0, 19, 0x02), + (0, 20, 0x02), + (0, 21, 0x02), + (0, 23, 0x02), + (0, 24, 0x02), + (0, 25, 0x02), + (0, 26, 0x02), + (0, 27, 0x02), + (0, 28, 0x02), + (0, 29, 0x02), + (0, 30, 0x02), + (0, 31, 0x02), + (0, 127, 0x02), + (0, 220, 0x02), + (0, 249, 0x02), + (253, 0, 0x00), + ], + // 239 + [ + (1, 19, 0x02), + (22, 19, 0x03), + (1, 20, 0x02), + (22, 20, 0x03), + (1, 21, 0x02), + (22, 21, 0x03), + (1, 23, 0x02), + (22, 23, 0x03), + (1, 24, 0x02), + (22, 24, 0x03), + (1, 25, 0x02), + (22, 25, 0x03), + (1, 26, 0x02), + (22, 26, 0x03), + (1, 27, 0x02), + (22, 27, 0x03), + ], + // 240 + [ + (2, 19, 0x02), + (9, 19, 0x02), + (23, 19, 0x02), + (40, 19, 0x03), + (2, 20, 0x02), + (9, 20, 0x02), + (23, 20, 0x02), + (40, 20, 0x03), + (2, 21, 0x02), + (9, 21, 0x02), + (23, 21, 0x02), + (40, 21, 0x03), + (2, 23, 0x02), + (9, 23, 0x02), + (23, 23, 0x02), + (40, 23, 0x03), + ], + // 241 + [ + (3, 19, 0x02), + (6, 19, 0x02), + (10, 19, 0x02), + (15, 19, 0x02), + (24, 19, 0x02), + (31, 19, 0x02), + (41, 19, 0x02), + (56, 19, 0x03), + (3, 20, 0x02), + (6, 20, 0x02), + (10, 20, 0x02), + (15, 20, 0x02), + (24, 20, 0x02), + (31, 20, 0x02), + (41, 20, 0x02), + (56, 20, 0x03), + ], + // 242 + [ + (3, 21, 0x02), + (6, 21, 0x02), + (10, 21, 0x02), + (15, 21, 0x02), + (24, 21, 
0x02), + (31, 21, 0x02), + (41, 21, 0x02), + (56, 21, 0x03), + (3, 23, 0x02), + (6, 23, 0x02), + (10, 23, 0x02), + (15, 23, 0x02), + (24, 23, 0x02), + (31, 23, 0x02), + (41, 23, 0x02), + (56, 23, 0x03), + ], + // 243 + [ + (2, 24, 0x02), + (9, 24, 0x02), + (23, 24, 0x02), + (40, 24, 0x03), + (2, 25, 0x02), + (9, 25, 0x02), + (23, 25, 0x02), + (40, 25, 0x03), + (2, 26, 0x02), + (9, 26, 0x02), + (23, 26, 0x02), + (40, 26, 0x03), + (2, 27, 0x02), + (9, 27, 0x02), + (23, 27, 0x02), + (40, 27, 0x03), + ], + // 244 + [ + (3, 24, 0x02), + (6, 24, 0x02), + (10, 24, 0x02), + (15, 24, 0x02), + (24, 24, 0x02), + (31, 24, 0x02), + (41, 24, 0x02), + (56, 24, 0x03), + (3, 25, 0x02), + (6, 25, 0x02), + (10, 25, 0x02), + (15, 25, 0x02), + (24, 25, 0x02), + (31, 25, 0x02), + (41, 25, 0x02), + (56, 25, 0x03), + ], + // 245 + [ + (3, 26, 0x02), + (6, 26, 0x02), + (10, 26, 0x02), + (15, 26, 0x02), + (24, 26, 0x02), + (31, 26, 0x02), + (41, 26, 0x02), + (56, 26, 0x03), + (3, 27, 0x02), + (6, 27, 0x02), + (10, 27, 0x02), + (15, 27, 0x02), + (24, 27, 0x02), + (31, 27, 0x02), + (41, 27, 0x02), + (56, 27, 0x03), + ], + // 246 + [ + (1, 28, 0x02), + (22, 28, 0x03), + (1, 29, 0x02), + (22, 29, 0x03), + (1, 30, 0x02), + (22, 30, 0x03), + (1, 31, 0x02), + (22, 31, 0x03), + (1, 127, 0x02), + (22, 127, 0x03), + (1, 220, 0x02), + (22, 220, 0x03), + (1, 249, 0x02), + (22, 249, 0x03), + (254, 0, 0x00), + (255, 0, 0x00), + ], + // 247 + [ + (2, 28, 0x02), + (9, 28, 0x02), + (23, 28, 0x02), + (40, 28, 0x03), + (2, 29, 0x02), + (9, 29, 0x02), + (23, 29, 0x02), + (40, 29, 0x03), + (2, 30, 0x02), + (9, 30, 0x02), + (23, 30, 0x02), + (40, 30, 0x03), + (2, 31, 0x02), + (9, 31, 0x02), + (23, 31, 0x02), + (40, 31, 0x03), + ], + // 248 + [ + (3, 28, 0x02), + (6, 28, 0x02), + (10, 28, 0x02), + (15, 28, 0x02), + (24, 28, 0x02), + (31, 28, 0x02), + (41, 28, 0x02), + (56, 28, 0x03), + (3, 29, 0x02), + (6, 29, 0x02), + (10, 29, 0x02), + (15, 29, 0x02), + (24, 29, 0x02), + (31, 29, 0x02), + (41, 29, 0x02), + (56, 
29, 0x03), + ], + // 249 + [ + (3, 30, 0x02), + (6, 30, 0x02), + (10, 30, 0x02), + (15, 30, 0x02), + (24, 30, 0x02), + (31, 30, 0x02), + (41, 30, 0x02), + (56, 30, 0x03), + (3, 31, 0x02), + (6, 31, 0x02), + (10, 31, 0x02), + (15, 31, 0x02), + (24, 31, 0x02), + (31, 31, 0x02), + (41, 31, 0x02), + (56, 31, 0x03), + ], + // 250 + [ + (2, 127, 0x02), + (9, 127, 0x02), + (23, 127, 0x02), + (40, 127, 0x03), + (2, 220, 0x02), + (9, 220, 0x02), + (23, 220, 0x02), + (40, 220, 0x03), + (2, 249, 0x02), + (9, 249, 0x02), + (23, 249, 0x02), + (40, 249, 0x03), + (0, 10, 0x02), + (0, 13, 0x02), + (0, 22, 0x02), + (0, 0, 0x04), + ], + // 251 + [ + (3, 127, 0x02), + (6, 127, 0x02), + (10, 127, 0x02), + (15, 127, 0x02), + (24, 127, 0x02), + (31, 127, 0x02), + (41, 127, 0x02), + (56, 127, 0x03), + (3, 220, 0x02), + (6, 220, 0x02), + (10, 220, 0x02), + (15, 220, 0x02), + (24, 220, 0x02), + (31, 220, 0x02), + (41, 220, 0x02), + (56, 220, 0x03), + ], + // 252 + [ + (3, 249, 0x02), + (6, 249, 0x02), + (10, 249, 0x02), + (15, 249, 0x02), + (24, 249, 0x02), + (31, 249, 0x02), + (41, 249, 0x02), + (56, 249, 0x03), + (1, 10, 0x02), + (22, 10, 0x03), + (1, 13, 0x02), + (22, 13, 0x03), + (1, 22, 0x02), + (22, 22, 0x03), + (0, 0, 0x04), + (0, 0, 0x05), + ], + // 253 + [ + (2, 10, 0x02), + (9, 10, 0x02), + (23, 10, 0x02), + (40, 10, 0x03), + (2, 13, 0x02), + (9, 13, 0x02), + (23, 13, 0x02), + (40, 13, 0x03), + (2, 22, 0x02), + (9, 22, 0x02), + (23, 22, 0x02), + (40, 22, 0x03), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x05), + ], + // 254 + [ + (3, 10, 0x02), + (6, 10, 0x02), + (10, 10, 0x02), + (15, 10, 0x02), + (24, 10, 0x02), + (31, 10, 0x02), + (41, 10, 0x02), + (56, 10, 0x03), + (3, 13, 0x02), + (6, 13, 0x02), + (10, 13, 0x02), + (15, 13, 0x02), + (24, 13, 0x02), + (31, 13, 0x02), + (41, 13, 0x02), + (56, 13, 0x03), + ], + // 255 + [ + (3, 22, 0x02), + (6, 22, 0x02), + (10, 22, 0x02), + (15, 22, 0x02), + (24, 22, 0x02), + (31, 22, 0x02), + (41, 22, 0x02), + (56, 22, 0x03), + 
(0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x04), + (0, 0, 0x05), + ], +]; diff --git a/third_party/rust/h2/src/hpack/mod.rs b/third_party/rust/h2/src/hpack/mod.rs new file mode 100644 index 000000000000..956de887937f --- /dev/null +++ b/third_party/rust/h2/src/hpack/mod.rs @@ -0,0 +1,12 @@ +mod encoder; +mod decoder; +mod header; +mod huffman; +mod table; + +#[cfg(test)] +mod test; + +pub use self::decoder::{Decoder, DecoderError, NeedMore}; +pub use self::encoder::{Encode, EncodeState, Encoder, EncoderError}; +pub use self::header::Header; diff --git a/third_party/rust/h2/src/hpack/table.rs b/third_party/rust/h2/src/hpack/table.rs new file mode 100644 index 000000000000..9fe446befd06 --- /dev/null +++ b/third_party/rust/h2/src/hpack/table.rs @@ -0,0 +1,749 @@ +use super::Header; + +use fnv::FnvHasher; +use http::header; +use http::method::Method; + +use std::{cmp, mem, usize}; +use std::collections::VecDeque; +use std::hash::{Hash, Hasher}; + +/// HPACK encoder table +#[derive(Debug)] +pub struct Table { + mask: usize, + indices: Vec>, + slots: VecDeque, + inserted: usize, + // Size is in bytes + size: usize, + max_size: usize, +} + +#[derive(Debug)] +pub enum Index { + // The header is already fully indexed + Indexed(usize, Header), + + // The name is indexed, but not the value + Name(usize, Header), + + // The full header has been inserted into the table. + Inserted(usize), + + // Only the value has been inserted (hpack table idx, slots idx) + InsertedValue(usize, usize), + + // The header is not indexed by this table + NotIndexed(Header), +} + +#[derive(Debug)] +struct Slot { + hash: HashValue, + header: Header, + next: Option, +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +struct Pos { + index: usize, + hash: HashValue, +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +struct HashValue(usize); + +const MAX_SIZE: usize = (1 << 16); +const DYN_OFFSET: usize = 62; + +macro_rules! 
probe_loop { + ($probe_var: ident < $len: expr, $body: expr) => { + debug_assert!($len > 0); + loop { + if $probe_var < $len { + $body + $probe_var += 1; + } else { + $probe_var = 0; + } + } + }; +} + +impl Table { + pub fn new(max_size: usize, capacity: usize) -> Table { + if capacity == 0 { + Table { + mask: 0, + indices: vec![], + slots: VecDeque::new(), + inserted: 0, + size: 0, + max_size: max_size, + } + } else { + let capacity = cmp::max(to_raw_capacity(capacity).next_power_of_two(), 8); + + Table { + mask: capacity.wrapping_sub(1), + indices: vec![None; capacity], + slots: VecDeque::with_capacity(usable_capacity(capacity)), + inserted: 0, + size: 0, + max_size: max_size, + } + } + } + + #[inline] + pub fn capacity(&self) -> usize { + usable_capacity(self.indices.len()) + } + + pub fn max_size(&self) -> usize { + self.max_size + } + + /// Gets the header stored in the table + pub fn resolve<'a>(&'a self, index: &'a Index) -> &'a Header { + use self::Index::*; + + match *index { + Indexed(_, ref h) => h, + Name(_, ref h) => h, + Inserted(idx) => &self.slots[idx].header, + InsertedValue(_, idx) => &self.slots[idx].header, + NotIndexed(ref h) => h, + } + } + + pub fn resolve_idx(&self, index: &Index) -> usize { + use self::Index::*; + + match *index { + Indexed(idx, ..) => idx, + Name(idx, ..) => idx, + Inserted(idx) => idx + DYN_OFFSET, + InsertedValue(idx, _) => idx, + NotIndexed(_) => panic!("cannot resolve index"), + } + } + + /// Index the header in the HPACK table. + pub fn index(&mut self, header: Header) -> Index { + // Check the static table + let statik = index_static(&header); + + // Don't index certain headers. This logic is borrowed from nghttp2. + if header.skip_value_index() { + // Right now, if this is true, the header name is always in the + // static table. At some point in the future, this might not be true + // and this logic will need to be updated. 
+ return Index::new(statik, header); + } + + // If the header is already indexed by the static table, return that + if let Some((n, true)) = statik { + return Index::Indexed(n, header); + } + + // Don't index large headers + if header.len() * 4 > self.max_size * 3 { + return Index::new(statik, header); + } + + self.index_dynamic(header, statik) + } + + fn index_dynamic(&mut self, header: Header, statik: Option<(usize, bool)>) -> Index { + debug_assert!(self.assert_valid_state("one")); + + if header.len() + self.size < self.max_size || !header.is_sensitive() { + // Only grow internal storage if needed + self.reserve_one(); + } + + if self.indices.is_empty() { + // If `indices` is not empty, then it is impossible for all + // `indices` entries to be `Some`. So, we only need to check for the + // empty case. + return Index::new(statik, header); + } + + let hash = hash_header(&header); + + let desired_pos = desired_pos(self.mask, hash); + let mut probe = desired_pos; + let mut dist = 0; + + // Start at the ideal position, checking all slots + probe_loop!(probe < self.indices.len(), { + if let Some(pos) = self.indices[probe] { + // The slot is already occupied, but check if it has a lower + // displacement. + let their_dist = probe_distance(self.mask, pos.hash, probe); + + let slot_idx = pos.index.wrapping_add(self.inserted); + + if their_dist < dist { + // Index robinhood + return self.index_vacant(header, hash, dist, probe, statik); + } else if pos.hash == hash && self.slots[slot_idx].header.name() == header.name() { + // Matching name, check values + return self.index_occupied(header, hash, pos.index); + } + } else { + return self.index_vacant(header, hash, dist, probe, statik); + } + + dist += 1; + }); + } + + fn index_occupied(&mut self, header: Header, hash: HashValue, mut index: usize) -> Index { + debug_assert!(self.assert_valid_state("top")); + + // There already is a match for the given header name. Check if a value + // matches. 
The header will also only be inserted if the table is not at + // capacity. + loop { + // Compute the real index into the VecDeque + let real_idx = index.wrapping_add(self.inserted); + + if self.slots[real_idx].header.value_eq(&header) { + // We have a full match! + return Index::Indexed(real_idx + DYN_OFFSET, header); + } + + if let Some(next) = self.slots[real_idx].next { + index = next; + continue; + } + + if header.is_sensitive() { + return Index::Name(real_idx + DYN_OFFSET, header); + } + + self.update_size(header.len(), Some(index)); + + // Insert the new header + self.insert(header, hash); + + // Recompute real_idx as it just changed. + let new_real_idx = index.wrapping_add(self.inserted); + + // The previous node in the linked list may have gotten evicted + // while making room for this header. + if new_real_idx < self.slots.len() { + let idx = 0usize.wrapping_sub(self.inserted); + + self.slots[new_real_idx].next = Some(idx); + } + + debug_assert!(self.assert_valid_state("bottom")); + + // Even if the previous header was evicted, we can still reference + // it when inserting the new one... + return Index::InsertedValue(real_idx + DYN_OFFSET, 0); + } + } + + fn index_vacant( + &mut self, + header: Header, + hash: HashValue, + mut dist: usize, + mut probe: usize, + statik: Option<(usize, bool)>, + ) -> Index { + if header.is_sensitive() { + return Index::new(statik, header); + } + + debug_assert!(self.assert_valid_state("top")); + debug_assert!(dist == 0 || self.indices[probe.wrapping_sub(1) & self.mask].is_some()); + + // Passing in `usize::MAX` for prev_idx since there is no previous + // header in this case. 
+ if self.update_size(header.len(), None) { + while dist != 0 { + let back = probe.wrapping_sub(1) & self.mask; + + if let Some(pos) = self.indices[back] { + let their_dist = probe_distance(self.mask, pos.hash, back); + + if their_dist < (dist - 1) { + probe = back; + dist -= 1; + } else { + break; + } + } else { + probe = back; + dist -= 1; + } + } + } + + debug_assert!(self.assert_valid_state("after update")); + + self.insert(header, hash); + + let pos_idx = 0usize.wrapping_sub(self.inserted); + + let prev = mem::replace( + &mut self.indices[probe], + Some(Pos { + index: pos_idx, + hash: hash, + }), + ); + + if let Some(mut prev) = prev { + // Shift forward + let mut probe = probe + 1; + + probe_loop!(probe < self.indices.len(), { + let pos = &mut self.indices[probe as usize]; + + prev = match mem::replace(pos, Some(prev)) { + Some(p) => p, + None => break, + }; + }); + } + + debug_assert!(self.assert_valid_state("bottom")); + + if let Some((n, _)) = statik { + Index::InsertedValue(n, 0) + } else { + Index::Inserted(0) + } + } + + fn insert(&mut self, header: Header, hash: HashValue) { + self.inserted = self.inserted.wrapping_add(1); + + self.slots.push_front(Slot { + hash: hash, + header: header, + next: None, + }); + } + + pub fn resize(&mut self, size: usize) { + self.max_size = size; + + if size == 0 { + self.size = 0; + + for i in &mut self.indices { + *i = None; + } + + self.slots.clear(); + self.inserted = 0; + } else { + self.converge(None); + } + } + + fn update_size(&mut self, len: usize, prev_idx: Option) -> bool { + self.size += len; + self.converge(prev_idx) + } + + fn converge(&mut self, prev_idx: Option) -> bool { + let mut ret = false; + + while self.size > self.max_size { + ret = true; + self.evict(prev_idx); + } + + ret + } + + fn evict(&mut self, prev_idx: Option) { + let pos_idx = (self.slots.len() - 1).wrapping_sub(self.inserted); + + debug_assert!(!self.slots.is_empty()); + debug_assert!(self.assert_valid_state("one")); + + // Remove the 
header + let slot = self.slots.pop_back().unwrap(); + let mut probe = desired_pos(self.mask, slot.hash); + + // Update the size + self.size -= slot.header.len(); + + debug_assert_eq!( + self.indices + .iter() + .filter_map(|p| *p) + .filter(|p| p.index == pos_idx) + .count(), + 1 + ); + + // Find the associated position + probe_loop!(probe < self.indices.len(), { + debug_assert!(!self.indices[probe].is_none()); + + let mut pos = self.indices[probe].unwrap(); + + if pos.index == pos_idx { + if let Some(idx) = slot.next { + pos.index = idx; + self.indices[probe] = Some(pos); + } else if Some(pos.index) == prev_idx { + pos.index = 0usize.wrapping_sub(self.inserted + 1); + self.indices[probe] = Some(pos); + } else { + self.indices[probe] = None; + self.remove_phase_two(probe); + } + + break; + } + }); + + debug_assert!(self.assert_valid_state("two")); + } + + // Shifts all indices that were displaced by the header that has just been + // removed. + fn remove_phase_two(&mut self, probe: usize) { + let mut last_probe = probe; + let mut probe = probe + 1; + + probe_loop!(probe < self.indices.len(), { + if let Some(pos) = self.indices[probe] { + if probe_distance(self.mask, pos.hash, probe) > 0 { + self.indices[last_probe] = self.indices[probe].take(); + } else { + break; + } + } else { + break; + } + + last_probe = probe; + }); + + debug_assert!(self.assert_valid_state("two")); + } + + fn reserve_one(&mut self) { + let len = self.slots.len(); + + if len == self.capacity() { + if len == 0 { + let new_raw_cap = 8; + self.mask = 8 - 1; + self.indices = vec![None; new_raw_cap]; + } else { + let raw_cap = self.indices.len(); + self.grow(raw_cap << 1); + } + } + } + + #[inline] + fn grow(&mut self, new_raw_cap: usize) { + // This path can never be reached when handling the first allocation in + // the map. 
+ + debug_assert!(self.assert_valid_state("top")); + + // find first ideally placed element -- start of cluster + let mut first_ideal = 0; + + for (i, pos) in self.indices.iter().enumerate() { + if let Some(pos) = *pos { + if 0 == probe_distance(self.mask, pos.hash, i) { + first_ideal = i; + break; + } + } + } + + // visit the entries in an order where we can simply reinsert them + // into self.indices without any bucket stealing. + let old_indices = mem::replace(&mut self.indices, vec![None; new_raw_cap]); + self.mask = new_raw_cap.wrapping_sub(1); + + for &pos in &old_indices[first_ideal..] { + self.reinsert_entry_in_order(pos); + } + + for &pos in &old_indices[..first_ideal] { + self.reinsert_entry_in_order(pos); + } + + debug_assert!(self.assert_valid_state("bottom")); + } + + fn reinsert_entry_in_order(&mut self, pos: Option) { + if let Some(pos) = pos { + // Find first empty bucket and insert there + let mut probe = desired_pos(self.mask, pos.hash); + + probe_loop!(probe < self.indices.len(), { + if self.indices[probe].is_none() { + // empty bucket, insert here + self.indices[probe] = Some(pos); + return; + } + + debug_assert!({ + let them = self.indices[probe].unwrap(); + let their_distance = probe_distance(self.mask, them.hash, probe); + let our_distance = probe_distance(self.mask, pos.hash, probe); + + their_distance >= our_distance + }); + }); + } + } + + #[cfg(not(test))] + fn assert_valid_state(&self, _: &'static str) -> bool { + true + } + + #[cfg(test)] + fn assert_valid_state(&self, _msg: &'static str) -> bool { + /* + // Checks that the internal map structure is valid + // + // Ensure all hash codes in indices match the associated slot + for pos in &self.indices { + if let Some(pos) = *pos { + let real_idx = pos.index.wrapping_add(self.inserted); + + if real_idx.wrapping_add(1) != 0 { + assert!(real_idx < self.slots.len(), + "out of index; real={}; len={}, msg={}", + real_idx, self.slots.len(), msg); + + assert_eq!(pos.hash, 
self.slots[real_idx].hash, + "index hash does not match slot; msg={}", msg); + } + } + } + + // Every index is only available once + for i in 0..self.indices.len() { + if self.indices[i].is_none() { + continue; + } + + for j in i+1..self.indices.len() { + assert_ne!(self.indices[i], self.indices[j], + "duplicate indices; msg={}", msg); + } + } + + for (index, slot) in self.slots.iter().enumerate() { + let mut indexed = None; + + // First, see if the slot is indexed + for (i, pos) in self.indices.iter().enumerate() { + if let Some(pos) = *pos { + let real_idx = pos.index.wrapping_add(self.inserted); + if real_idx == index { + indexed = Some(i); + // Already know that there is no dup, so break + break; + } + } + } + + if let Some(actual) = indexed { + // Ensure that it is accessible.. + let desired = desired_pos(self.mask, slot.hash); + let mut probe = desired; + let mut dist = 0; + + probe_loop!(probe < self.indices.len(), { + assert!(self.indices[probe].is_some(), + "unexpected empty slot; probe={}; hash={:?}; msg={}", + probe, slot.hash, msg); + + let pos = self.indices[probe].unwrap(); + + let their_dist = probe_distance(self.mask, pos.hash, probe); + let real_idx = pos.index.wrapping_add(self.inserted); + + if real_idx == index { + break; + } + + assert!(dist <= their_dist, + "could not find entry; actual={}; desired={};" + + "probe={}, dist={}; their_dist={}; index={}; msg={}", + actual, desired, probe, dist, their_dist, + index.wrapping_sub(self.inserted), msg); + + dist += 1; + }); + } else { + // There is exactly one next link + let cnt = self.slots.iter().map(|s| s.next) + .filter(|n| *n == Some(index.wrapping_sub(self.inserted))) + .count(); + + assert_eq!(1, cnt, "more than one node pointing here; msg={}", msg); + } + } + */ + + // TODO: Ensure linked lists are correct: no cycles, etc... 
+ + true + } +} + +#[cfg(test)] +impl Table { + /// Returns the number of headers in the table + pub fn len(&self) -> usize { + self.slots.len() + } + + /// Returns the table size + pub fn size(&self) -> usize { + self.size + } +} + +impl Index { + fn new(v: Option<(usize, bool)>, e: Header) -> Index { + match v { + None => Index::NotIndexed(e), + Some((n, true)) => Index::Indexed(n, e), + Some((n, false)) => Index::Name(n, e), + } + } +} + +#[inline] +fn usable_capacity(cap: usize) -> usize { + cap - cap / 4 +} + +#[inline] +fn to_raw_capacity(n: usize) -> usize { + n + n / 3 +} + +#[inline] +fn desired_pos(mask: usize, hash: HashValue) -> usize { + (hash.0 & mask) as usize +} + +#[inline] +fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { + current.wrapping_sub(desired_pos(mask, hash)) & mask as usize +} + +fn hash_header(header: &Header) -> HashValue { + const MASK: u64 = (MAX_SIZE as u64) - 1; + + let mut h = FnvHasher::default(); + header.name().hash(&mut h); + HashValue((h.finish() & MASK) as usize) +} + +/// Checks the static table for the header. If found, returns the index and a +/// boolean representing if the value matched as well. 
+fn index_static(header: &Header) -> Option<(usize, bool)> { + match *header { + Header::Field { + ref name, + ref value, + } => match *name { + header::ACCEPT_CHARSET => Some((15, false)), + header::ACCEPT_ENCODING => if value == "gzip, deflate" { + Some((16, true)) + } else { + Some((16, false)) + }, + header::ACCEPT_LANGUAGE => Some((17, false)), + header::ACCEPT_RANGES => Some((18, false)), + header::ACCEPT => Some((19, false)), + header::ACCESS_CONTROL_ALLOW_ORIGIN => Some((20, false)), + header::AGE => Some((21, false)), + header::ALLOW => Some((22, false)), + header::AUTHORIZATION => Some((23, false)), + header::CACHE_CONTROL => Some((24, false)), + header::CONTENT_DISPOSITION => Some((25, false)), + header::CONTENT_ENCODING => Some((26, false)), + header::CONTENT_LANGUAGE => Some((27, false)), + header::CONTENT_LENGTH => Some((28, false)), + header::CONTENT_LOCATION => Some((29, false)), + header::CONTENT_RANGE => Some((30, false)), + header::CONTENT_TYPE => Some((31, false)), + header::COOKIE => Some((32, false)), + header::DATE => Some((33, false)), + header::ETAG => Some((34, false)), + header::EXPECT => Some((35, false)), + header::EXPIRES => Some((36, false)), + header::FROM => Some((37, false)), + header::HOST => Some((38, false)), + header::IF_MATCH => Some((39, false)), + header::IF_MODIFIED_SINCE => Some((40, false)), + header::IF_NONE_MATCH => Some((41, false)), + header::IF_RANGE => Some((42, false)), + header::IF_UNMODIFIED_SINCE => Some((43, false)), + header::LAST_MODIFIED => Some((44, false)), + header::LINK => Some((45, false)), + header::LOCATION => Some((46, false)), + header::MAX_FORWARDS => Some((47, false)), + header::PROXY_AUTHENTICATE => Some((48, false)), + header::PROXY_AUTHORIZATION => Some((49, false)), + header::RANGE => Some((50, false)), + header::REFERER => Some((51, false)), + header::REFRESH => Some((52, false)), + header::RETRY_AFTER => Some((53, false)), + header::SERVER => Some((54, false)), + header::SET_COOKIE => 
Some((55, false)), + header::STRICT_TRANSPORT_SECURITY => Some((56, false)), + header::TRANSFER_ENCODING => Some((57, false)), + header::USER_AGENT => Some((58, false)), + header::VARY => Some((59, false)), + header::VIA => Some((60, false)), + header::WWW_AUTHENTICATE => Some((61, false)), + _ => None, + }, + Header::Authority(_) => Some((1, false)), + Header::Method(ref v) => match *v { + Method::GET => Some((2, true)), + Method::POST => Some((3, true)), + _ => Some((2, false)), + }, + Header::Scheme(ref v) => match &**v { + "http" => Some((6, true)), + "https" => Some((7, true)), + _ => Some((6, false)), + }, + Header::Path(ref v) => match &**v { + "/" => Some((4, true)), + "/index.html" => Some((5, true)), + _ => Some((4, false)), + }, + Header::Status(ref v) => match u16::from(*v) { + 200 => Some((8, true)), + 204 => Some((9, true)), + 206 => Some((10, true)), + 304 => Some((11, true)), + 400 => Some((12, true)), + 404 => Some((13, true)), + 500 => Some((14, true)), + _ => Some((8, false)), + }, + } +} diff --git a/third_party/rust/h2/src/hpack/test/fixture.rs b/third_party/rust/h2/src/hpack/test/fixture.rs new file mode 100644 index 000000000000..d7a4883fc764 --- /dev/null +++ b/third_party/rust/h2/src/hpack/test/fixture.rs @@ -0,0 +1,615 @@ +extern crate bytes; +extern crate hex; +extern crate serde_json; + +use hpack::{Decoder, Encoder, Header}; + +use self::bytes::BytesMut; +use self::hex::FromHex; +use self::serde_json::Value; + +use std::fs::File; +use std::io::Cursor; +use std::io::prelude::*; +use std::path::Path; +use std::str; + +fn test_fixture(path: &Path) { + let mut file = File::open(path).unwrap(); + let mut data = String::new(); + file.read_to_string(&mut data).unwrap(); + + let story: Value = serde_json::from_str(&data).unwrap(); + test_story(story); +} + +fn test_story(story: Value) { + let story = story.as_object().unwrap(); + + if let Some(cases) = story.get("cases") { + let mut cases: Vec<_> = cases + .as_array() + .unwrap() + .iter() + 
.map(|case| { + let case = case.as_object().unwrap(); + + let size = case.get("header_table_size") + .map(|v| v.as_u64().unwrap() as usize); + + let wire = case.get("wire").unwrap().as_str().unwrap(); + let wire: Vec = FromHex::from_hex(wire.as_bytes()).unwrap(); + + let expect: Vec<_> = case.get("headers") + .unwrap() + .as_array() + .unwrap() + .iter() + .map(|h| { + let h = h.as_object().unwrap(); + let (name, val) = h.iter().next().unwrap(); + (name.clone(), val.as_str().unwrap().to_string()) + }) + .collect(); + + Case { + seqno: case.get("seqno").unwrap().as_u64().unwrap(), + wire: wire, + expect: expect, + header_table_size: size, + } + }) + .collect(); + + cases.sort_by_key(|c| c.seqno); + + let mut decoder = Decoder::default(); + + // First, check decoding against the fixtures + for case in &cases { + let mut expect = case.expect.clone(); + + if let Some(size) = case.header_table_size { + decoder.queue_size_update(size); + } + + decoder + .decode(&mut Cursor::new(&mut case.wire.clone().into()), |e| { + let (name, value) = expect.remove(0); + assert_eq!(name, key_str(&e)); + assert_eq!(value, value_str(&e)); + }) + .unwrap(); + + assert_eq!(0, expect.len()); + } + + let mut encoder = Encoder::default(); + let mut decoder = Decoder::default(); + + // Now, encode the headers + for case in &cases { + let mut buf = BytesMut::with_capacity(64 * 1024); + + if let Some(size) = case.header_table_size { + encoder.update_max_size(size); + decoder.queue_size_update(size); + } + + let mut input: Vec<_> = case.expect + .iter() + .map(|&(ref name, ref value)| { + Header::new(name.clone().into(), value.clone().into()) + .unwrap() + .into() + }) + .collect(); + + encoder.encode(None, &mut input.clone().into_iter(), &mut buf); + + decoder + .decode(&mut Cursor::new(&mut buf), |e| { + assert_eq!(e, input.remove(0).reify().unwrap()); + }) + .unwrap(); + + assert_eq!(0, input.len()); + } + } +} + +struct Case { + seqno: u64, + wire: Vec, + expect: Vec<(String, String)>, + 
header_table_size: Option, +} + +fn key_str(e: &Header) -> &str { + match *e { + Header::Field { + ref name, .. + } => name.as_str(), + Header::Authority(..) => ":authority", + Header::Method(..) => ":method", + Header::Scheme(..) => ":scheme", + Header::Path(..) => ":path", + Header::Status(..) => ":status", + } +} + +fn value_str(e: &Header) -> &str { + match *e { + Header::Field { + ref value, .. + } => value.to_str().unwrap(), + Header::Authority(ref v) => &**v, + Header::Method(ref m) => m.as_str(), + Header::Scheme(ref v) => &**v, + Header::Path(ref v) => &**v, + Header::Status(ref v) => v.as_str(), + } +} + +macro_rules! fixture_mod { + ($module:ident => { + $( + ($fn:ident, $path:expr); + )+ + }) => { + mod $module { + $( + #[test] + fn $fn() { + let path = ::std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .join("fixtures/hpack") + .join($path); + + super::test_fixture(path.as_ref()); + } + )+ + } + } +} + +fixture_mod!( + haskell_http2_linear_huffman => { + (story_00, "haskell-http2-linear-huffman/story_00.json"); + (story_01, "haskell-http2-linear-huffman/story_01.json"); + (story_02, "haskell-http2-linear-huffman/story_02.json"); + (story_03, "haskell-http2-linear-huffman/story_03.json"); + (story_04, "haskell-http2-linear-huffman/story_04.json"); + (story_05, "haskell-http2-linear-huffman/story_05.json"); + (story_06, "haskell-http2-linear-huffman/story_06.json"); + (story_07, "haskell-http2-linear-huffman/story_07.json"); + (story_08, "haskell-http2-linear-huffman/story_08.json"); + (story_09, "haskell-http2-linear-huffman/story_09.json"); + (story_10, "haskell-http2-linear-huffman/story_10.json"); + (story_11, "haskell-http2-linear-huffman/story_11.json"); + (story_12, "haskell-http2-linear-huffman/story_12.json"); + (story_13, "haskell-http2-linear-huffman/story_13.json"); + (story_14, "haskell-http2-linear-huffman/story_14.json"); + (story_15, "haskell-http2-linear-huffman/story_15.json"); + (story_16, 
"haskell-http2-linear-huffman/story_16.json"); + (story_17, "haskell-http2-linear-huffman/story_17.json"); + (story_18, "haskell-http2-linear-huffman/story_18.json"); + (story_19, "haskell-http2-linear-huffman/story_19.json"); + (story_20, "haskell-http2-linear-huffman/story_20.json"); + (story_21, "haskell-http2-linear-huffman/story_21.json"); + (story_22, "haskell-http2-linear-huffman/story_22.json"); + (story_23, "haskell-http2-linear-huffman/story_23.json"); + (story_24, "haskell-http2-linear-huffman/story_24.json"); + (story_25, "haskell-http2-linear-huffman/story_25.json"); + (story_26, "haskell-http2-linear-huffman/story_26.json"); + (story_27, "haskell-http2-linear-huffman/story_27.json"); + (story_28, "haskell-http2-linear-huffman/story_28.json"); + (story_29, "haskell-http2-linear-huffman/story_29.json"); + (story_30, "haskell-http2-linear-huffman/story_30.json"); + (story_31, "haskell-http2-linear-huffman/story_31.json"); + } +); + +fixture_mod!( + python_hpack => { + (story_00, "python-hpack/story_00.json"); + (story_01, "python-hpack/story_01.json"); + (story_02, "python-hpack/story_02.json"); + (story_03, "python-hpack/story_03.json"); + (story_04, "python-hpack/story_04.json"); + (story_05, "python-hpack/story_05.json"); + (story_06, "python-hpack/story_06.json"); + (story_07, "python-hpack/story_07.json"); + (story_08, "python-hpack/story_08.json"); + (story_09, "python-hpack/story_09.json"); + (story_10, "python-hpack/story_10.json"); + (story_11, "python-hpack/story_11.json"); + (story_12, "python-hpack/story_12.json"); + (story_13, "python-hpack/story_13.json"); + (story_14, "python-hpack/story_14.json"); + (story_15, "python-hpack/story_15.json"); + (story_16, "python-hpack/story_16.json"); + (story_17, "python-hpack/story_17.json"); + (story_18, "python-hpack/story_18.json"); + (story_19, "python-hpack/story_19.json"); + (story_20, "python-hpack/story_20.json"); + (story_21, "python-hpack/story_21.json"); + (story_22, 
"python-hpack/story_22.json"); + (story_23, "python-hpack/story_23.json"); + (story_24, "python-hpack/story_24.json"); + (story_25, "python-hpack/story_25.json"); + (story_26, "python-hpack/story_26.json"); + (story_27, "python-hpack/story_27.json"); + (story_28, "python-hpack/story_28.json"); + (story_29, "python-hpack/story_29.json"); + (story_30, "python-hpack/story_30.json"); + (story_31, "python-hpack/story_31.json"); + } +); + +fixture_mod!( + nghttp2_16384_4096 => { + (story_00, "nghttp2-16384-4096/story_00.json"); + (story_01, "nghttp2-16384-4096/story_01.json"); + (story_02, "nghttp2-16384-4096/story_02.json"); + (story_03, "nghttp2-16384-4096/story_03.json"); + (story_04, "nghttp2-16384-4096/story_04.json"); + (story_05, "nghttp2-16384-4096/story_05.json"); + (story_06, "nghttp2-16384-4096/story_06.json"); + (story_07, "nghttp2-16384-4096/story_07.json"); + (story_08, "nghttp2-16384-4096/story_08.json"); + (story_09, "nghttp2-16384-4096/story_09.json"); + (story_10, "nghttp2-16384-4096/story_10.json"); + (story_11, "nghttp2-16384-4096/story_11.json"); + (story_12, "nghttp2-16384-4096/story_12.json"); + (story_13, "nghttp2-16384-4096/story_13.json"); + (story_14, "nghttp2-16384-4096/story_14.json"); + (story_15, "nghttp2-16384-4096/story_15.json"); + (story_16, "nghttp2-16384-4096/story_16.json"); + (story_17, "nghttp2-16384-4096/story_17.json"); + (story_18, "nghttp2-16384-4096/story_18.json"); + (story_19, "nghttp2-16384-4096/story_19.json"); + (story_20, "nghttp2-16384-4096/story_20.json"); + (story_21, "nghttp2-16384-4096/story_21.json"); + (story_22, "nghttp2-16384-4096/story_22.json"); + (story_23, "nghttp2-16384-4096/story_23.json"); + (story_24, "nghttp2-16384-4096/story_24.json"); + (story_25, "nghttp2-16384-4096/story_25.json"); + (story_26, "nghttp2-16384-4096/story_26.json"); + (story_27, "nghttp2-16384-4096/story_27.json"); + (story_28, "nghttp2-16384-4096/story_28.json"); + (story_29, "nghttp2-16384-4096/story_29.json"); + (story_30, 
"nghttp2-16384-4096/story_30.json"); + } +); + +fixture_mod!( + node_http2_hpack => { + (story_00, "node-http2-hpack/story_00.json"); + (story_01, "node-http2-hpack/story_01.json"); + (story_02, "node-http2-hpack/story_02.json"); + (story_03, "node-http2-hpack/story_03.json"); + (story_04, "node-http2-hpack/story_04.json"); + (story_05, "node-http2-hpack/story_05.json"); + (story_06, "node-http2-hpack/story_06.json"); + (story_07, "node-http2-hpack/story_07.json"); + (story_08, "node-http2-hpack/story_08.json"); + (story_09, "node-http2-hpack/story_09.json"); + (story_10, "node-http2-hpack/story_10.json"); + (story_11, "node-http2-hpack/story_11.json"); + (story_12, "node-http2-hpack/story_12.json"); + (story_13, "node-http2-hpack/story_13.json"); + (story_14, "node-http2-hpack/story_14.json"); + (story_15, "node-http2-hpack/story_15.json"); + (story_16, "node-http2-hpack/story_16.json"); + (story_17, "node-http2-hpack/story_17.json"); + (story_18, "node-http2-hpack/story_18.json"); + (story_19, "node-http2-hpack/story_19.json"); + (story_20, "node-http2-hpack/story_20.json"); + (story_21, "node-http2-hpack/story_21.json"); + (story_22, "node-http2-hpack/story_22.json"); + (story_23, "node-http2-hpack/story_23.json"); + (story_24, "node-http2-hpack/story_24.json"); + (story_25, "node-http2-hpack/story_25.json"); + (story_26, "node-http2-hpack/story_26.json"); + (story_27, "node-http2-hpack/story_27.json"); + (story_28, "node-http2-hpack/story_28.json"); + (story_29, "node-http2-hpack/story_29.json"); + (story_30, "node-http2-hpack/story_30.json"); + (story_31, "node-http2-hpack/story_31.json"); + } +); + +fixture_mod!( + nghttp2_change_table_size => { + (story_00, "nghttp2-change-table-size/story_00.json"); + (story_01, "nghttp2-change-table-size/story_01.json"); + (story_02, "nghttp2-change-table-size/story_02.json"); + (story_03, "nghttp2-change-table-size/story_03.json"); + (story_04, "nghttp2-change-table-size/story_04.json"); + (story_05, 
"nghttp2-change-table-size/story_05.json"); + (story_06, "nghttp2-change-table-size/story_06.json"); + (story_07, "nghttp2-change-table-size/story_07.json"); + (story_08, "nghttp2-change-table-size/story_08.json"); + (story_09, "nghttp2-change-table-size/story_09.json"); + (story_10, "nghttp2-change-table-size/story_10.json"); + (story_11, "nghttp2-change-table-size/story_11.json"); + (story_12, "nghttp2-change-table-size/story_12.json"); + (story_13, "nghttp2-change-table-size/story_13.json"); + (story_14, "nghttp2-change-table-size/story_14.json"); + (story_15, "nghttp2-change-table-size/story_15.json"); + (story_16, "nghttp2-change-table-size/story_16.json"); + (story_17, "nghttp2-change-table-size/story_17.json"); + (story_18, "nghttp2-change-table-size/story_18.json"); + (story_19, "nghttp2-change-table-size/story_19.json"); + (story_20, "nghttp2-change-table-size/story_20.json"); + (story_21, "nghttp2-change-table-size/story_21.json"); + (story_22, "nghttp2-change-table-size/story_22.json"); + (story_23, "nghttp2-change-table-size/story_23.json"); + (story_24, "nghttp2-change-table-size/story_24.json"); + (story_25, "nghttp2-change-table-size/story_25.json"); + (story_26, "nghttp2-change-table-size/story_26.json"); + (story_27, "nghttp2-change-table-size/story_27.json"); + (story_28, "nghttp2-change-table-size/story_28.json"); + (story_29, "nghttp2-change-table-size/story_29.json"); + (story_30, "nghttp2-change-table-size/story_30.json"); + } +); + +fixture_mod!( + haskell_http2_static_huffman => { + (story_00, "haskell-http2-static-huffman/story_00.json"); + (story_01, "haskell-http2-static-huffman/story_01.json"); + (story_02, "haskell-http2-static-huffman/story_02.json"); + (story_03, "haskell-http2-static-huffman/story_03.json"); + (story_04, "haskell-http2-static-huffman/story_04.json"); + (story_05, "haskell-http2-static-huffman/story_05.json"); + (story_06, "haskell-http2-static-huffman/story_06.json"); + (story_07, 
"haskell-http2-static-huffman/story_07.json"); + (story_08, "haskell-http2-static-huffman/story_08.json"); + (story_09, "haskell-http2-static-huffman/story_09.json"); + (story_10, "haskell-http2-static-huffman/story_10.json"); + (story_11, "haskell-http2-static-huffman/story_11.json"); + (story_12, "haskell-http2-static-huffman/story_12.json"); + (story_13, "haskell-http2-static-huffman/story_13.json"); + (story_14, "haskell-http2-static-huffman/story_14.json"); + (story_15, "haskell-http2-static-huffman/story_15.json"); + (story_16, "haskell-http2-static-huffman/story_16.json"); + (story_17, "haskell-http2-static-huffman/story_17.json"); + (story_18, "haskell-http2-static-huffman/story_18.json"); + (story_19, "haskell-http2-static-huffman/story_19.json"); + (story_20, "haskell-http2-static-huffman/story_20.json"); + (story_21, "haskell-http2-static-huffman/story_21.json"); + (story_22, "haskell-http2-static-huffman/story_22.json"); + (story_23, "haskell-http2-static-huffman/story_23.json"); + (story_24, "haskell-http2-static-huffman/story_24.json"); + (story_25, "haskell-http2-static-huffman/story_25.json"); + (story_26, "haskell-http2-static-huffman/story_26.json"); + (story_27, "haskell-http2-static-huffman/story_27.json"); + (story_28, "haskell-http2-static-huffman/story_28.json"); + (story_29, "haskell-http2-static-huffman/story_29.json"); + (story_30, "haskell-http2-static-huffman/story_30.json"); + (story_31, "haskell-http2-static-huffman/story_31.json"); + } +); + +fixture_mod!( + haskell_http2_naive_huffman => { + (story_00, "haskell-http2-naive-huffman/story_00.json"); + (story_01, "haskell-http2-naive-huffman/story_01.json"); + (story_02, "haskell-http2-naive-huffman/story_02.json"); + (story_03, "haskell-http2-naive-huffman/story_03.json"); + (story_04, "haskell-http2-naive-huffman/story_04.json"); + (story_05, "haskell-http2-naive-huffman/story_05.json"); + (story_06, "haskell-http2-naive-huffman/story_06.json"); + (story_07, 
"haskell-http2-naive-huffman/story_07.json"); + (story_08, "haskell-http2-naive-huffman/story_08.json"); + (story_09, "haskell-http2-naive-huffman/story_09.json"); + (story_10, "haskell-http2-naive-huffman/story_10.json"); + (story_11, "haskell-http2-naive-huffman/story_11.json"); + (story_12, "haskell-http2-naive-huffman/story_12.json"); + (story_13, "haskell-http2-naive-huffman/story_13.json"); + (story_14, "haskell-http2-naive-huffman/story_14.json"); + (story_15, "haskell-http2-naive-huffman/story_15.json"); + (story_16, "haskell-http2-naive-huffman/story_16.json"); + (story_17, "haskell-http2-naive-huffman/story_17.json"); + (story_18, "haskell-http2-naive-huffman/story_18.json"); + (story_19, "haskell-http2-naive-huffman/story_19.json"); + (story_20, "haskell-http2-naive-huffman/story_20.json"); + (story_21, "haskell-http2-naive-huffman/story_21.json"); + (story_22, "haskell-http2-naive-huffman/story_22.json"); + (story_23, "haskell-http2-naive-huffman/story_23.json"); + (story_24, "haskell-http2-naive-huffman/story_24.json"); + (story_25, "haskell-http2-naive-huffman/story_25.json"); + (story_26, "haskell-http2-naive-huffman/story_26.json"); + (story_27, "haskell-http2-naive-huffman/story_27.json"); + (story_28, "haskell-http2-naive-huffman/story_28.json"); + (story_29, "haskell-http2-naive-huffman/story_29.json"); + (story_30, "haskell-http2-naive-huffman/story_30.json"); + (story_31, "haskell-http2-naive-huffman/story_31.json"); + } +); + +fixture_mod!( + haskell_http2_naive => { + (story_00, "haskell-http2-naive/story_00.json"); + (story_01, "haskell-http2-naive/story_01.json"); + (story_02, "haskell-http2-naive/story_02.json"); + (story_03, "haskell-http2-naive/story_03.json"); + (story_04, "haskell-http2-naive/story_04.json"); + (story_05, "haskell-http2-naive/story_05.json"); + (story_06, "haskell-http2-naive/story_06.json"); + (story_07, "haskell-http2-naive/story_07.json"); + (story_08, "haskell-http2-naive/story_08.json"); + (story_09, 
"haskell-http2-naive/story_09.json"); + (story_10, "haskell-http2-naive/story_10.json"); + (story_11, "haskell-http2-naive/story_11.json"); + (story_12, "haskell-http2-naive/story_12.json"); + (story_13, "haskell-http2-naive/story_13.json"); + (story_14, "haskell-http2-naive/story_14.json"); + (story_15, "haskell-http2-naive/story_15.json"); + (story_16, "haskell-http2-naive/story_16.json"); + (story_17, "haskell-http2-naive/story_17.json"); + (story_18, "haskell-http2-naive/story_18.json"); + (story_19, "haskell-http2-naive/story_19.json"); + (story_20, "haskell-http2-naive/story_20.json"); + (story_21, "haskell-http2-naive/story_21.json"); + (story_22, "haskell-http2-naive/story_22.json"); + (story_23, "haskell-http2-naive/story_23.json"); + (story_24, "haskell-http2-naive/story_24.json"); + (story_25, "haskell-http2-naive/story_25.json"); + (story_26, "haskell-http2-naive/story_26.json"); + (story_27, "haskell-http2-naive/story_27.json"); + (story_28, "haskell-http2-naive/story_28.json"); + (story_29, "haskell-http2-naive/story_29.json"); + (story_30, "haskell-http2-naive/story_30.json"); + (story_31, "haskell-http2-naive/story_31.json"); + } +); + +fixture_mod!( + haskell_http2_static => { + (story_00, "haskell-http2-static/story_00.json"); + (story_01, "haskell-http2-static/story_01.json"); + (story_02, "haskell-http2-static/story_02.json"); + (story_03, "haskell-http2-static/story_03.json"); + (story_04, "haskell-http2-static/story_04.json"); + (story_05, "haskell-http2-static/story_05.json"); + (story_06, "haskell-http2-static/story_06.json"); + (story_07, "haskell-http2-static/story_07.json"); + (story_08, "haskell-http2-static/story_08.json"); + (story_09, "haskell-http2-static/story_09.json"); + (story_10, "haskell-http2-static/story_10.json"); + (story_11, "haskell-http2-static/story_11.json"); + (story_12, "haskell-http2-static/story_12.json"); + (story_13, "haskell-http2-static/story_13.json"); + (story_14, "haskell-http2-static/story_14.json"); + 
(story_15, "haskell-http2-static/story_15.json"); + (story_16, "haskell-http2-static/story_16.json"); + (story_17, "haskell-http2-static/story_17.json"); + (story_18, "haskell-http2-static/story_18.json"); + (story_19, "haskell-http2-static/story_19.json"); + (story_20, "haskell-http2-static/story_20.json"); + (story_21, "haskell-http2-static/story_21.json"); + (story_22, "haskell-http2-static/story_22.json"); + (story_23, "haskell-http2-static/story_23.json"); + (story_24, "haskell-http2-static/story_24.json"); + (story_25, "haskell-http2-static/story_25.json"); + (story_26, "haskell-http2-static/story_26.json"); + (story_27, "haskell-http2-static/story_27.json"); + (story_28, "haskell-http2-static/story_28.json"); + (story_29, "haskell-http2-static/story_29.json"); + (story_30, "haskell-http2-static/story_30.json"); + (story_31, "haskell-http2-static/story_31.json"); + } +); + +fixture_mod!( + nghttp2 => { + (story_00, "nghttp2/story_00.json"); + (story_01, "nghttp2/story_01.json"); + (story_02, "nghttp2/story_02.json"); + (story_03, "nghttp2/story_03.json"); + (story_04, "nghttp2/story_04.json"); + (story_05, "nghttp2/story_05.json"); + (story_06, "nghttp2/story_06.json"); + (story_07, "nghttp2/story_07.json"); + (story_08, "nghttp2/story_08.json"); + (story_09, "nghttp2/story_09.json"); + (story_10, "nghttp2/story_10.json"); + (story_11, "nghttp2/story_11.json"); + (story_12, "nghttp2/story_12.json"); + (story_13, "nghttp2/story_13.json"); + (story_14, "nghttp2/story_14.json"); + (story_15, "nghttp2/story_15.json"); + (story_16, "nghttp2/story_16.json"); + (story_17, "nghttp2/story_17.json"); + (story_18, "nghttp2/story_18.json"); + (story_19, "nghttp2/story_19.json"); + (story_20, "nghttp2/story_20.json"); + (story_21, "nghttp2/story_21.json"); + (story_22, "nghttp2/story_22.json"); + (story_23, "nghttp2/story_23.json"); + (story_24, "nghttp2/story_24.json"); + (story_25, "nghttp2/story_25.json"); + (story_26, "nghttp2/story_26.json"); + (story_27, 
"nghttp2/story_27.json"); + (story_28, "nghttp2/story_28.json"); + (story_29, "nghttp2/story_29.json"); + (story_30, "nghttp2/story_30.json"); + (story_31, "nghttp2/story_31.json"); + } +); + +fixture_mod!( + haskell_http2_linear => { + (story_00, "haskell-http2-linear/story_00.json"); + (story_01, "haskell-http2-linear/story_01.json"); + (story_02, "haskell-http2-linear/story_02.json"); + (story_03, "haskell-http2-linear/story_03.json"); + (story_04, "haskell-http2-linear/story_04.json"); + (story_05, "haskell-http2-linear/story_05.json"); + (story_06, "haskell-http2-linear/story_06.json"); + (story_07, "haskell-http2-linear/story_07.json"); + (story_08, "haskell-http2-linear/story_08.json"); + (story_09, "haskell-http2-linear/story_09.json"); + (story_10, "haskell-http2-linear/story_10.json"); + (story_11, "haskell-http2-linear/story_11.json"); + (story_12, "haskell-http2-linear/story_12.json"); + (story_13, "haskell-http2-linear/story_13.json"); + (story_14, "haskell-http2-linear/story_14.json"); + (story_15, "haskell-http2-linear/story_15.json"); + (story_16, "haskell-http2-linear/story_16.json"); + (story_17, "haskell-http2-linear/story_17.json"); + (story_18, "haskell-http2-linear/story_18.json"); + (story_19, "haskell-http2-linear/story_19.json"); + (story_20, "haskell-http2-linear/story_20.json"); + (story_21, "haskell-http2-linear/story_21.json"); + (story_22, "haskell-http2-linear/story_22.json"); + (story_23, "haskell-http2-linear/story_23.json"); + (story_24, "haskell-http2-linear/story_24.json"); + (story_25, "haskell-http2-linear/story_25.json"); + (story_26, "haskell-http2-linear/story_26.json"); + (story_27, "haskell-http2-linear/story_27.json"); + (story_28, "haskell-http2-linear/story_28.json"); + (story_29, "haskell-http2-linear/story_29.json"); + (story_30, "haskell-http2-linear/story_30.json"); + (story_31, "haskell-http2-linear/story_31.json"); + } +); + +fixture_mod!( + go_hpack => { + (story_00, "go-hpack/story_00.json"); + (story_01, 
"go-hpack/story_01.json"); + (story_02, "go-hpack/story_02.json"); + (story_03, "go-hpack/story_03.json"); + (story_04, "go-hpack/story_04.json"); + (story_05, "go-hpack/story_05.json"); + (story_06, "go-hpack/story_06.json"); + (story_07, "go-hpack/story_07.json"); + (story_08, "go-hpack/story_08.json"); + (story_09, "go-hpack/story_09.json"); + (story_10, "go-hpack/story_10.json"); + (story_11, "go-hpack/story_11.json"); + (story_12, "go-hpack/story_12.json"); + (story_13, "go-hpack/story_13.json"); + (story_14, "go-hpack/story_14.json"); + (story_15, "go-hpack/story_15.json"); + (story_16, "go-hpack/story_16.json"); + (story_17, "go-hpack/story_17.json"); + (story_18, "go-hpack/story_18.json"); + (story_19, "go-hpack/story_19.json"); + (story_20, "go-hpack/story_20.json"); + (story_21, "go-hpack/story_21.json"); + (story_22, "go-hpack/story_22.json"); + (story_23, "go-hpack/story_23.json"); + (story_24, "go-hpack/story_24.json"); + (story_25, "go-hpack/story_25.json"); + (story_26, "go-hpack/story_26.json"); + (story_27, "go-hpack/story_27.json"); + (story_28, "go-hpack/story_28.json"); + (story_29, "go-hpack/story_29.json"); + (story_30, "go-hpack/story_30.json"); + (story_31, "go-hpack/story_31.json"); + } +); diff --git a/third_party/rust/h2/src/hpack/test/fuzz.rs b/third_party/rust/h2/src/hpack/test/fuzz.rs new file mode 100644 index 000000000000..b5be1a89e43f --- /dev/null +++ b/third_party/rust/h2/src/hpack/test/fuzz.rs @@ -0,0 +1,357 @@ +extern crate bytes; +extern crate quickcheck; +extern crate rand; + +use hpack::{Decoder, Encode, Encoder, Header}; + +use http::header::{HeaderName, HeaderValue}; + +use self::bytes::{Bytes, BytesMut}; +use self::quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; +use self::rand::{Rng, SeedableRng, StdRng}; + +use std::io::Cursor; + +const MAX_CHUNK: usize = 2 * 1024; + +#[test] +fn hpack_fuzz() { + fn prop(fuzz: FuzzHpack) -> TestResult { + fuzz.run(); + TestResult::from_bool(true) + } + + QuickCheck::new() + 
.tests(100) + .quickcheck(prop as fn(FuzzHpack) -> TestResult) +} + +#[derive(Debug, Clone)] +struct FuzzHpack { + // The magic seed that makes the test case reproducible + seed: [usize; 4], + + // The set of headers to encode / decode + frames: Vec, + + // The list of chunk sizes to do it in + chunks: Vec, + + // Number of times reduced + reduced: usize, +} + +#[derive(Debug, Clone)] +struct HeaderFrame { + resizes: Vec, + headers: Vec>>, +} + +impl FuzzHpack { + fn new(seed: [usize; 4]) -> FuzzHpack { + // Seed the RNG + let mut rng = StdRng::from_seed(&seed); + + // Generates a bunch of source headers + let mut source: Vec>> = vec![]; + + for _ in 0..2000 { + source.push(gen_header(&mut rng)); + } + + // Actual test run headers + let num: usize = rng.gen_range(40, 500); + + let mut frames: Vec = vec![]; + let mut added = 0; + + let skew: i32 = rng.gen_range(1, 5); + + // Rough number of headers to add + while added < num { + let mut frame = HeaderFrame { + resizes: vec![], + headers: vec![], + }; + + match rng.gen_range(0, 20) { + 0 => { + // Two resizes + let high = rng.gen_range(128, MAX_CHUNK * 2); + let low = rng.gen_range(0, high); + + frame.resizes.extend(&[low, high]); + }, + 1...3 => { + frame.resizes.push(rng.gen_range(128, MAX_CHUNK * 2)); + }, + _ => {}, + } + + for _ in 0..rng.gen_range(1, (num - added) + 1) { + added += 1; + + let x: f64 = rng.gen_range(0.0, 1.0); + let x = x.powi(skew); + + let i = (x * source.len() as f64) as usize; + frame.headers.push(source[i].clone()); + } + + frames.push(frame); + } + + // Now, generate the buffer sizes used to encode + let mut chunks = vec![]; + + for _ in 0..rng.gen_range(0, 100) { + chunks.push(rng.gen_range(0, MAX_CHUNK)); + } + + FuzzHpack { + seed: seed, + frames: frames, + chunks: chunks, + reduced: 0, + } + } + + fn run(self) { + let mut chunks = self.chunks; + let frames = self.frames; + let mut expect = vec![]; + + let mut encoder = Encoder::default(); + let mut decoder = Decoder::default(); + + for 
frame in frames { + expect.extend(frame.headers.clone()); + + let mut index = None; + let mut input = frame.headers.into_iter(); + + let mut buf = BytesMut::with_capacity(chunks.pop().unwrap_or(MAX_CHUNK)); + + if let Some(max) = frame.resizes.iter().max() { + decoder.queue_size_update(*max); + } + + // Apply resizes + for resize in &frame.resizes { + encoder.update_max_size(*resize); + } + + loop { + match encoder.encode(index.take(), &mut input, &mut buf) { + Encode::Full => break, + Encode::Partial(i) => { + index = Some(i); + + // Decode the chunk! + decoder + .decode(&mut Cursor::new(&mut buf), |e| { + assert_eq!(e, expect.remove(0).reify().unwrap()); + }) + .unwrap(); + + buf = BytesMut::with_capacity(chunks.pop().unwrap_or(MAX_CHUNK)); + }, + } + } + + // Decode the chunk! + decoder + .decode(&mut Cursor::new(&mut buf), |e| { + assert_eq!(e, expect.remove(0).reify().unwrap()); + }) + .unwrap(); + } + + assert_eq!(0, expect.len()); + } +} + +impl Arbitrary for FuzzHpack { + fn arbitrary(g: &mut G) -> Self { + FuzzHpack::new(quickcheck::Rng::gen(g)) + } +} + +fn gen_header(g: &mut StdRng) -> Header> { + use http::{Method, StatusCode}; + + if g.gen_weighted_bool(10) { + match g.next_u32() % 5 { + 0 => { + let value = gen_string(g, 4, 20); + Header::Authority(to_shared(value)) + }, + 1 => { + let method = match g.next_u32() % 6 { + 0 => Method::GET, + 1 => Method::POST, + 2 => Method::PUT, + 3 => Method::PATCH, + 4 => Method::DELETE, + 5 => { + let n: usize = g.gen_range(3, 7); + let bytes: Vec = (0..n) + .map(|_| g.choose(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap().clone()) + .collect(); + + Method::from_bytes(&bytes).unwrap() + }, + _ => unreachable!(), + }; + + Header::Method(method) + }, + 2 => { + let value = match g.next_u32() % 2 { + 0 => "http", + 1 => "https", + _ => unreachable!(), + }; + + Header::Scheme(to_shared(value.to_string())) + }, + 3 => { + let value = match g.next_u32() % 100 { + 0 => "/".to_string(), + 1 => "/index.html".to_string(), + _ => 
gen_string(g, 2, 20), + }; + + Header::Path(to_shared(value)) + }, + 4 => { + let status = (g.gen::() % 500) + 100; + + Header::Status(StatusCode::from_u16(status).unwrap()) + }, + _ => unreachable!(), + } + } else { + let name = gen_header_name(g); + let mut value = gen_header_value(g); + + if g.gen_weighted_bool(30) { + value.set_sensitive(true); + } + + Header::Field { + name: Some(name), + value: value, + } + } +} + +fn gen_header_name(g: &mut StdRng) -> HeaderName { + use http::header; + + if g.gen_weighted_bool(2) { + g.choose(&[ + header::ACCEPT, + header::ACCEPT_CHARSET, + header::ACCEPT_ENCODING, + header::ACCEPT_LANGUAGE, + header::ACCEPT_RANGES, + header::ACCESS_CONTROL_ALLOW_CREDENTIALS, + header::ACCESS_CONTROL_ALLOW_HEADERS, + header::ACCESS_CONTROL_ALLOW_METHODS, + header::ACCESS_CONTROL_ALLOW_ORIGIN, + header::ACCESS_CONTROL_EXPOSE_HEADERS, + header::ACCESS_CONTROL_MAX_AGE, + header::ACCESS_CONTROL_REQUEST_HEADERS, + header::ACCESS_CONTROL_REQUEST_METHOD, + header::AGE, + header::ALLOW, + header::ALT_SVC, + header::AUTHORIZATION, + header::CACHE_CONTROL, + header::CONNECTION, + header::CONTENT_DISPOSITION, + header::CONTENT_ENCODING, + header::CONTENT_LANGUAGE, + header::CONTENT_LENGTH, + header::CONTENT_LOCATION, + header::CONTENT_RANGE, + header::CONTENT_SECURITY_POLICY, + header::CONTENT_SECURITY_POLICY_REPORT_ONLY, + header::CONTENT_TYPE, + header::COOKIE, + header::DNT, + header::DATE, + header::ETAG, + header::EXPECT, + header::EXPIRES, + header::FORWARDED, + header::FROM, + header::HOST, + header::IF_MATCH, + header::IF_MODIFIED_SINCE, + header::IF_NONE_MATCH, + header::IF_RANGE, + header::IF_UNMODIFIED_SINCE, + header::LAST_MODIFIED, + header::LINK, + header::LOCATION, + header::MAX_FORWARDS, + header::ORIGIN, + header::PRAGMA, + header::PROXY_AUTHENTICATE, + header::PROXY_AUTHORIZATION, + header::PUBLIC_KEY_PINS, + header::PUBLIC_KEY_PINS_REPORT_ONLY, + header::RANGE, + header::REFERER, + header::REFERRER_POLICY, + header::REFRESH, + 
header::RETRY_AFTER, + header::SERVER, + header::SET_COOKIE, + header::STRICT_TRANSPORT_SECURITY, + header::TE, + header::TRAILER, + header::TRANSFER_ENCODING, + header::USER_AGENT, + header::UPGRADE, + header::UPGRADE_INSECURE_REQUESTS, + header::VARY, + header::VIA, + header::WARNING, + header::WWW_AUTHENTICATE, + header::X_CONTENT_TYPE_OPTIONS, + header::X_DNS_PREFETCH_CONTROL, + header::X_FRAME_OPTIONS, + header::X_XSS_PROTECTION, + ]).unwrap() + .clone() + } else { + let value = gen_string(g, 1, 25); + HeaderName::from_bytes(value.as_bytes()).unwrap() + } +} + +fn gen_header_value(g: &mut StdRng) -> HeaderValue { + let value = gen_string(g, 0, 70); + HeaderValue::from_bytes(value.as_bytes()).unwrap() +} + +fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { + let bytes: Vec<_> = (min..max) + .map(|_| { + // Chars to pick from + g.choose(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----") + .unwrap() + .clone() + }) + .collect(); + + String::from_utf8(bytes).unwrap() +} + +fn to_shared(src: String) -> ::string::String { + let b: Bytes = src.into(); + unsafe { ::string::String::from_utf8_unchecked(b) } +} diff --git a/third_party/rust/h2/src/hpack/test/mod.rs b/third_party/rust/h2/src/hpack/test/mod.rs new file mode 100644 index 000000000000..9b1f27169d11 --- /dev/null +++ b/third_party/rust/h2/src/hpack/test/mod.rs @@ -0,0 +1,2 @@ +mod fixture; +mod fuzz; diff --git a/third_party/rust/h2/src/lib.rs b/third_party/rust/h2/src/lib.rs new file mode 100644 index 000000000000..8af549d7ba0a --- /dev/null +++ b/third_party/rust/h2/src/lib.rs @@ -0,0 +1,134 @@ +//! An asynchronous, HTTP/2.0 server and client implementation. +//! +//! This library implements the [HTTP/2.0] specification. The implementation is +//! asynchronous, using [futures] as the basis for the API. The implementation +//! is also decoupled from TCP or TLS details. The user must handle ALPN and +//! HTTP/1.1 upgrades themselves. +//! +//! # Getting started +//! +//! 
Add the following to your `Cargo.toml` file: +//! +//! ```toml +//! [dependencies] +//! h2 = "0.1" +//! ``` +//! +//! Next, add this to your crate: +//! +//! ```no_run +//! extern crate h2; +//! ``` +//! +//! # Layout +//! +//! The crate is split into [`client`] and [`server`] modules. Types that are +//! common to both clients and servers are located at the root of the crate. +//! +//! See module level documentation for more details on how to use `h2`. +//! +//! # Handshake +//! +//! Both the client and the server require a connection to already be in a state +//! ready to start the HTTP/2.0 handshake. This library does not provide +//! facilities to do this. +//! +//! There are three ways to reach an appropriate state to start the HTTP/2.0 +//! handshake. +//! +//! * Opening an HTTP/1.1 connection and performing an [upgrade]. +//! * Opening a connection with TLS and use ALPN to negotiate the protocol. +//! * Open a connection with prior knowledge, i.e. both the client and the +//! server assume that the connection is immediately ready to start the +//! HTTP/2.0 handshake once opened. +//! +//! Once the connection is ready to start the HTTP/2.0 handshake, it can be +//! passed to [`server::handshake`] or [`client::handshake`]. At this point, the +//! library will start the handshake process, which consists of: +//! +//! * The client sends the connection preface (a predefined sequence of 24 +//! octets). +//! * Both the client and the server sending a SETTINGS frame. +//! +//! See the [Starting HTTP/2] in the specification for more details. +//! +//! # Flow control +//! +//! [Flow control] is a fundamental feature of HTTP/2.0. The `h2` library +//! exposes flow control to the user. +//! +//! An HTTP/2.0 client or server may not send unlimited data to the peer. When a +//! stream is initiated, both the client and the server are provided with an +//! initial window size for that stream. A window size is the number of bytes +//! the endpoint can send to the peer. 
At any point in time, the peer may +//! increase this window size by sending a `WINDOW_UPDATE` frame. Once a client +//! or server has sent data filling the window for a stream, no further data may +//! be sent on that stream until the peer increases the window. +//! +//! There is also a **connection level** window governing data sent across all +//! streams. +//! +//! Managing flow control for inbound data is done through [`ReleaseCapacity`]. +//! Managing flow control for outbound data is done through [`SendStream`]. See +//! the struct level documentation for those two types for more details. +//! +//! [HTTP/2.0]: https://http2.github.io/ +//! [futures]: https://docs.rs/futures/ +//! [`client`]: client/index.html +//! [`server`]: server/index.html +//! [Flow control]: http://httpwg.org/specs/rfc7540.html#FlowControl +//! [`ReleaseCapacity`]: struct.ReleaseCapacity.html +//! [`SendStream`]: struct.SendStream.html +//! [Starting HTTP/2]: http://httpwg.org/specs/rfc7540.html#starting +//! [upgrade]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism +//! [`server::handshake`]: server/fn.handshake.html +//! [`client::handshake`]: client/fn.handshake.html + +#![doc(html_root_url = "https://docs.rs/h2/0.1.12")] +#![deny(warnings, missing_debug_implementations, missing_docs)] + +#[macro_use] +extern crate futures; + +#[macro_use] +extern crate tokio_io; + +// HTTP types +extern crate http; + +// Buffer utilities +extern crate bytes; + +// Hash function used for HPACK encoding and tracking stream states. 
+extern crate fnv; + +extern crate byteorder; +extern crate slab; + +#[macro_use] +extern crate log; +extern crate string; +extern crate indexmap; + +mod error; +#[cfg_attr(feature = "unstable", allow(missing_docs))] +mod codec; +mod hpack; +mod proto; + +#[cfg(not(feature = "unstable"))] +mod frame; + +#[cfg(feature = "unstable")] +#[allow(missing_docs)] +pub mod frame; + +pub mod client; +pub mod server; +mod share; + +pub use error::{Error, Reason}; +pub use share::{SendStream, StreamId, RecvStream, ReleaseCapacity}; + +#[cfg(feature = "unstable")] +pub use codec::{Codec, RecvError, SendError, UserError}; diff --git a/third_party/rust/h2/src/proto/connection.rs b/third_party/rust/h2/src/proto/connection.rs new file mode 100644 index 000000000000..e2e9f4b3909c --- /dev/null +++ b/third_party/rust/h2/src/proto/connection.rs @@ -0,0 +1,410 @@ +use {client, frame, proto, server}; +use codec::RecvError; +use frame::{Reason, StreamId}; + +use frame::DEFAULT_INITIAL_WINDOW_SIZE; +use proto::*; + +use bytes::{Bytes, IntoBuf}; +use futures::Stream; +use tokio_io::{AsyncRead, AsyncWrite}; + +use std::marker::PhantomData; +use std::io; +use std::time::Duration; + +/// An H2 connection +#[derive(Debug)] +pub(crate) struct Connection +where + P: Peer, +{ + /// Tracks the connection level state transitions. + state: State, + + /// An error to report back once complete. + /// + /// This exists separately from State in order to support + /// graceful shutdown. + error: Option, + + /// Read / write frame values + codec: Codec>, + + /// Pending GOAWAY frames to write. + go_away: GoAway, + + /// Ping/pong handler + ping_pong: PingPong, + + /// Connection settings + settings: Settings, + + /// Stream state handler + streams: Streams, + + /// Client or server + _phantom: PhantomData

, +} + +#[derive(Debug, Clone)] +pub(crate) struct Config { + pub next_stream_id: StreamId, + pub initial_max_send_streams: usize, + pub reset_stream_duration: Duration, + pub reset_stream_max: usize, + pub settings: frame::Settings, +} + +#[derive(Debug)] +enum State { + /// Currently open in a sane state + Open, + + /// The codec must be flushed + Closing(Reason), + + /// In a closed state + Closed(Reason), +} + +impl Connection +where + T: AsyncRead + AsyncWrite, + P: Peer, + B: IntoBuf, +{ + pub fn new( + codec: Codec>, + config: Config, + ) -> Connection { + let streams = Streams::new(streams::Config { + local_init_window_sz: config.settings + .initial_window_size() + .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), + initial_max_send_streams: config.initial_max_send_streams, + local_next_stream_id: config.next_stream_id, + local_push_enabled: config.settings.is_push_enabled(), + local_reset_duration: config.reset_stream_duration, + local_reset_max: config.reset_stream_max, + remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, + remote_max_initiated: config.settings + .max_concurrent_streams() + .map(|max| max as usize), + }); + Connection { + state: State::Open, + error: None, + codec: codec, + go_away: GoAway::new(), + ping_pong: PingPong::new(), + settings: Settings::new(), + streams: streams, + _phantom: PhantomData, + } + } + + pub fn set_target_window_size(&mut self, size: WindowSize) { + self.streams.set_target_connection_window_size(size); + } + + /// Returns `Ready` when the connection is ready to receive a frame. + /// + /// Returns `RecvError` as this may raise errors that are caused by delayed + /// processing of received frames. 
+ fn poll_ready(&mut self) -> Poll<(), RecvError> { + // The order of these calls don't really matter too much + try_ready!(self.ping_pong.send_pending_pong(&mut self.codec)); + try_ready!(self.ping_pong.send_pending_ping(&mut self.codec)); + try_ready!( + self.settings + .send_pending_ack(&mut self.codec, &mut self.streams) + ); + try_ready!(self.streams.send_pending_refusal(&mut self.codec)); + + Ok(().into()) + } + + /// Send any pending GOAWAY frames. + /// + /// This will return `Some(reason)` if the connection should be closed + /// afterwards. If this is a graceful shutdown, this returns `None`. + fn poll_go_away(&mut self) -> Poll, io::Error> { + self.go_away.send_pending_go_away(&mut self.codec) + } + + fn go_away(&mut self, id: StreamId, e: Reason) { + let frame = frame::GoAway::new(id, e); + self.streams.send_go_away(id); + self.go_away.go_away(frame); + } + + pub fn go_away_now(&mut self, e: Reason) { + let last_processed_id = self.streams.last_processed_id(); + let frame = frame::GoAway::new(last_processed_id, e); + self.go_away.go_away_now(frame); + } + + fn take_error(&mut self, ours: Reason) -> Poll<(), proto::Error> { + let reason = if let Some(theirs) = self.error.take() { + match (ours, theirs) { + // If either side reported an error, return that + // to the user. + (Reason::NO_ERROR, err) | (err, Reason::NO_ERROR) => err, + // If both sides reported an error, give their + // error back to th user. We assume our error + // was a consequence of their error, and less + // important. 
+ (_, theirs) => theirs, + } + } else { + ours + }; + + if reason == Reason::NO_ERROR { + Ok(().into()) + } else { + Err(proto::Error::Proto(reason)) + } + } + + /// Closes the connection by transitioning to a GOAWAY state + /// iff there are no streams or references + pub fn maybe_close_connection_if_no_streams(&mut self) { + // If we poll() and realize that there are no streams or references + // then we can close the connection by transitioning to GOAWAY + if self.streams.num_active_streams() == 0 && !self.streams.has_streams_or_other_references() { + self.go_away_now(Reason::NO_ERROR); + } + } + + /// Advances the internal state of the connection. + pub fn poll(&mut self) -> Poll<(), proto::Error> { + use codec::RecvError::*; + + loop { + // TODO: probably clean up this glob of code + match self.state { + // When open, continue to poll a frame + State::Open => { + match self.poll2() { + // The connection has shutdown normally + Ok(Async::Ready(())) => return self.take_error(Reason::NO_ERROR), + // The connection is not ready to make progress + Ok(Async::NotReady) => { + // Ensure all window updates have been sent. + // + // This will also handle flushing `self.codec` + try_ready!(self.streams.poll_complete(&mut self.codec)); + + if self.error.is_some() || self.go_away.should_close_on_idle() { + if self.streams.num_active_streams() == 0 { + self.go_away_now(Reason::NO_ERROR); + continue; + } + } + + return Ok(Async::NotReady); + }, + // Attempting to read a frame resulted in a connection level + // error. This is handled by setting a GOAWAY frame followed by + // terminating the connection. + Err(Connection(e)) => { + debug!("Connection::poll; err={:?}", e); + + // We may have already sent a GOAWAY for this error, + // if so, don't send another, just flush and close up. 
+ if let Some(reason) = self.go_away.going_away_reason() { + if reason == e { + trace!(" -> already going away"); + self.state = State::Closing(e); + continue; + } + } + + // Reset all active streams + self.streams.recv_err(&e.into()); + self.go_away_now(e); + }, + // Attempting to read a frame resulted in a stream level error. + // This is handled by resetting the frame then trying to read + // another frame. + Err(Stream { + id, + reason, + }) => { + trace!("stream level error; id={:?}; reason={:?}", id, reason); + self.streams.send_reset(id, reason); + }, + // Attempting to read a frame resulted in an I/O error. All + // active streams must be reset. + // + // TODO: Are I/O errors recoverable? + Err(Io(e)) => { + let e = e.into(); + + // Reset all active streams + self.streams.recv_err(&e); + + // Return the error + return Err(e); + }, + } + } + State::Closing(reason) => { + trace!("connection closing after flush, reason={:?}", reason); + // Flush the codec + try_ready!(self.codec.flush()); + + // Transition the state to error + self.state = State::Closed(reason); + }, + State::Closed(reason) => return self.take_error(reason), + } + } + } + + fn poll2(&mut self) -> Poll<(), RecvError> { + use frame::Frame::*; + + // This happens outside of the loop to prevent needing to do a clock + // check and then comparison of the queue possibly multiple times a + // second (and thus, the clock wouldn't have changed enough to matter). 
+ self.clear_expired_reset_streams(); + + loop { + // First, ensure that the `Connection` is able to receive a frame + // + // The order here matters: + // - poll_go_away may buffer a graceful shutdown GOAWAY frame + // - If it has, we've also added a PING to be sent in poll_ready + if let Some(reason) = try_ready!(self.poll_go_away()) { + if self.go_away.should_close_now() { + return Err(RecvError::Connection(reason)); + } + // Only NO_ERROR should be waiting for idle + debug_assert_eq!(reason, Reason::NO_ERROR, "graceful GOAWAY should be NO_ERROR"); + } + try_ready!(self.poll_ready()); + + match try_ready!(self.codec.poll()) { + Some(Headers(frame)) => { + trace!("recv HEADERS; frame={:?}", frame); + self.streams.recv_headers(frame)?; + }, + Some(Data(frame)) => { + trace!("recv DATA; frame={:?}", frame); + self.streams.recv_data(frame)?; + }, + Some(Reset(frame)) => { + trace!("recv RST_STREAM; frame={:?}", frame); + self.streams.recv_reset(frame)?; + }, + Some(PushPromise(frame)) => { + trace!("recv PUSH_PROMISE; frame={:?}", frame); + self.streams.recv_push_promise(frame)?; + }, + Some(Settings(frame)) => { + trace!("recv SETTINGS; frame={:?}", frame); + self.settings.recv_settings(frame); + }, + Some(GoAway(frame)) => { + trace!("recv GOAWAY; frame={:?}", frame); + // This should prevent starting new streams, + // but should allow continuing to process current streams + // until they are all EOS. Once they are, State should + // transition to GoAway. 
+ self.streams.recv_go_away(&frame)?; + self.error = Some(frame.reason()); + }, + Some(Ping(frame)) => { + trace!("recv PING; frame={:?}", frame); + let status = self.ping_pong.recv_ping(frame); + if status.is_shutdown() { + assert!( + self.go_away.is_going_away(), + "received unexpected shutdown ping" + ); + + let last_processed_id = self.streams.last_processed_id(); + self.go_away(last_processed_id, Reason::NO_ERROR); + } + }, + Some(WindowUpdate(frame)) => { + trace!("recv WINDOW_UPDATE; frame={:?}", frame); + self.streams.recv_window_update(frame)?; + }, + Some(Priority(frame)) => { + trace!("recv PRIORITY; frame={:?}", frame); + // TODO: handle + }, + None => { + trace!("codec closed"); + self.streams.recv_eof(false) + .ok().expect("mutex poisoned"); + return Ok(Async::Ready(())); + }, + } + } + } + + fn clear_expired_reset_streams(&mut self) { + self.streams.clear_expired_reset_streams(); + } +} + +impl Connection +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + pub(crate) fn streams(&self) -> &Streams { + &self.streams + } +} + +impl Connection +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + pub fn next_incoming(&mut self) -> Option> { + self.streams.next_incoming() + } + + // Graceful shutdown only makes sense for server peers. + pub fn go_away_gracefully(&mut self) { + if self.go_away.is_going_away() { + // No reason to start a new one. + return; + } + + // According to http://httpwg.org/specs/rfc7540.html#GOAWAY: + // + // > A server that is attempting to gracefully shut down a connection + // > SHOULD send an initial GOAWAY frame with the last stream + // > identifier set to 2^31-1 and a NO_ERROR code. This signals to the + // > client that a shutdown is imminent and that initiating further + // > requests is prohibited. After allowing time for any in-flight + // > stream creation (at least one round-trip time), the server can + // > send another GOAWAY frame with an updated last stream identifier. 
+ // > This ensures that a connection can be cleanly shut down without + // > losing requests. + self.go_away(StreamId::MAX, Reason::NO_ERROR); + + // We take the advice of waiting 1 RTT literally, and wait + // for a pong before proceeding. + self.ping_pong.ping_shutdown(); + } +} + +impl Drop for Connection +where + P: Peer, + B: IntoBuf, +{ + fn drop(&mut self) { + // Ignore errors as this indicates that the mutex is poisoned. + let _ = self.streams.recv_eof(true); + } +} diff --git a/third_party/rust/h2/src/proto/error.rs b/third_party/rust/h2/src/proto/error.rs new file mode 100644 index 000000000000..2ab9a37d3f43 --- /dev/null +++ b/third_party/rust/h2/src/proto/error.rs @@ -0,0 +1,53 @@ +use codec::{RecvError, SendError}; +use frame::Reason; + +use std::io; + +/// Either an H2 reason or an I/O error +#[derive(Debug)] +pub enum Error { + Proto(Reason), + Io(io::Error), +} + +impl Error { + /// Clone the error for internal purposes. + /// + /// `io::Error` is not `Clone`, so we only copy the `ErrorKind`. 
+ pub(super) fn shallow_clone(&self) -> Error { + match *self { + Error::Proto(reason) => Error::Proto(reason), + Error::Io(ref io) => Error::Io(io::Error::from(io.kind())), + } + } +} + +impl From for Error { + fn from(src: Reason) -> Self { + Error::Proto(src) + } +} + +impl From for Error { + fn from(src: io::Error) -> Self { + Error::Io(src) + } +} + +impl From for RecvError { + fn from(src: Error) -> RecvError { + match src { + Error::Proto(reason) => RecvError::Connection(reason), + Error::Io(e) => RecvError::Io(e), + } + } +} + +impl From for SendError { + fn from(src: Error) -> SendError { + match src { + Error::Proto(reason) => SendError::Connection(reason), + Error::Io(e) => SendError::Io(e), + } + } +} diff --git a/third_party/rust/h2/src/proto/go_away.rs b/third_party/rust/h2/src/proto/go_away.rs new file mode 100644 index 000000000000..5b4f856de27d --- /dev/null +++ b/third_party/rust/h2/src/proto/go_away.rs @@ -0,0 +1,136 @@ +use codec::Codec; +use frame::{self, Reason, StreamId}; + +use bytes::Buf; +use futures::{Async, Poll}; +use std::io; +use tokio_io::AsyncWrite; + +/// Manages our sending of GOAWAY frames. +#[derive(Debug)] +pub(super) struct GoAway { + /// Whether the connection should close now, or wait until idle. + close_now: bool, + /// Records if we've sent any GOAWAY before. + going_away: Option, + + /// A GOAWAY frame that must be buffered in the Codec immediately. + pending: Option, +} + +/// Keeps a memory of any GOAWAY frames we've sent before. +/// +/// This looks very similar to a `frame::GoAway`, but is a separate type. Why? +/// Mostly for documentation purposes. This type is to record status. If it +/// were a `frame::GoAway`, it might appear like we eventually wanted to +/// serialize it. We **only** want to be able to look up these fields at a +/// later time. 
+/// +/// (Technically, `frame::GoAway` should gain an opaque_debug_data field as +/// well, and we wouldn't want to save that here to accidentally dump in logs, +/// or waste struct space.) +#[derive(Debug)] +struct GoingAway { + /// Stores the highest stream ID of a GOAWAY that has been sent. + /// + /// It's illegal to send a subsequent GOAWAY with a higher ID. + last_processed_id: StreamId, + + /// Records the error code of any GOAWAY frame sent. + reason: Reason, +} + +impl GoAway { + pub fn new() -> Self { + GoAway { + close_now: false, + going_away: None, + pending: None, + } + } + + /// Enqueue a GOAWAY frame to be written. + /// + /// The connection is expected to continue to run until idle. + pub fn go_away(&mut self, f: frame::GoAway) { + if let Some(ref going_away) = self.going_away { + assert!( + f.last_stream_id() <= going_away.last_processed_id, + "GOAWAY stream IDs shouldn't be higher; \ + last_processed_id = {:?}, f.last_stream_id() = {:?}", + going_away.last_processed_id, + f.last_stream_id(), + ); + } + + self.going_away = Some(GoingAway { + last_processed_id: f.last_stream_id(), + reason: f.reason(), + }); + self.pending = Some(f); + } + + pub fn go_away_now(&mut self, f: frame::GoAway) { + self.close_now = true; + if let Some(ref going_away) = self.going_away { + // Prevent sending the same GOAWAY twice. + if going_away.last_processed_id == f.last_stream_id() + && going_away.reason == f.reason() { + return; + } + } + self.go_away(f); + } + + /// Return if a GOAWAY has ever been scheduled. + pub fn is_going_away(&self) -> bool { + self.going_away.is_some() + } + + /// Return the last Reason we've sent. + pub fn going_away_reason(&self) -> Option { + self.going_away + .as_ref() + .map(|g| g.reason) + } + + /// Returns if the connection should close now, or wait until idle. + pub fn should_close_now(&self) -> bool { + self.pending.is_none() && self.close_now + } + + /// Returns if the connection should be closed when idle. 
+ pub fn should_close_on_idle(&self) -> bool { + !self.close_now && self.going_away + .as_ref() + .map(|g| g.last_processed_id != StreamId::MAX) + .unwrap_or(false) + } + + /// Try to write a pending GOAWAY frame to the buffer. + /// + /// If a frame is written, the `Reason` of the GOAWAY is returned. + pub fn send_pending_go_away(&mut self, dst: &mut Codec) -> Poll, io::Error> + where + T: AsyncWrite, + B: Buf, + { + if let Some(frame) = self.pending.take() { + if !dst.poll_ready()?.is_ready() { + self.pending = Some(frame); + return Ok(Async::NotReady); + } + + let reason = frame.reason(); + dst.buffer(frame.into()) + .ok() + .expect("invalid GOAWAY frame"); + + return Ok(Async::Ready(Some(reason))); + } else if self.should_close_now() { + return Ok(Async::Ready(self.going_away_reason())); + } + + Ok(Async::Ready(None)) + } +} diff --git a/third_party/rust/h2/src/proto/mod.rs b/third_party/rust/h2/src/proto/mod.rs new file mode 100644 index 000000000000..f9e815db442e --- /dev/null +++ b/third_party/rust/h2/src/proto/mod.rs @@ -0,0 +1,37 @@ +mod connection; +mod error; +mod go_away; +mod peer; +mod ping_pong; +mod settings; +mod streams; + +pub(crate) use self::connection::{Config, Connection}; +pub(crate) use self::error::Error; +pub(crate) use self::peer::{Peer, Dyn as DynPeer}; +pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams}; +pub(crate) use self::streams::{PollReset, Prioritized, Open}; + +use codec::Codec; + +use self::go_away::GoAway; +use self::ping_pong::PingPong; +use self::settings::Settings; + +use frame::{self, Frame}; + +use futures::{task, Async, Poll}; +use futures::task::Task; + +use bytes::Buf; + +use tokio_io::AsyncWrite; + +pub type PingPayload = [u8; 8]; + +pub type WindowSize = u32; + +// Constants +pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; +pub const DEFAULT_RESET_STREAM_MAX: usize = 10; +pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; diff --git a/third_party/rust/h2/src/proto/peer.rs 
b/third_party/rust/h2/src/proto/peer.rs new file mode 100644 index 000000000000..8362db6bcb2f --- /dev/null +++ b/third_party/rust/h2/src/proto/peer.rs @@ -0,0 +1,84 @@ +use codec::RecvError; +use error::Reason; +use frame::{Headers, StreamId}; +use proto::Open; + +use http::{Request, Response}; + +use std::fmt; + +/// Either a Client or a Server +pub(crate) trait Peer { + /// Message type polled from the transport + type Poll: fmt::Debug; + + fn dyn() -> Dyn; + + fn is_server() -> bool; + + fn convert_poll_message(headers: Headers) -> Result; + + fn is_local_init(id: StreamId) -> bool { + assert!(!id.is_zero()); + Self::is_server() == id.is_server_initiated() + } +} + +/// A dynamic representation of `Peer`. +/// +/// This is used internally to avoid incurring a generic on all internal types. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) enum Dyn { + Client, + Server, +} + +#[derive(Debug)] +pub enum PollMessage { + Client(Response<()>), + Server(Request<()>), +} + +// ===== impl Dyn ===== + +impl Dyn { + pub fn is_server(&self) -> bool { + *self == Dyn::Server + } + + pub fn is_local_init(&self, id: StreamId) -> bool { + assert!(!id.is_zero()); + self.is_server() == id.is_server_initiated() + } + + pub fn convert_poll_message(&self, headers: Headers) -> Result { + if self.is_server() { + ::server::Peer::convert_poll_message(headers) + .map(PollMessage::Server) + } else { + ::client::Peer::convert_poll_message(headers) + .map(PollMessage::Client) + } + } + + /// Returns true if the remote peer can initiate a stream with the given ID. 
+ pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), RecvError> { + if self.is_server() { + // Ensure that the ID is a valid client initiated ID + if mode.is_push_promise() || !id.is_client_initiated() { + trace!("Cannot open stream {:?} - not client initiated, PROTOCOL_ERROR", id); + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + Ok(()) + } else { + // Ensure that the ID is a valid server initiated ID + if !mode.is_push_promise() || !id.is_server_initiated() { + trace!("Cannot open stream {:?} - not server initiated, PROTOCOL_ERROR", id); + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + Ok(()) + } + } +} diff --git a/third_party/rust/h2/src/proto/ping_pong.rs b/third_party/rust/h2/src/proto/ping_pong.rs new file mode 100644 index 000000000000..8cce222fa5b3 --- /dev/null +++ b/third_party/rust/h2/src/proto/ping_pong.rs @@ -0,0 +1,125 @@ +use codec::Codec; +use frame::Ping; +use proto::PingPayload; + +use bytes::Buf; +use futures::{Async, Poll}; +use std::io; +use tokio_io::AsyncWrite; + +/// Acknowledges ping requests from the remote. +#[derive(Debug)] +pub struct PingPong { + pending_ping: Option, + pending_pong: Option, +} + +#[derive(Debug)] +struct PendingPing { + payload: PingPayload, + sent: bool, +} + +/// Status returned from `PingPong::recv_ping`. +#[derive(Debug)] +pub(crate) enum ReceivedPing { + MustAck, + Unknown, + Shutdown, +} + +impl PingPong { + pub fn new() -> Self { + PingPong { + pending_ping: None, + pending_pong: None, + } + } + + pub fn ping_shutdown(&mut self) { + assert!(self.pending_ping.is_none()); + + self.pending_ping = Some(PendingPing { + payload: Ping::SHUTDOWN, + sent: false, + }); + } + + /// Process a ping + pub(crate) fn recv_ping(&mut self, ping: Ping) -> ReceivedPing { + // The caller should always check that `send_pongs` returns ready before + // calling `recv_ping`. 
+ assert!(self.pending_pong.is_none()); + + if ping.is_ack() { + if let Some(pending) = self.pending_ping.take() { + if &pending.payload == ping.payload() { + trace!("recv PING ack"); + return ReceivedPing::Shutdown; + } + + // if not the payload we expected, put it back. + self.pending_ping = Some(pending); + } + + // else we were acked a ping we didn't send? + // The spec doesn't require us to do anything about this, + // so for resiliency, just ignore it for now. + warn!("recv PING ack that we never sent: {:?}", ping); + ReceivedPing::Unknown + } else { + // Save the ping's payload to be sent as an acknowledgement. + self.pending_pong = Some(ping.into_payload()); + ReceivedPing::MustAck + } + } + + /// Send any pending pongs. + pub fn send_pending_pong(&mut self, dst: &mut Codec) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + if let Some(pong) = self.pending_pong.take() { + if !dst.poll_ready()?.is_ready() { + self.pending_pong = Some(pong); + return Ok(Async::NotReady); + } + + dst.buffer(Ping::pong(pong).into()) + .expect("invalid pong frame"); + } + + Ok(Async::Ready(())) + } + + /// Send any pending pings. 
+ pub fn send_pending_ping(&mut self, dst: &mut Codec) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + if let Some(ref mut ping) = self.pending_ping { + if !ping.sent { + if !dst.poll_ready()?.is_ready() { + return Ok(Async::NotReady); + } + + dst.buffer(Ping::new(ping.payload).into()) + .expect("invalid ping frame"); + ping.sent = true; + } + } + + Ok(Async::Ready(())) + } +} + +impl ReceivedPing { + pub fn is_shutdown(&self) -> bool { + match *self { + ReceivedPing::Shutdown => true, + _ => false, + } + } +} diff --git a/third_party/rust/h2/src/proto/settings.rs b/third_party/rust/h2/src/proto/settings.rs new file mode 100644 index 000000000000..df6cbd0a9e2d --- /dev/null +++ b/third_party/rust/h2/src/proto/settings.rs @@ -0,0 +1,70 @@ +use codec::RecvError; +use frame; +use proto::*; + +#[derive(Debug)] +pub(crate) struct Settings { + /// Received SETTINGS frame pending processing. The ACK must be written to + /// the socket first then the settings applied **before** receiving any + /// further frames. 
+ pending: Option, +} + +impl Settings { + pub fn new() -> Self { + Settings { + pending: None, + } + } + + pub fn recv_settings(&mut self, frame: frame::Settings) { + if frame.is_ack() { + debug!("received remote settings ack"); + // TODO: handle acks + } else { + assert!(self.pending.is_none()); + self.pending = Some(frame); + } + } + + pub fn send_pending_ack( + &mut self, + dst: &mut Codec, + streams: &mut Streams, + ) -> Poll<(), RecvError> + where + T: AsyncWrite, + B: Buf, + C: Buf, + P: Peer, + { + trace!("send_pending_ack; pending={:?}", self.pending); + + if let Some(ref settings) = self.pending { + if !dst.poll_ready()?.is_ready() { + trace!("failed to send ACK"); + return Ok(Async::NotReady); + } + + // Create an ACK settings frame + let frame = frame::Settings::ack(); + + // Buffer the settings frame + dst.buffer(frame.into()) + .ok() + .expect("invalid settings frame"); + + trace!("ACK sent; applying settings"); + + if let Some(val) = settings.max_frame_size() { + dst.set_max_send_frame_size(val as usize); + } + + streams.apply_remote_settings(settings)?; + } + + self.pending = None; + + Ok(().into()) + } +} diff --git a/third_party/rust/h2/src/proto/streams/buffer.rs b/third_party/rust/h2/src/proto/streams/buffer.rs new file mode 100644 index 000000000000..4a42d95b321e --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/buffer.rs @@ -0,0 +1,112 @@ +use slab::Slab; + +/// Buffers frames for multiple streams. +#[derive(Debug)] +pub struct Buffer { + slab: Slab>, +} + +/// A sequence of frames in a `Buffer` +#[derive(Debug)] +pub struct Deque { + indices: Option, +} + +/// Tracks the head & tail for a sequence of frames in a `Buffer`. 
+#[derive(Debug, Default, Copy, Clone)] +struct Indices { + head: usize, + tail: usize, +} + +#[derive(Debug)] +struct Slot { + value: T, + next: Option, +} + +impl Buffer { + pub fn new() -> Self { + Buffer { + slab: Slab::new(), + } + } +} + +impl Deque { + pub fn new() -> Self { + Deque { + indices: None, + } + } + + pub fn is_empty(&self) -> bool { + self.indices.is_none() + } + + pub fn push_back(&mut self, buf: &mut Buffer, value: T) { + let key = buf.slab.insert(Slot { + value, + next: None, + }); + + match self.indices { + Some(ref mut idxs) => { + buf.slab[idxs.tail].next = Some(key); + idxs.tail = key; + }, + None => { + self.indices = Some(Indices { + head: key, + tail: key, + }); + }, + } + } + + pub fn push_front(&mut self, buf: &mut Buffer, value: T) { + let key = buf.slab.insert(Slot { + value, + next: None, + }); + + match self.indices { + Some(ref mut idxs) => { + buf.slab[key].next = Some(idxs.head); + idxs.head = key; + }, + None => { + self.indices = Some(Indices { + head: key, + tail: key, + }); + }, + } + } + + pub fn pop_front(&mut self, buf: &mut Buffer) -> Option { + match self.indices { + Some(mut idxs) => { + let mut slot = buf.slab.remove(idxs.head); + + if idxs.head == idxs.tail { + assert!(slot.next.is_none()); + self.indices = None; + } else { + idxs.head = slot.next.take().unwrap(); + self.indices = Some(idxs); + } + + return Some(slot.value); + }, + None => None, + } + } + + pub fn peek_front<'a, T>(&self, buf: &'a Buffer) -> Option<&'a T> { + match self.indices { + Some(idxs) => Some(&buf.slab[idxs.head].value), + None => None, + } + } +} diff --git a/third_party/rust/h2/src/proto/streams/counts.rs b/third_party/rust/h2/src/proto/streams/counts.rs new file mode 100644 index 000000000000..e027426ab7e9 --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/counts.rs @@ -0,0 +1,197 @@ +use super::*; + +use std::usize; + +#[derive(Debug)] +pub(super) struct Counts { + /// Acting as a client or server. 
This allows us to track which values to + /// inc / dec. + peer: peer::Dyn, + + /// Maximum number of locally initiated streams + max_send_streams: usize, + + /// Current number of remote initiated streams + num_send_streams: usize, + + /// Maximum number of remote initiated streams + max_recv_streams: usize, + + /// Current number of locally initiated streams + num_recv_streams: usize, + + /// Maximum number of pending locally reset streams + max_reset_streams: usize, + + /// Current number of pending locally reset streams + num_reset_streams: usize, +} + +impl Counts { + /// Create a new `Counts` using the provided configuration values. + pub fn new(peer: peer::Dyn, config: &Config) -> Self { + Counts { + peer, + max_send_streams: config.initial_max_send_streams, + num_send_streams: 0, + max_recv_streams: config.remote_max_initiated.unwrap_or(usize::MAX), + num_recv_streams: 0, + max_reset_streams: config.local_reset_max, + num_reset_streams: 0, + } + } + + /// Returns the current peer + pub fn peer(&self) -> peer::Dyn { + self.peer + } + + pub fn has_streams(&self) -> bool { + self.num_send_streams != 0 || self.num_recv_streams != 0 + } + + /// Returns true if the receive stream concurrency can be incremented + pub fn can_inc_num_recv_streams(&self) -> bool { + self.max_recv_streams > self.num_recv_streams + } + + /// Increments the number of concurrent receive streams. + /// + /// # Panics + /// + /// Panics on failure as this should have been validated before hand. + pub fn inc_num_recv_streams(&mut self, stream: &mut store::Ptr) { + assert!(self.can_inc_num_recv_streams()); + assert!(!stream.is_counted); + + // Increment the number of remote initiated streams + self.num_recv_streams += 1; + stream.is_counted = true; + } + + /// Returns true if the send stream concurrency can be incremented + pub fn can_inc_num_send_streams(&self) -> bool { + self.max_send_streams > self.num_send_streams + } + + /// Increments the number of concurrent send streams. 
+ /// + /// # Panics + /// + /// Panics on failure as this should have been validated before hand. + pub fn inc_num_send_streams(&mut self, stream: &mut store::Ptr) { + assert!(self.can_inc_num_send_streams()); + assert!(!stream.is_counted); + + // Increment the number of remote initiated streams + self.num_send_streams += 1; + stream.is_counted = true; + } + + /// Returns true if the number of pending reset streams can be incremented. + pub fn can_inc_num_reset_streams(&self) -> bool { + self.max_reset_streams > self.num_reset_streams + } + + /// Increments the number of pending reset streams. + /// + /// # Panics + /// + /// Panics on failure as this should have been validated before hand. + pub fn inc_num_reset_streams(&mut self) { + assert!(self.can_inc_num_reset_streams()); + + self.num_reset_streams += 1; + } + + pub fn apply_remote_settings(&mut self, settings: &frame::Settings) { + if let Some(val) = settings.max_concurrent_streams() { + self.max_send_streams = val as usize; + } + } + + /// Run a block of code that could potentially transition a stream's state. + /// + /// If the stream state transitions to closed, this function will perform + /// all necessary cleanup. + /// + /// TODO: Is this function still needed? + pub fn transition(&mut self, mut stream: store::Ptr, f: F) -> U + where + F: FnOnce(&mut Self, &mut store::Ptr) -> U, + { + // TODO: Does this need to be computed before performing the action? + let is_pending_reset = stream.is_pending_reset_expiration(); + + // Run the action + let ret = f(self, &mut stream); + + self.transition_after(stream, is_pending_reset); + + ret + } + + // TODO: move this to macro? 
+ pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { + trace!("transition_after; stream={:?}; state={:?}; is_closed={:?}; \ + pending_send_empty={:?}; buffered_send_data={}; \ + num_recv={}; num_send={}", + stream.id, + stream.state, + stream.is_closed(), + stream.pending_send.is_empty(), + stream.buffered_send_data, + self.num_recv_streams, + self.num_send_streams); + + if stream.is_closed() { + if !stream.is_pending_reset_expiration() { + stream.unlink(); + + if is_reset_counted { + self.dec_num_reset_streams(); + } + } + + if stream.is_counted { + trace!("dec_num_streams; stream={:?}", stream.id); + // Decrement the number of active streams. + self.dec_num_streams(&mut stream); + } + } + + // Release the stream if it requires releasing + if stream.is_released() { + stream.remove(); + } + } + + fn dec_num_streams(&mut self, stream: &mut store::Ptr) { + assert!(stream.is_counted); + + if self.peer.is_local_init(stream.id) { + assert!(self.num_send_streams > 0); + self.num_send_streams -= 1; + stream.is_counted = false; + } else { + assert!(self.num_recv_streams > 0); + self.num_recv_streams -= 1; + stream.is_counted = false; + } + } + + fn dec_num_reset_streams(&mut self) { + assert!(self.num_reset_streams > 0); + self.num_reset_streams -= 1; + } +} + +impl Drop for Counts { + fn drop(&mut self) { + use std::thread; + + if !thread::panicking() { + debug_assert!(!self.has_streams()); + } + } +} diff --git a/third_party/rust/h2/src/proto/streams/flow_control.rs b/third_party/rust/h2/src/proto/streams/flow_control.rs new file mode 100644 index 000000000000..9dd0fb17d9bb --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/flow_control.rs @@ -0,0 +1,254 @@ +use frame::Reason; +use proto::{WindowSize, MAX_WINDOW_SIZE}; + +use std::fmt; + +// We don't want to send WINDOW_UPDATE frames for tiny changes, but instead +// aggregate them when the changes are significant. 
Many implementations do +// this by keeping a "ratio" of the update version the allowed window size. +// +// While some may wish to represent this ratio as percentage, using a f32, +// we skip having to deal with float math and stick to integers. To do so, +// the "ratio" is represented by 2 i32s, split into the numerator and +// denominator. For example, a 50% ratio is simply represented as 1/2. +// +// An example applying this ratio: If a stream has an allowed window size of +// 100 bytes, WINDOW_UPDATE frames are scheduled when the unclaimed change +// becomes greater than 1/2, or 50 bytes. +const UNCLAIMED_NUMERATOR: i32 = 1; +const UNCLAIMED_DENOMINATOR: i32 = 2; + +#[test] +fn sanity_unclaimed_ratio() { + assert!(UNCLAIMED_NUMERATOR < UNCLAIMED_DENOMINATOR); + assert!(UNCLAIMED_NUMERATOR >= 0); + assert!(UNCLAIMED_DENOMINATOR > 0); +} + +#[derive(Copy, Clone, Debug)] +pub struct FlowControl { + /// Window the peer knows about. + /// + /// This can go negative if a SETTINGS_INITIAL_WINDOW_SIZE is received. + /// + /// For example, say the peer sends a request and uses 32kb of the window. + /// We send a SETTINGS_INITIAL_WINDOW_SIZE of 16kb. The peer has to adjust + /// its understanding of the capacity of the window, and that would be: + /// + /// ```notrust + /// default (64kb) - used (32kb) - settings_diff (64kb - 16kb): -16kb + /// ``` + window_size: Window, + + /// Window that we know about. + /// + /// This can go negative if a user declares a smaller target window than + /// the peer knows about. 
+ available: Window, +} + +impl FlowControl { + pub fn new() -> FlowControl { + FlowControl { + window_size: Window(0), + available: Window(0), + } + } + + /// Returns the window size as known by the peer + pub fn window_size(&self) -> WindowSize { + self.window_size.as_size() + } + + /// Returns the window size available to the consumer + pub fn available(&self) -> Window { + self.available + } + + /// Returns true if there is unavailable window capacity + pub fn has_unavailable(&self) -> bool { + if self.window_size < 0 { + return false; + } + + self.window_size > self.available + } + + pub fn claim_capacity(&mut self, capacity: WindowSize) { + self.available -= capacity; + } + + pub fn assign_capacity(&mut self, capacity: WindowSize) { + self.available += capacity; + } + + /// If a WINDOW_UPDATE frame should be sent, returns a positive number + /// representing the increment to be used. + /// + /// If there is no available bytes to be reclaimed, or the number of + /// available bytes does not reach the threshold, this returns `None`. + /// + /// This represents pending outbound WINDOW_UPDATE frames. + pub fn unclaimed_capacity(&self) -> Option { + let available = self.available; + + if self.window_size >= available { + return None; + } + + let unclaimed = available.0 - self.window_size.0; + let threshold = self.window_size.0 / UNCLAIMED_DENOMINATOR * UNCLAIMED_NUMERATOR; + + if unclaimed < threshold { + None + } else { + Some(unclaimed as WindowSize) + } + } + + /// Increase the window size. 
+ /// + /// This is called after receiving a WINDOW_UPDATE frame + pub fn inc_window(&mut self, sz: WindowSize) -> Result<(), Reason> { + let (val, overflow) = self.window_size.0.overflowing_add(sz as i32); + + if overflow { + return Err(Reason::FLOW_CONTROL_ERROR); + } + + if val > MAX_WINDOW_SIZE as i32 { + return Err(Reason::FLOW_CONTROL_ERROR); + } + + trace!( + "inc_window; sz={}; old={}; new={}", + sz, + self.window_size, + val + ); + + self.window_size = Window(val); + Ok(()) + } + + /// Decrement the window size. + /// + /// This is called after receiving a SETTINGS frame with a lower + /// INITIAL_WINDOW_SIZE value. + pub fn dec_window(&mut self, sz: WindowSize) { + trace!( + "dec_window; sz={}; window={}, available={}", + sz, + self.window_size, + self.available + ); + // This should not be able to overflow `window_size` from the bottom. + self.window_size -= sz; + } + + /// Decrements the window reflecting data has actually been sent. The caller + /// must ensure that the window has capacity. + pub fn send_data(&mut self, sz: WindowSize) { + trace!( + "send_data; sz={}; window={}; available={}", + sz, + self.window_size, + self.available + ); + + // Ensure that the argument is correct + assert!(sz <= self.window_size); + + // Update values + self.window_size -= sz; + self.available -= sz; + } +} + +/// The current capacity of a flow-controlled Window. +/// +/// This number can go negative when either side has used a certain amount +/// of capacity when the other side advertises a reduction in size. +/// +/// This type tries to centralize the knowledge of addition and subtraction +/// to this capacity, instead of having integer casts throughout the source. 
+#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +pub struct Window(i32); + +impl Window { + pub fn as_size(&self) -> WindowSize { + if self.0 < 0 { + 0 + } else { + self.0 as WindowSize + } + } + + pub fn checked_size(&self) -> WindowSize { + assert!(self.0 >= 0, "negative Window"); + self.0 as WindowSize + } +} + +impl PartialEq for Window { + fn eq(&self, other: &WindowSize) -> bool { + if self.0 < 0 { + false + } else { + (self.0 as WindowSize).eq(other) + } + } +} + + +impl PartialEq for WindowSize { + fn eq(&self, other: &Window) -> bool { + other.eq(self) + } +} + +impl PartialOrd for Window { + fn partial_cmp(&self, other: &WindowSize) -> Option<::std::cmp::Ordering> { + if self.0 < 0 { + Some(::std::cmp::Ordering::Less) + } else { + (self.0 as WindowSize).partial_cmp(other) + } + } +} + +impl PartialOrd for WindowSize { + fn partial_cmp(&self, other: &Window) -> Option<::std::cmp::Ordering> { + if other.0 < 0 { + Some(::std::cmp::Ordering::Greater) + } else { + self.partial_cmp(&(other.0 as WindowSize)) + } + } +} + + +impl ::std::ops::SubAssign for Window { + fn sub_assign(&mut self, other: WindowSize) { + self.0 -= other as i32; + } +} + +impl ::std::ops::Add for Window { + type Output = Self; + fn add(self, other: WindowSize) -> Self::Output { + Window(self.0 + other as i32) + } +} + +impl ::std::ops::AddAssign for Window { + fn add_assign(&mut self, other: WindowSize) { + self.0 += other as i32; + } +} + +impl fmt::Display for Window { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} diff --git a/third_party/rust/h2/src/proto/streams/mod.rs b/third_party/rust/h2/src/proto/streams/mod.rs new file mode 100644 index 000000000000..6216550c454d --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/mod.rs @@ -0,0 +1,62 @@ +mod buffer; +mod counts; +mod flow_control; +mod prioritize; +mod recv; +mod send; +mod state; +mod store; +mod stream; +mod streams; + +pub(crate) use self::prioritize::Prioritized; 
+pub(crate) use self::recv::Open; +pub(crate) use self::send::PollReset; +pub(crate) use self::streams::{StreamRef, OpaqueStreamRef, Streams}; + +use self::buffer::Buffer; +use self::counts::Counts; +use self::flow_control::FlowControl; +use self::prioritize::Prioritize; +use self::recv::Recv; +use self::send::Send; +use self::state::State; +use self::store::Store; +use self::stream::Stream; + +use frame::{StreamId, StreamIdOverflow}; +use proto::*; + +use bytes::Bytes; +use http::{Request, Response}; +use std::time::Duration; + +#[derive(Debug)] +pub struct Config { + /// Initial window size of locally initiated streams + pub local_init_window_sz: WindowSize, + + /// Initial maximum number of locally initiated streams. + /// After receiving a Settings frame from the remote peer, + /// the connection will overwrite this value with the + /// MAX_CONCURRENT_STREAMS specified in the frame. + pub initial_max_send_streams: usize, + + /// The stream ID to start the next local stream with + pub local_next_stream_id: StreamId, + + /// If the local peer is willing to receive push promises + pub local_push_enabled: bool, + + /// How long a locally reset stream should ignore frames + pub local_reset_duration: Duration, + + /// Maximum number of locally reset streams to keep at a time + pub local_reset_max: usize, + + /// Initial window size of remote initiated streams + pub remote_init_window_sz: WindowSize, + + /// Maximum number of remote initiated streams + pub remote_max_initiated: Option, +} diff --git a/third_party/rust/h2/src/proto/streams/prioritize.rs b/third_party/rust/h2/src/proto/streams/prioritize.rs new file mode 100644 index 000000000000..6e509b1f2f5c --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/prioritize.rs @@ -0,0 +1,852 @@ +use super::*; +use super::store::Resolve; + +use frame::{Reason, StreamId}; + +use codec::UserError; +use codec::UserError::*; + +use bytes::buf::Take; + +use std::{cmp, fmt, mem}; +use std::io; + +/// # Warning +/// +/// 
Queued streams are ordered by stream ID, as we need to ensure that +/// lower-numbered streams are sent headers before higher-numbered ones. +/// This is because "idle" stream IDs – those which have been initiated but +/// have yet to receive frames – will be implicitly closed on receipt of a +/// frame on a higher stream ID. If these queues was not ordered by stream +/// IDs, some mechanism would be necessary to ensure that the lowest-numberedh] +/// idle stream is opened first. +#[derive(Debug)] +pub(super) struct Prioritize { + /// Queue of streams waiting for socket capacity to send a frame. + pending_send: store::Queue, + + /// Queue of streams waiting for window capacity to produce data. + pending_capacity: store::Queue, + + /// Streams waiting for capacity due to max concurrency + /// + /// The `SendRequest` handle is `Clone`. This enables initiating requests + /// from many tasks. However, offering this capability while supporting + /// backpressure at some level is tricky. If there are many `SendRequest` + /// handles and a single stream becomes available, which handle gets + /// assigned that stream? Maybe that handle is no longer ready to send a + /// request. + /// + /// The strategy used is to allow each `SendRequest` handle one buffered + /// request. A `SendRequest` handle is ready to send a request if it has no + /// associated buffered requests. This is the same strategy as `mpsc` in the + /// futures library. + pending_open: store::Queue, + + /// Connection level flow control governing sent data + flow: FlowControl, + + /// Stream ID of the last stream opened. + last_opened_id: StreamId, + + /// What `DATA` frame is currently being sent in the codec. + in_flight_data_frame: InFlightData, +} + +#[derive(Debug, Eq, PartialEq)] +enum InFlightData { + /// There is no `DATA` frame in flight. + Nothing, + /// There is a `DATA` frame in flight belonging to the given stream. 
+ DataFrame(store::Key), + /// There was a `DATA` frame, but the stream's queue was since cleared. + Drop, +} + +pub(crate) struct Prioritized { + // The buffer + inner: Take, + + end_of_stream: bool, + + // The stream that this is associated with + stream: store::Key, +} + +// ===== impl Prioritize ===== + +impl Prioritize { + pub fn new(config: &Config) -> Prioritize { + let mut flow = FlowControl::new(); + + flow.inc_window(config.remote_init_window_sz) + .ok() + .expect("invalid initial window size"); + + flow.assign_capacity(config.remote_init_window_sz); + + trace!("Prioritize::new; flow={:?}", flow); + + Prioritize { + pending_send: store::Queue::new(), + pending_capacity: store::Queue::new(), + pending_open: store::Queue::new(), + flow: flow, + last_opened_id: StreamId::ZERO, + in_flight_data_frame: InFlightData::Nothing, + } + } + + /// Queue a frame to be sent to the remote + pub fn queue_frame( + &mut self, + frame: Frame, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + task: &mut Option, + ) { + // Queue the frame in the buffer + stream.pending_send.push_back(buffer, frame); + self.schedule_send(stream, task); + } + + pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { + // If the stream is waiting to be opened, nothing more to do. + if !stream.is_pending_open { + trace!("schedule_send; {:?}", stream.id); + // Queue the stream + self.pending_send.push(stream); + + // Notify the connection. 
+ if let Some(task) = task.take() { + task.notify(); + } + } + } + + pub fn queue_open(&mut self, stream: &mut store::Ptr) { + self.pending_open.push(stream); + } + + /// Send a data frame + pub fn send_data( + &mut self, + frame: frame::Data, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + task: &mut Option, + ) -> Result<(), UserError> + where + B: Buf, + { + let sz = frame.payload().remaining(); + + if sz > MAX_WINDOW_SIZE as usize { + return Err(UserError::PayloadTooBig); + } + + let sz = sz as WindowSize; + + if !stream.state.is_send_streaming() { + if stream.state.is_closed() { + return Err(InactiveStreamId); + } else { + return Err(UnexpectedFrameType); + } + } + + // Update the buffered data counter + stream.buffered_send_data += sz; + + trace!( + "send_data; sz={}; buffered={}; requested={}", + sz, + stream.buffered_send_data, + stream.requested_send_capacity + ); + + // Implicitly request more send capacity if not enough has been + // requested yet. + if stream.requested_send_capacity < stream.buffered_send_data { + // Update the target requested capacity + stream.requested_send_capacity = stream.buffered_send_data; + + self.try_assign_capacity(stream); + } + + if frame.is_end_stream() { + stream.state.send_close(); + self.reserve_capacity(0, stream, counts); + } + + trace!( + "send_data (2); available={}; buffered={}", + stream.send_flow.available(), + stream.buffered_send_data + ); + + // The `stream.buffered_send_data == 0` check is here so that, if a zero + // length data frame is queued to the front (there is no previously + // queued data), it gets sent out immediately even if there is no + // available send window. + // + // Sending out zero length data frames can be done to signal + // end-of-stream. + // + if stream.send_flow.available() > 0 || stream.buffered_send_data == 0 { + // The stream currently has capacity to send the data frame, so + // queue it up and notify the connection task. 
+ self.queue_frame(frame.into(), buffer, stream, task); + } else { + // The stream has no capacity to send the frame now, save it but + // don't notify the connection task. Once additional capacity + // becomes available, the frame will be flushed. + stream + .pending_send + .push_back(buffer, frame.into()); + } + + Ok(()) + } + + /// Request capacity to send data + pub fn reserve_capacity( + &mut self, + capacity: WindowSize, + stream: &mut store::Ptr, + counts: &mut Counts) { + trace!( + "reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}", + stream.id, + capacity, + capacity + stream.buffered_send_data, + stream.requested_send_capacity + ); + + // Actual capacity is `capacity` + the current amount of buffered data. + // If it were less, then we could never send out the buffered data. + let capacity = capacity + stream.buffered_send_data; + + if capacity == stream.requested_send_capacity { + // Nothing to do + } else if capacity < stream.requested_send_capacity { + // Update the target requested capacity + stream.requested_send_capacity = capacity; + + // Currently available capacity assigned to the stream + let available = stream.send_flow.available().as_size(); + + // If the stream has more assigned capacity than requested, reclaim + // some for the connection + if available > capacity { + let diff = available - capacity; + + stream.send_flow.claim_capacity(diff); + + self.assign_connection_capacity(diff, stream, counts); + } + } else { + // If trying to *add* capacity, but the stream send side is closed, + // there's nothing to be done. + if stream.state.is_send_closed() { + return; + } + + // Update the target requested capacity + stream.requested_send_capacity = capacity; + + // Try to assign additional capacity to the stream. If none is + // currently available, the stream will be queued to receive some + // when more becomes available. 
+ self.try_assign_capacity(stream); + } + } + + pub fn recv_stream_window_update( + &mut self, + inc: WindowSize, + stream: &mut store::Ptr, + ) -> Result<(), Reason> { + trace!( + "recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}", + stream.id, + stream.state, + inc, + stream.send_flow + ); + + if stream.state.is_send_closed() && stream.buffered_send_data == 0 { + // We can't send any data, so don't bother doing anything else. + return Ok(()); + } + + // Update the stream level flow control. + stream.send_flow.inc_window(inc)?; + + // If the stream is waiting on additional capacity, then this will + // assign it (if available on the connection) and notify the producer + self.try_assign_capacity(stream); + + Ok(()) + } + + pub fn recv_connection_window_update( + &mut self, + inc: WindowSize, + store: &mut Store, + counts: &mut Counts, + ) -> Result<(), Reason> { + // Update the connection's window + self.flow.inc_window(inc)?; + + self.assign_connection_capacity(inc, store, counts); + Ok(()) + } + + /// Reclaim all capacity assigned to the stream and re-assign it to the + /// connection + pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { + let available = stream.send_flow.available().as_size(); + stream.send_flow.claim_capacity(available); + // Re-assign all capacity to the connection + self.assign_connection_capacity(available, stream, counts); + } + + /// Reclaim just reserved capacity, not buffered capacity, and re-assign + /// it to the connection + pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { + // only reclaim requested capacity that isn't already buffered + if stream.requested_send_capacity > stream.buffered_send_data { + let reserved = stream.requested_send_capacity - stream.buffered_send_data; + + stream.send_flow.claim_capacity(reserved); + self.assign_connection_capacity(reserved, stream, counts); + } + } + + pub fn clear_pending_capacity(&mut self, 
store: &mut Store, counts: &mut Counts) { + while let Some(stream) = self.pending_capacity.pop(store) { + counts.transition(stream, |_, stream| { + trace!("clear_pending_capacity; stream={:?}", stream.id); + }) + } + } + + pub fn assign_connection_capacity( + &mut self, + inc: WindowSize, + store: &mut R, + counts: &mut Counts) + where + R: Resolve, + { + trace!("assign_connection_capacity; inc={}", inc); + + self.flow.assign_capacity(inc); + + // Assign newly acquired capacity to streams pending capacity. + while self.flow.available() > 0 { + let stream = match self.pending_capacity.pop(store) { + Some(stream) => stream, + None => return, + }; + + counts.transition(stream, |_, mut stream| { + // Try to assign capacity to the stream. This will also re-queue the + // stream if there isn't enough connection level capacity to fulfill + // the capacity request. + self.try_assign_capacity(&mut stream); + }) + } + } + + /// Request capacity to send data + fn try_assign_capacity(&mut self, stream: &mut store::Ptr) { + let total_requested = stream.requested_send_capacity; + + // Total requested should never go below actual assigned + // (Note: the window size can go lower than assigned) + debug_assert!(total_requested >= stream.send_flow.available()); + + // The amount of additional capacity that the stream requests. + // Don't assign more than the window has available! 
+ let additional = cmp::min( + total_requested - stream.send_flow.available().as_size(), + // Can't assign more than what is available + stream.send_flow.window_size() - stream.send_flow.available().as_size(), + ); + + trace!( + "try_assign_capacity; requested={}; additional={}; buffered={}; window={}; conn={}", + total_requested, + additional, + stream.buffered_send_data, + stream.send_flow.window_size(), + self.flow.available() + ); + + if additional == 0 { + // Nothing more to do + return; + } + + // If the stream has requested capacity, then it must be in the + // streaming state (more data could be sent) or there is buffered data + // waiting to be sent. + debug_assert!( + stream.state.is_send_streaming() || stream.buffered_send_data > 0, + "state={:?}", + stream.state + ); + + // The amount of currently available capacity on the connection + let conn_available = self.flow.available().as_size(); + + // First check if capacity is immediately available + if conn_available > 0 { + // The amount of capacity to assign to the stream + // TODO: Should prioritization factor into this? + let assign = cmp::min(conn_available, additional); + + trace!(" assigning; num={}", assign); + + // Assign the capacity to the stream + stream.assign_capacity(assign); + + // Claim the capacity from the connection + self.flow.claim_capacity(assign); + } + + trace!( + "try_assign_capacity; available={}; requested={}; buffered={}; has_unavailable={:?}", + stream.send_flow.available(), + stream.requested_send_capacity, + stream.buffered_send_data, + stream.send_flow.has_unavailable() + ); + + if stream.send_flow.available() < stream.requested_send_capacity { + if stream.send_flow.has_unavailable() { + // The stream requires additional capacity and the stream's + // window has available capacity, but the connection window + // does not. + // + // In this case, the stream needs to be queued up for when the + // connection has more capacity. 
+ self.pending_capacity.push(stream); + } + } + + // If data is buffered and the stream is not pending open, then + // schedule the stream for execution + // + // Why do we not push into pending_send when the stream is in pending_open? + // + // We allow users to call send_request() which schedules a stream to be pending_open + // if there is no room according to the concurrency limit (max_send_streams), and we + // also allow data to be buffered for send with send_data() if there is no capacity for + // the stream to send the data, which attempts to place the stream in pending_send. + // If the stream is not open, we don't want the stream to be scheduled for + // execution (pending_send). Note that if the stream is in pending_open, it will be + // pushed to pending_send when there is room for an open stream. + if stream.buffered_send_data > 0 && !stream.is_pending_open { + // TODO: This assertion isn't *exactly* correct. There can still be + // buffered send data while the stream's pending send queue is + // empty. This can happen when a large data frame is in the process + // of being **partially** sent. Once the window has been sent, the + // data frame will be returned to the prioritization layer to be + // re-scheduled. + // + // That said, it would be nice to figure out how to make this + // assertion correctly. 
+ // + // debug_assert!(!stream.pending_send.is_empty()); + + self.pending_send.push(stream); + } + } + + pub fn poll_complete( + &mut self, + buffer: &mut Buffer>, + store: &mut Store, + counts: &mut Counts, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + // Ensure codec is ready + try_ready!(dst.poll_ready()); + + // Reclaim any frame that has previously been written + self.reclaim_frame(buffer, store, dst); + + // The max frame length + let max_frame_len = dst.max_send_frame_size(); + + trace!("poll_complete"); + + loop { + self.schedule_pending_open(store, counts); + + match self.pop_frame(buffer, store, max_frame_len, counts) { + Some(frame) => { + trace!("writing frame={:?}", frame); + + debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); + if let Frame::Data(ref frame) = frame { + self.in_flight_data_frame = InFlightData::DataFrame(frame.payload().stream); + } + dst.buffer(frame).ok().expect("invalid frame"); + + // Ensure the codec is ready to try the loop again. + try_ready!(dst.poll_ready()); + + // Because, always try to reclaim... + self.reclaim_frame(buffer, store, dst); + }, + None => { + // Try to flush the codec. + try_ready!(dst.flush()); + + // This might release a data frame... + if !self.reclaim_frame(buffer, store, dst) { + return Ok(().into()); + } + + // No need to poll ready as poll_complete() does this for + // us... + }, + } + } + } + + /// Tries to reclaim a pending data frame from the codec. + /// + /// Returns true if a frame was reclaimed. + /// + /// When a data frame is written to the codec, it may not be written in its + /// entirety (large chunks are split up into potentially many data frames). + /// In this case, the stream needs to be reprioritized. 
+ fn reclaim_frame( + &mut self, + buffer: &mut Buffer>, + store: &mut Store, + dst: &mut Codec>, + ) -> bool + where + B: Buf, + { + trace!("try reclaim frame"); + + // First check if there are any data chunks to take back + if let Some(frame) = dst.take_last_data_frame() { + trace!( + " -> reclaimed; frame={:?}; sz={}", + frame, + frame.payload().inner.get_ref().remaining() + ); + + let mut eos = false; + let key = frame.payload().stream; + + match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { + InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), + InFlightData::Drop => { + trace!("not reclaiming frame for cancelled stream"); + return false; + } + InFlightData::DataFrame(k) => { + debug_assert_eq!(k, key); + } + } + + let mut frame = frame.map(|prioritized| { + // TODO: Ensure fully written + eos = prioritized.end_of_stream; + prioritized.inner.into_inner() + }); + + if frame.payload().has_remaining() { + let mut stream = store.resolve(key); + + if eos { + frame.set_end_stream(true); + } + + self.push_back_frame(frame.into(), buffer, &mut stream); + + return true; + } + } + + false + } + + /// Push the frame to the front of the stream's deque, scheduling the + /// stream if needed. + fn push_back_frame(&mut self, + frame: Frame, + buffer: &mut Buffer>, + stream: &mut store::Ptr) + { + // Push the frame to the front of the stream's deque + stream.pending_send.push_front(buffer, frame); + + // If needed, schedule the sender + if stream.send_flow.available() > 0 { + debug_assert!(!stream.pending_send.is_empty()); + self.pending_send.push(stream); + } + } + + pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { + trace!("clear_queue; stream-id={:?}", stream.id); + + // TODO: make this more efficient? 
+ while let Some(frame) = stream.pending_send.pop_front(buffer) { + trace!("dropping; frame={:?}", frame); + } + + stream.buffered_send_data = 0; + stream.requested_send_capacity = 0; + if let InFlightData::DataFrame(key) = self.in_flight_data_frame { + if stream.key() == key { + // This stream could get cleaned up now - don't allow the buffered frame to get reclaimed. + self.in_flight_data_frame = InFlightData::Drop; + } + } + } + + pub fn clear_pending_send(&mut self, store: &mut Store, counts: &mut Counts) { + while let Some(stream) = self.pending_send.pop(store) { + let is_pending_reset = stream.is_pending_reset_expiration(); + counts.transition_after(stream, is_pending_reset); + } + } + + pub fn clear_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { + while let Some(stream) = self.pending_open.pop(store) { + let is_pending_reset = stream.is_pending_reset_expiration(); + counts.transition_after(stream, is_pending_reset); + } + } + + fn pop_frame( + &mut self, + buffer: &mut Buffer>, + store: &mut Store, + max_len: usize, + counts: &mut Counts, + ) -> Option>> + where + B: Buf, + { + trace!("pop_frame"); + + loop { + match self.pending_send.pop(store) { + Some(mut stream) => { + trace!("pop_frame; stream={:?}; stream.state={:?}", + stream.id, stream.state); + + // It's possible that this stream, besides having data to send, + // is also queued to send a reset, and thus is already in the queue + // to wait for "some time" after a reset. + // + // To be safe, we just always ask the stream. + let is_pending_reset = stream.is_pending_reset_expiration(); + + trace!(" --> stream={:?}; is_pending_reset={:?};", + stream.id, is_pending_reset); + + let frame = match stream.pending_send.pop_front(buffer) { + Some(Frame::Data(mut frame)) => { + // Get the amount of capacity remaining for stream's + // window. 
+ let stream_capacity = stream.send_flow.available(); + let sz = frame.payload().remaining(); + + trace!( + " --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \ + available={}; requested={}; buffered={};", + frame.stream_id(), + sz, + frame.is_end_stream(), + stream_capacity, + stream.send_flow.available(), + stream.requested_send_capacity, + stream.buffered_send_data, + ); + + // Zero length data frames always have capacity to + // be sent. + if sz > 0 && stream_capacity == 0 { + trace!( + " --> stream capacity is 0; requested={}", + stream.requested_send_capacity + ); + + // Ensure that the stream is waiting for + // connection level capacity + // + // TODO: uncomment + // debug_assert!(stream.is_pending_send_capacity); + + // The stream has no more capacity, this can + // happen if the remote reduced the stream + // window. In this case, we need to buffer the + // frame and wait for a window update... + stream + .pending_send + .push_front(buffer, frame.into()); + + continue; + } + + // Only send up to the max frame length + let len = cmp::min(sz, max_len); + + // Only send up to the stream's window capacity + let len = cmp::min(len, stream_capacity.as_size() as usize) as WindowSize; + + // There *must* be be enough connection level + // capacity at this point. + debug_assert!(len <= self.flow.window_size()); + + trace!(" --> sending data frame; len={}", len); + + // Update the flow control + trace!(" -- updating stream flow --"); + stream.send_flow.send_data(len); + + // Decrement the stream's buffered data counter + debug_assert!(stream.buffered_send_data >= len); + stream.buffered_send_data -= len; + stream.requested_send_capacity -= len; + + // Assign the capacity back to the connection that + // was just consumed from the stream in the previous + // line. 
+ self.flow.assign_capacity(len); + + trace!(" -- updating connection flow --"); + self.flow.send_data(len); + + // Wrap the frame's data payload to ensure that the + // correct amount of data gets written. + + let eos = frame.is_end_stream(); + let len = len as usize; + + if frame.payload().remaining() > len { + frame.set_end_stream(false); + } + + Frame::Data(frame.map(|buf| { + Prioritized { + inner: buf.take(len), + end_of_stream: eos, + stream: stream.key(), + } + })) + }, + Some(frame) => frame.map(|_| + unreachable!( + "Frame::map closure will only be called \ + on DATA frames." + ) + ), + None => { + if let Some(reason) = stream.state.get_scheduled_reset() { + stream.state.set_reset(reason); + + let frame = frame::Reset::new(stream.id, reason); + Frame::Reset(frame) + } else { + // If the stream receives a RESET from the peer, it may have + // had data buffered to be sent, but all the frames are cleared + // in clear_queue(). Instead of doing O(N) traversal through queue + // to remove, lets just ignore the stream here. + trace!("removing dangling stream from pending_send"); + // Since this should only happen as a consequence of `clear_queue`, + // we must be in a closed state of some kind. + debug_assert!(stream.state.is_closed()); + counts.transition_after(stream, is_pending_reset); + continue; + } + } + }; + + trace!("pop_frame; frame={:?}", frame); + + if cfg!(debug_assertions) && stream.state.is_idle() { + debug_assert!(stream.id > self.last_opened_id); + self.last_opened_id = stream.id; + } + + if !stream.pending_send.is_empty() || stream.state.is_scheduled_reset() { + // TODO: Only requeue the sender IF it is ready to send + // the next frame. i.e. don't requeue it if the next + // frame is a data frame and the stream does not have + // any more capacity. 
+ self.pending_send.push(&mut stream); + } + + counts.transition_after(stream, is_pending_reset); + + return Some(frame); + }, + None => return None, + } + } + } + + fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { + trace!("schedule_pending_open"); + // check for any pending open streams + while counts.can_inc_num_send_streams() { + if let Some(mut stream) = self.pending_open.pop(store) { + trace!("schedule_pending_open; stream={:?}", stream.id); + + counts.inc_num_send_streams(&mut stream); + self.pending_send.push(&mut stream); + stream.notify_send(); + } else { + return; + } + } + } +} + +// ===== impl Prioritized ===== + +impl Buf for Prioritized +where + B: Buf, +{ + fn remaining(&self) -> usize { + self.inner.remaining() + } + + fn bytes(&self) -> &[u8] { + self.inner.bytes() + } + + fn advance(&mut self, cnt: usize) { + self.inner.advance(cnt) + } +} + +impl fmt::Debug for Prioritized { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Prioritized") + .field("remaining", &self.inner.get_ref().remaining()) + .field("end_of_stream", &self.end_of_stream) + .field("stream", &self.stream) + .finish() + } +} diff --git a/third_party/rust/h2/src/proto/streams/recv.rs b/third_party/rust/h2/src/proto/streams/recv.rs new file mode 100644 index 000000000000..83e49a4f58c3 --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/recv.rs @@ -0,0 +1,952 @@ +use super::*; +use {frame, proto}; +use codec::{RecvError, UserError}; +use frame::{Reason, DEFAULT_INITIAL_WINDOW_SIZE}; + +use http::HeaderMap; + +use std::io; +use std::time::{Duration, Instant}; + +#[derive(Debug)] +pub(super) struct Recv { + /// Initial window size of remote initiated streams + init_window_sz: WindowSize, + + /// Connection level flow control governing received data + flow: FlowControl, + + /// Amount of connection window capacity currently used by outstanding streams. 
+ in_flight_data: WindowSize, + + /// The lowest stream ID that is still idle + next_stream_id: Result, + + /// The stream ID of the last processed stream + last_processed_id: StreamId, + + /// Any streams with a higher ID are ignored. + /// + /// This starts as MAX, but is lowered when a GOAWAY is received. + /// + /// > After sending a GOAWAY frame, the sender can discard frames for + /// > streams initiated by the receiver with identifiers higher than + /// > the identified last stream. + max_stream_id: StreamId, + + /// Streams that have pending window updates + pending_window_updates: store::Queue, + + /// New streams to be accepted + pending_accept: store::Queue, + + /// Locally reset streams that should be reaped when they expire + pending_reset_expired: store::Queue, + + /// How long locally reset streams should ignore received frames + reset_duration: Duration, + + /// Holds frames that are waiting to be read + buffer: Buffer, + + /// Refused StreamId, this represents a frame that must be sent out. + refused: Option, + + /// If push promises are allowed to be recevied. 
+ is_push_enabled: bool, +} + +#[derive(Debug)] +pub(super) enum Event { + Headers(peer::PollMessage), + Data(Bytes), + Trailers(HeaderMap), +} + +#[derive(Debug)] +pub(super) enum RecvHeaderBlockError { + Oversize(T), + State(RecvError), +} + +#[derive(Debug)] +pub(crate) enum Open { + PushPromise, + Headers, +} + +#[derive(Debug, Clone, Copy)] +struct Indices { + head: store::Key, + tail: store::Key, +} + +impl Recv { + pub fn new(peer: peer::Dyn, config: &Config) -> Self { + let next_stream_id = if peer.is_server() { 1 } else { 2 }; + + let mut flow = FlowControl::new(); + + // connections always have the default window size, regardless of + // settings + flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE) + .expect("invalid initial remote window size"); + flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE); + + Recv { + init_window_sz: config.local_init_window_sz, + flow: flow, + in_flight_data: 0 as WindowSize, + next_stream_id: Ok(next_stream_id.into()), + pending_window_updates: store::Queue::new(), + last_processed_id: StreamId::ZERO, + max_stream_id: StreamId::MAX, + pending_accept: store::Queue::new(), + pending_reset_expired: store::Queue::new(), + reset_duration: config.local_reset_duration, + buffer: Buffer::new(), + refused: None, + is_push_enabled: config.local_push_enabled, + } + } + + /// Returns the initial receive window size + pub fn init_window_sz(&self) -> WindowSize { + self.init_window_sz + } + + /// Returns the ID of the last processed stream + pub fn last_processed_id(&self) -> StreamId { + self.last_processed_id + } + + /// Update state reflecting a new, remotely opened stream + /// + /// Returns the stream state if successful. 
`None` if refused + pub fn open( + &mut self, + id: StreamId, + mode: Open, + counts: &mut Counts, + ) -> Result, RecvError> { + assert!(self.refused.is_none()); + + counts.peer().ensure_can_open(id, mode)?; + + let next_id = self.next_stream_id()?; + if id < next_id { + trace!("id ({:?}) < next_id ({:?}), PROTOCOL_ERROR", id, next_id); + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + self.next_stream_id = id.next_id(); + + if !counts.can_inc_num_recv_streams() { + self.refused = Some(id); + return Ok(None); + } + + Ok(Some(id)) + } + + /// Transition the stream state based on receiving headers + /// + /// The caller ensures that the frame represents headers and not trailers. + pub fn recv_headers( + &mut self, + frame: frame::Headers, + stream: &mut store::Ptr, + counts: &mut Counts, + ) -> Result<(), RecvHeaderBlockError>> { + trace!("opening stream; init_window={}", self.init_window_sz); + let is_initial = stream.state.recv_open(frame.is_end_stream())?; + + if is_initial { + // TODO: be smarter about this logic + if frame.stream_id() > self.last_processed_id { + self.last_processed_id = frame.stream_id(); + } + + // Increment the number of concurrent streams + counts.inc_num_recv_streams(stream); + } + + if !stream.content_length.is_head() { + use super::stream::ContentLength; + use http::header; + + if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) { + let content_length = match parse_u64(content_length.as_bytes()) { + Ok(v) => v, + Err(_) => { + return Err(RecvError::Stream { + id: stream.id, + reason: Reason::PROTOCOL_ERROR, + }.into()) + }, + }; + + stream.content_length = ContentLength::Remaining(content_length); + } + } + + if frame.is_over_size() { + // A frame is over size if the decoded header block was bigger than + // SETTINGS_MAX_HEADER_LIST_SIZE. 
+ // + // > A server that receives a larger header block than it is willing + // > to handle can send an HTTP 431 (Request Header Fields Too + // > Large) status code [RFC6585]. A client can discard responses + // > that it cannot process. + // + // So, if peer is a server, we'll send a 431. In either case, + // an error is recorded, which will send a REFUSED_STREAM, + // since we don't want any of the data frames either. + trace!("recv_headers; frame for {:?} is over size", stream.id); + return if counts.peer().is_server() && is_initial { + let mut res = frame::Headers::new( + stream.id, + frame::Pseudo::response(::http::StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE), + HeaderMap::new() + ); + res.set_end_stream(); + Err(RecvHeaderBlockError::Oversize(Some(res))) + } else { + Err(RecvHeaderBlockError::Oversize(None)) + }; + } + + let message = counts.peer().convert_poll_message(frame)?; + + // Push the frame onto the stream's recv buffer + stream + .pending_recv + .push_back(&mut self.buffer, Event::Headers(message)); + stream.notify_recv(); + + // Only servers can receive a headers frame that initiates the stream. + // This is verified in `Streams` before calling this function. + if counts.peer().is_server() { + self.pending_accept.push(stream); + } + + Ok(()) + } + + /// Called by the server to get the request + /// + /// TODO: Should this fn return `Result`? + pub fn take_request(&mut self, stream: &mut store::Ptr) + -> Request<()> + { + use super::peer::PollMessage::*; + + match stream.pending_recv.pop_front(&mut self.buffer) { + Some(Event::Headers(Server(request))) => request, + _ => panic!(), + } + } + + /// Called by the client to get the response + pub fn poll_response( + &mut self, + stream: &mut store::Ptr, + ) -> Poll, proto::Error> { + use super::peer::PollMessage::*; + + // If the buffer is not empty, then the first frame must be a HEADERS + // frame or the user violated the contract. 
+ match stream.pending_recv.pop_front(&mut self.buffer) { + Some(Event::Headers(Client(response))) => Ok(response.into()), + Some(_) => panic!("poll_response called after response returned"), + None => { + stream.state.ensure_recv_open()?; + + stream.recv_task = Some(task::current()); + Ok(Async::NotReady) + }, + } + } + + /// Transition the stream based on receiving trailers + pub fn recv_trailers( + &mut self, + frame: frame::Headers, + stream: &mut store::Ptr, + ) -> Result<(), RecvError> { + // Transition the state + stream.state.recv_close()?; + + if stream.ensure_content_length_zero().is_err() { + return Err(RecvError::Stream { + id: stream.id, + reason: Reason::PROTOCOL_ERROR, + }); + } + + let trailers = frame.into_fields(); + + // Push the frame onto the stream's recv buffer + stream + .pending_recv + .push_back(&mut self.buffer, Event::Trailers(trailers)); + stream.notify_recv(); + + Ok(()) + } + + /// Releases capacity of the connection + pub fn release_connection_capacity( + &mut self, + capacity: WindowSize, + task: &mut Option, + ) { + trace!("release_connection_capacity; size={}", capacity); + + // Decrement in-flight data + self.in_flight_data -= capacity; + + // Assign capacity to connection + self.flow.assign_capacity(capacity); + + if self.flow.unclaimed_capacity().is_some() { + if let Some(task) = task.take() { + task.notify(); + } + } + } + + /// Releases capacity back to the connection & stream + pub fn release_capacity( + &mut self, + capacity: WindowSize, + stream: &mut store::Ptr, + task: &mut Option, + ) -> Result<(), UserError> { + trace!("release_capacity; size={}", capacity); + + if capacity > stream.in_flight_recv_data { + return Err(UserError::ReleaseCapacityTooBig); + } + + self.release_connection_capacity(capacity, task); + + // Decrement in-flight data + stream.in_flight_recv_data -= capacity; + + // Assign capacity to stream + stream.recv_flow.assign_capacity(capacity); + + + if stream.recv_flow.unclaimed_capacity().is_some() { + 
// Queue the stream for sending the WINDOW_UPDATE frame. + self.pending_window_updates.push(stream); + + if let Some(task) = task.take() { + task.notify(); + } + } + + Ok(()) + } + + /// Set the "target" connection window size. + /// + /// By default, all new connections start with 64kb of window size. As + /// streams used and release capacity, we will send WINDOW_UPDATEs for the + /// connection to bring it back up to the initial "target". + /// + /// Setting a target means that we will try to tell the peer about + /// WINDOW_UPDATEs so the peer knows it has about `target` window to use + /// for the whole connection. + /// + /// The `task` is an optional parked task for the `Connection` that might + /// be blocked on needing more window capacity. + pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option) { + trace!( + "set_target_connection_window; target={}; available={}, reserved={}", + target, + self.flow.available(), + self.in_flight_data, + ); + + // The current target connection window is our `available` plus any + // in-flight data reserved by streams. + // + // Update the flow controller with the difference between the new + // target and the current target. + let current = (self.flow.available() + self.in_flight_data).checked_size(); + if target > current { + self.flow.assign_capacity(target - current); + } else { + self.flow.claim_capacity(current - target); + } + + // If changing the target capacity means we gained a bunch of capacity, + // enough that we went over the update threshold, then schedule sending + // a connection WINDOW_UPDATE. 
+ if self.flow.unclaimed_capacity().is_some() { + if let Some(task) = task.take() { + task.notify(); + } + } + } + + pub fn body_is_empty(&self, stream: &store::Ptr) -> bool { + if !stream.state.is_recv_closed() { + return false; + } + + stream + .pending_recv + .peek_front(&self.buffer) + .map(|event| !event.is_data()) + .unwrap_or(true) + } + + pub fn is_end_stream(&self, stream: &store::Ptr) -> bool { + if !stream.state.is_recv_closed() { + return false; + } + + stream + .pending_recv + .is_empty() + } + + pub fn recv_data( + &mut self, + frame: frame::Data, + stream: &mut store::Ptr, + ) -> Result<(), RecvError> { + let sz = frame.payload().len(); + + // This should have been enforced at the codec::FramedRead layer, so + // this is just a sanity check. + assert!(sz <= MAX_WINDOW_SIZE as usize); + + let sz = sz as WindowSize; + + let is_ignoring_frame = stream.state.is_local_reset(); + + if !is_ignoring_frame && !stream.state.is_recv_streaming() { + // TODO: There are cases where this can be a stream error of + // STREAM_CLOSED instead... + + // Receiving a DATA frame when not expecting one is a protocol + // error. + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + trace!( + "recv_data; size={}; connection={}; stream={}", + sz, + self.flow.window_size(), + stream.recv_flow.window_size() + ); + + // Ensure that there is enough capacity on the connection before acting + // on the stream. + self.consume_connection_window(sz)?; + + if is_ignoring_frame { + trace!( + "recv_data frame ignored on locally reset {:?} for some time", + stream.id, + ); + // we just checked for enough connection window capacity, and + // consumed it. Since we are ignoring this frame "for some time", + // we aren't returning the frame to the user. That means they + // have no way to release the capacity back to the connection. So + // we have to release it automatically. 
+ // + // This call doesn't send a WINDOW_UPDATE immediately, just marks + // the capacity as available to be reclaimed. When the available + // capacity meets a threshold, a WINDOW_UPDATE is then sent. + self.release_connection_capacity(sz, &mut None); + return Ok(()); + } + + if stream.recv_flow.window_size() < sz { + // http://httpwg.org/specs/rfc7540.html#WINDOW_UPDATE + // > A receiver MAY respond with a stream error (Section 5.4.2) or + // > connection error (Section 5.4.1) of type FLOW_CONTROL_ERROR if + // > it is unable to accept a frame. + // + // So, for violating the **stream** window, we can send either a + // stream or connection error. We've opted to send a stream + // error. + return Err(RecvError::Stream { + id: stream.id, + reason: Reason::FLOW_CONTROL_ERROR, + }); + } + + // Update stream level flow control + stream.recv_flow.send_data(sz); + + // Track the data as in-flight + stream.in_flight_recv_data += sz; + + if stream.dec_content_length(frame.payload().len()).is_err() { + trace!("content-length overflow"); + return Err(RecvError::Stream { + id: stream.id, + reason: Reason::PROTOCOL_ERROR, + }); + } + + if frame.is_end_stream() { + if stream.ensure_content_length_zero().is_err() { + trace!("content-length underflow"); + return Err(RecvError::Stream { + id: stream.id, + reason: Reason::PROTOCOL_ERROR, + }); + } + + if stream.state.recv_close().is_err() { + trace!("failed to transition to closed state"); + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + } + + let event = Event::Data(frame.into_payload()); + + // Push the frame onto the recv buffer + stream.pending_recv.push_back(&mut self.buffer, event); + stream.notify_recv(); + + Ok(()) + } + + pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), RecvError> { + if self.flow.window_size() < sz { + return Err(RecvError::Connection(Reason::FLOW_CONTROL_ERROR)); + } + + // Update connection level flow control + self.flow.send_data(sz); + + // Track the data 
as in-flight + self.in_flight_data += sz; + Ok(()) + } + + pub fn recv_push_promise( + &mut self, + frame: frame::PushPromise, + stream: &mut store::Ptr, + ) -> Result<(), RecvError> { + + // TODO: Streams in the reserved states do not count towards the concurrency + // limit. However, it seems like there should be a cap otherwise this + // could grow in memory indefinitely. + + stream.state.reserve_remote()?; + + if frame.is_over_size() { + // A frame is over size if the decoded header block was bigger than + // SETTINGS_MAX_HEADER_LIST_SIZE. + // + // > A server that receives a larger header block than it is willing + // > to handle can send an HTTP 431 (Request Header Fields Too + // > Large) status code [RFC6585]. A client can discard responses + // > that it cannot process. + // + // So, if peer is a server, we'll send a 431. In either case, + // an error is recorded, which will send a REFUSED_STREAM, + // since we don't want any of the data frames either. + trace!("recv_push_promise; frame for {:?} is over size", frame.promised_id()); + return Err(RecvError::Stream { + id: frame.promised_id(), + reason: Reason::REFUSED_STREAM, + }); + } + + Ok(()) + } + + /// Ensures that `id` is not in the `Idle` state. + pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { + if let Ok(next) = self.next_stream_id { + if id >= next { + trace!("stream ID implicitly closed"); + return Err(Reason::PROTOCOL_ERROR); + } + } + // if next_stream_id is overflowed, that's ok. + + Ok(()) + } + + /// Handle remote sending an explicit RST_STREAM. 
+ pub fn recv_reset(&mut self, frame: frame::Reset, stream: &mut Stream) { + // Notify the stream + stream.state.recv_reset(frame.reason(), stream.is_pending_send); + + stream.notify_send(); + stream.notify_recv(); + } + + /// Handle a received error + pub fn recv_err(&mut self, err: &proto::Error, stream: &mut Stream) { + // Receive an error + stream.state.recv_err(err); + + // If a receiver is waiting, notify it + stream.notify_send(); + stream.notify_recv(); + } + + pub fn go_away(&mut self, last_processed_id: StreamId) { + assert!(self.max_stream_id >= last_processed_id); + self.max_stream_id = last_processed_id; + } + + pub fn recv_eof(&mut self, stream: &mut Stream) { + stream.state.recv_eof(); + stream.notify_send(); + stream.notify_recv(); + } + + /// Get the max ID of streams we can receive. + /// + /// This gets lowered if we send a GOAWAY frame. + pub fn max_stream_id(&self) -> StreamId { + self.max_stream_id + } + + fn next_stream_id(&self) -> Result { + if let Ok(id) = self.next_stream_id { + Ok(id) + } else { + Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + } + } + + /// Returns true if the remote peer can reserve a stream with the given ID. + pub fn ensure_can_reserve(&self) + -> Result<(), RecvError> + { + if !self.is_push_enabled { + trace!("recv_push_promise; error push is disabled"); + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + Ok(()) + } + + /// Add a locally reset stream to queue to be eventually reaped. 
+ pub fn enqueue_reset_expiration( + &mut self, + stream: &mut store::Ptr, + counts: &mut Counts, + ) { + if !stream.state.is_local_reset() || stream.is_pending_reset_expiration() { + return; + } + + trace!("enqueue_reset_expiration; {:?}", stream.id); + + if !counts.can_inc_num_reset_streams() { + // try to evict 1 stream if possible + // if max allow is 0, this won't be able to evict, + // and then we'll just bail after + if let Some(evicted) = self.pending_reset_expired.pop(stream.store_mut()) { + counts.transition_after(evicted, true); + } + } + + if counts.can_inc_num_reset_streams() { + counts.inc_num_reset_streams(); + self.pending_reset_expired.push(stream); + } + } + + /// Send any pending refusals. + pub fn send_pending_refusal( + &mut self, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + if let Some(stream_id) = self.refused { + try_ready!(dst.poll_ready()); + + // Create the RST_STREAM frame + let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM); + + // Buffer the frame + dst.buffer(frame.into()) + .ok() + .expect("invalid RST_STREAM frame"); + } + + self.refused = None; + + Ok(Async::Ready(())) + } + + pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { + let now = Instant::now(); + let reset_duration = self.reset_duration; + while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| { + let reset_at = stream.reset_at.expect("reset_at must be set if in queue"); + now - reset_at > reset_duration + }) { + counts.transition_after(stream, true); + } + } + + pub fn clear_queues(&mut self, + clear_pending_accept: bool, + store: &mut Store, + counts: &mut Counts) + { + self.clear_stream_window_update_queue(store, counts); + self.clear_all_reset_streams(store, counts); + + if clear_pending_accept { + self.clear_all_pending_accept(store, counts); + } + } + + fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) { + while let 
Some(stream) = self.pending_window_updates.pop(store) { + counts.transition(stream, |_, stream| { + trace!("clear_stream_window_update_queue; stream={:?}", stream.id); + }) + } + } + + /// Called on EOF + fn clear_all_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { + while let Some(stream) = self.pending_reset_expired.pop(store) { + counts.transition_after(stream, true); + } + } + + fn clear_all_pending_accept(&mut self, store: &mut Store, counts: &mut Counts) { + while let Some(stream) = self.pending_accept.pop(store) { + counts.transition_after(stream, false); + } + } + + pub fn poll_complete( + &mut self, + store: &mut Store, + counts: &mut Counts, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + // Send any pending connection level window updates + try_ready!(self.send_connection_window_update(dst)); + + // Send any pending stream level window updates + try_ready!(self.send_stream_window_updates(store, counts, dst)); + + Ok(().into()) + } + + /// Send connection level window update + fn send_connection_window_update( + &mut self, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + if let Some(incr) = self.flow.unclaimed_capacity() { + let frame = frame::WindowUpdate::new(StreamId::zero(), incr); + + // Ensure the codec has capacity + try_ready!(dst.poll_ready()); + + // Buffer the WINDOW_UPDATE frame + dst.buffer(frame.into()) + .ok() + .expect("invalid WINDOW_UPDATE frame"); + + // Update flow control + self.flow + .inc_window(incr) + .ok() + .expect("unexpected flow control state"); + } + + Ok(().into()) + } + + /// Send stream level window update + pub fn send_stream_window_updates( + &mut self, + store: &mut Store, + counts: &mut Counts, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where + T: AsyncWrite, + B: Buf, + { + loop { + // Ensure the codec has capacity + try_ready!(dst.poll_ready()); + + // Get the next stream + let stream = match 
self.pending_window_updates.pop(store) { + Some(stream) => stream, + None => return Ok(().into()), + }; + + counts.transition(stream, |_, stream| { + trace!("pending_window_updates -- pop; stream={:?}", stream.id); + debug_assert!(!stream.is_pending_window_update); + + if !stream.state.is_recv_streaming() { + // No need to send window updates on the stream if the stream is + // no longer receiving data. + // + // TODO: is this correct? We could possibly send a window + // update on a ReservedRemote stream if we already know + // we want to stream the data faster... + return; + } + + // TODO: de-dup + if let Some(incr) = stream.recv_flow.unclaimed_capacity() { + // Create the WINDOW_UPDATE frame + let frame = frame::WindowUpdate::new(stream.id, incr); + + // Buffer it + dst.buffer(frame.into()) + .ok() + .expect("invalid WINDOW_UPDATE frame"); + + // Update flow control + stream + .recv_flow + .inc_window(incr) + .ok() + .expect("unexpected flow control state"); + } + }) + } + } + + pub fn next_incoming(&mut self, store: &mut Store) -> Option { + self.pending_accept.pop(store).map(|ptr| ptr.key()) + } + + pub fn poll_data(&mut self, stream: &mut Stream) -> Poll, proto::Error> { + // TODO: Return error when the stream is reset + match stream.pending_recv.pop_front(&mut self.buffer) { + Some(Event::Data(payload)) => Ok(Some(payload).into()), + Some(event) => { + // Frame is trailer + stream.pending_recv.push_front(&mut self.buffer, event); + + // Notify the recv task. This is done just in case + // `poll_trailers` was called. + // + // It is very likely that `notify_recv` will just be a no-op (as + // the task will be None), so this isn't really much of a + // performance concern. It also means we don't have to track + // state to see if `poll_trailers` was called before `poll_data` + // returned `None`. 
+ stream.notify_recv(); + + // No more data frames + Ok(None.into()) + }, + None => self.schedule_recv(stream), + } + } + + pub fn poll_trailers( + &mut self, + stream: &mut Stream, + ) -> Poll, proto::Error> { + match stream.pending_recv.pop_front(&mut self.buffer) { + Some(Event::Trailers(trailers)) => Ok(Some(trailers).into()), + Some(event) => { + // Frame is not trailers.. not ready to poll trailers yet. + stream.pending_recv.push_front(&mut self.buffer, event); + + Ok(Async::NotReady) + }, + None => self.schedule_recv(stream), + } + } + + fn schedule_recv(&mut self, stream: &mut Stream) -> Poll, proto::Error> { + if stream.state.ensure_recv_open()? { + // Request to get notified once more frames arrive + stream.recv_task = Some(task::current()); + Ok(Async::NotReady) + } else { + // No more frames will be received + Ok(None.into()) + } + } +} + +// ===== impl Event ===== + +impl Event { + fn is_data(&self) -> bool { + match *self { + Event::Data(..) => true, + _ => false, + } + } +} + +// ===== impl Open ===== + +impl Open { + pub fn is_push_promise(&self) -> bool { + use self::Open::*; + + match *self { + PushPromise => true, + _ => false, + } + } +} + +// ===== impl RecvHeaderBlockError ===== + +impl From for RecvHeaderBlockError { + fn from(err: RecvError) -> Self { + RecvHeaderBlockError::State(err) + } +} + +// ===== util ===== + +fn parse_u64(src: &[u8]) -> Result { + if src.len() > 19 { + // At danger for overflow... 
+ return Err(()); + } + + let mut ret = 0; + + for &d in src { + if d < b'0' || d > b'9' { + return Err(()); + } + + ret *= 10; + ret += (d - b'0') as u64; + } + + Ok(ret) +} diff --git a/third_party/rust/h2/src/proto/streams/send.rs b/third_party/rust/h2/src/proto/streams/send.rs new file mode 100644 index 000000000000..06eb02c13ad2 --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/send.rs @@ -0,0 +1,451 @@ +use codec::{RecvError, UserError}; +use frame::{self, Reason}; +use super::{ + store, Buffer, Codec, Config, Counts, Frame, Prioritize, + Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize, +}; + +use bytes::Buf; +use http; +use futures::{Async, Poll}; +use futures::task::Task; +use tokio_io::AsyncWrite; + +use std::io; + +/// Manages state transitions related to outbound frames. +#[derive(Debug)] +pub(super) struct Send { + /// Stream identifier to use for next initialized stream. + next_stream_id: Result, + + /// Initial window size of locally initiated streams + init_window_sz: WindowSize, + + /// Prioritization layer + prioritize: Prioritize, +} + +/// A value to detect which public API has called `poll_reset`. 
+#[derive(Debug)] +pub(crate) enum PollReset { + AwaitingHeaders, + Streaming, +} + +impl Send { + /// Create a new `Send` + pub fn new(config: &Config) -> Self { + Send { + init_window_sz: config.remote_init_window_sz, + next_stream_id: Ok(config.local_next_stream_id), + prioritize: Prioritize::new(config), + } + } + + /// Returns the initial send window size + pub fn init_window_sz(&self) -> WindowSize { + self.init_window_sz + } + + pub fn open(&mut self) -> Result { + let stream_id = self.ensure_next_stream_id()?; + self.next_stream_id = stream_id.next_id(); + Ok(stream_id) + } + + pub fn send_headers( + &mut self, + frame: frame::Headers, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + task: &mut Option, + ) -> Result<(), UserError> { + trace!( + "send_headers; frame={:?}; init_window={:?}", + frame, + self.init_window_sz + ); + + // 8.1.2.2. Connection-Specific Header Fields + if frame.fields().contains_key(http::header::CONNECTION) + || frame.fields().contains_key(http::header::TRANSFER_ENCODING) + || frame.fields().contains_key(http::header::UPGRADE) + || frame.fields().contains_key("keep-alive") + || frame.fields().contains_key("proxy-connection") + { + debug!("illegal connection-specific headers found"); + return Err(UserError::MalformedHeaders); + } else if let Some(te) = frame.fields().get(http::header::TE) { + if te != "trailers" { + debug!("illegal connection-specific headers found"); + return Err(UserError::MalformedHeaders); + + } + } + + let end_stream = frame.is_end_stream(); + + // Update the state + stream.state.send_open(end_stream)?; + + if counts.peer().is_local_init(frame.stream_id()) { + if counts.can_inc_num_send_streams() { + counts.inc_num_send_streams(stream); + } else { + self.prioritize.queue_open(stream); + } + } + + // Queue the frame for sending + self.prioritize.queue_frame(frame.into(), buffer, stream, task); + + Ok(()) + } + + /// Send an explicit RST_STREAM frame + /// + /// # Arguments + /// + 
`reason`: the error code for the RST_STREAM frame + /// + `clear_queue`: if true, all pending outbound frames will be cleared, + /// if false, the RST_STREAM frame will be appended to the end of the + /// send queue. + pub fn send_reset( + &mut self, + reason: Reason, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + task: &mut Option, + ) { + let is_reset = stream.state.is_reset(); + let is_closed = stream.state.is_closed(); + let is_empty = stream.pending_send.is_empty(); + + trace!( + "send_reset(..., reason={:?}, stream={:?}, ..., \ + is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ + state={:?} \ + ", + stream.id, + reason, + is_reset, + is_closed, + is_empty, + stream.state + ); + + if is_reset { + // Don't double reset + trace!( + " -> not sending RST_STREAM ({:?} is already reset)", + stream.id + ); + return; + } + + // Transition the state to reset no matter what. + stream.state.set_reset(reason); + + // If closed AND the send queue is flushed, then the stream cannot be + // reset explicitly, either. Implicit resets can still be queued. 
+ if is_closed && is_empty { + trace!( + " -> not sending explicit RST_STREAM ({:?} was closed \ + and send queue was flushed)", + stream.id + ); + return; + } + + self.recv_err(buffer, stream, counts); + + let frame = frame::Reset::new(stream.id, reason); + + trace!("send_reset -- queueing; frame={:?}", frame); + self.prioritize.queue_frame(frame.into(), buffer, stream, task); + } + + pub fn schedule_implicit_reset( + &mut self, + stream: &mut store::Ptr, + reason: Reason, + counts: &mut Counts, + task: &mut Option, + ) { + if stream.state.is_closed() { + // Stream is already closed, nothing more to do + return; + } + + stream.state.set_scheduled_reset(reason); + + self.prioritize.reclaim_reserved_capacity(stream, counts); + self.prioritize.schedule_send(stream, task); + } + + pub fn send_data( + &mut self, + frame: frame::Data, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + task: &mut Option, + ) -> Result<(), UserError> + where B: Buf, + { + self.prioritize.send_data(frame, buffer, stream, counts, task) + } + + pub fn send_trailers( + &mut self, + frame: frame::Headers, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + task: &mut Option, + ) -> Result<(), UserError> { + // TODO: Should this logic be moved into state.rs? 
+ if !stream.state.is_send_streaming() { + return Err(UserError::UnexpectedFrameType); + } + + stream.state.send_close(); + + trace!("send_trailers -- queuing; frame={:?}", frame); + self.prioritize.queue_frame(frame.into(), buffer, stream, task); + + // Release any excess capacity + self.prioritize.reserve_capacity(0, stream, counts); + + Ok(()) + } + + pub fn poll_complete( + &mut self, + buffer: &mut Buffer>, + store: &mut Store, + counts: &mut Counts, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where T: AsyncWrite, + B: Buf, + { + self.prioritize.poll_complete(buffer, store, counts, dst) + } + + /// Request capacity to send data + pub fn reserve_capacity( + &mut self, + capacity: WindowSize, + stream: &mut store::Ptr, + counts: &mut Counts) + { + self.prioritize.reserve_capacity(capacity, stream, counts) + } + + pub fn poll_capacity( + &mut self, + stream: &mut store::Ptr, + ) -> Poll, UserError> { + if !stream.state.is_send_streaming() { + return Ok(Async::Ready(None)); + } + + if !stream.send_capacity_inc { + stream.wait_send(); + return Ok(Async::NotReady); + } + + stream.send_capacity_inc = false; + + Ok(Async::Ready(Some(self.capacity(stream)))) + } + + /// Current available stream send capacity + pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { + let available = stream.send_flow.available().as_size(); + let buffered = stream.buffered_send_data; + + if available <= buffered { + 0 + } else { + available - buffered + } + } + + pub fn poll_reset( + &self, + stream: &mut Stream, + mode: PollReset, + ) -> Poll { + match stream.state.ensure_reason(mode)? 
{ + Some(reason) => Ok(reason.into()), + None => { + stream.wait_send(); + Ok(Async::NotReady) + }, + } + } + + pub fn recv_connection_window_update( + &mut self, + frame: frame::WindowUpdate, + store: &mut Store, + counts: &mut Counts, + ) -> Result<(), Reason> { + self.prioritize + .recv_connection_window_update(frame.size_increment(), store, counts) + } + + pub fn recv_stream_window_update( + &mut self, + sz: WindowSize, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + task: &mut Option, + ) -> Result<(), Reason> { + if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { + debug!("recv_stream_window_update !!; err={:?}", e); + + self.send_reset( + Reason::FLOW_CONTROL_ERROR.into(), + buffer, stream, counts, task); + + return Err(e); + } + + Ok(()) + } + + pub fn recv_reset( + &mut self, + buffer: &mut Buffer>, + stream: &mut store::Ptr + ) { + // Clear all pending outbound frames + self.prioritize.clear_queue(buffer, stream); + } + + pub fn recv_err( + &mut self, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + ) { + // Clear all pending outbound frames + self.prioritize.clear_queue(buffer, stream); + self.prioritize.reclaim_all_capacity(stream, counts); + } + + pub fn apply_remote_settings( + &mut self, + settings: &frame::Settings, + buffer: &mut Buffer>, + store: &mut Store, + counts: &mut Counts, + task: &mut Option, + ) -> Result<(), RecvError> { + // Applies an update to the remote endpoint's initial window size. + // + // Per RFC 7540 §6.9.2: + // + // In addition to changing the flow-control window for streams that are + // not yet active, a SETTINGS frame can alter the initial flow-control + // window size for streams with active flow-control windows (that is, + // streams in the "open" or "half-closed (remote)" state). 
When the + // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust + // the size of all stream flow-control windows that it maintains by the + // difference between the new value and the old value. + // + // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available + // space in a flow-control window to become negative. A sender MUST + // track the negative flow-control window and MUST NOT send new + // flow-controlled frames until it receives WINDOW_UPDATE frames that + // cause the flow-control window to become positive. + if let Some(val) = settings.initial_window_size() { + let old_val = self.init_window_sz; + self.init_window_sz = val; + + if val < old_val { + // We must decrease the (remote) window on every open stream. + let dec = old_val - val; + trace!("decrementing all windows; dec={}", dec); + + let mut total_reclaimed = 0; + store.for_each(|mut stream| { + let stream = &mut *stream; + + stream.send_flow.dec_window(dec); + + // It's possible that decreasing the window causes + // `window_size` (the stream-specific window) to fall below + // `available` (the portion of the connection-level window + // that we have allocated to the stream). + // In this case, we should take that excess allocation away + // and reassign it to other streams. + let window_size = stream.send_flow.window_size(); + let available = stream.send_flow.available().as_size(); + let reclaimed = if available > window_size { + // Drop down to `window_size`. + let reclaim = available - window_size; + stream.send_flow.claim_capacity(reclaim); + total_reclaimed += reclaim; + reclaim + } else { + 0 + }; + + trace!( + "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", + stream.id, + dec, + reclaimed, + stream.send_flow + ); + + // TODO: Should this notify the producer when the capacity + // of a stream is reduced? Maybe it should if the capacity + // is reduced to zero, allowing the producer to stop work. 
+ + Ok::<_, RecvError>(()) + })?; + + self.prioritize + .assign_connection_capacity(total_reclaimed, store, counts); + } else if val > old_val { + let inc = val - old_val; + + store.for_each(|mut stream| { + self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) + .map_err(RecvError::Connection) + })?; + } + } + + Ok(()) + } + + pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) { + self.prioritize.clear_pending_capacity(store, counts); + self.prioritize.clear_pending_send(store, counts); + self.prioritize.clear_pending_open(store, counts); + } + + pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { + if let Ok(next) = self.next_stream_id { + if id >= next { + return Err(Reason::PROTOCOL_ERROR); + } + } + // if next_stream_id is overflowed, that's ok. + + Ok(()) + } + + pub fn ensure_next_stream_id(&self) -> Result { + self.next_stream_id.map_err(|_| UserError::OverflowedStreamId) + } +} diff --git a/third_party/rust/h2/src/proto/streams/state.rs b/third_party/rust/h2/src/proto/streams/state.rs new file mode 100644 index 000000000000..36257fe32a3a --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/state.rs @@ -0,0 +1,447 @@ +use std::io; + +use codec::{RecvError, UserError}; +use codec::UserError::*; +use frame::Reason; +use proto::{self, PollReset}; + +use self::Inner::*; +use self::Peer::*; + +/// Represents the state of an H2 stream +/// +/// ```not_rust +/// +--------+ +/// send PP | | recv PP +/// ,--------| idle |--------. +/// / | | \ +/// v +--------+ v +/// +----------+ | +----------+ +/// | | | send H / | | +/// ,------| reserved | | recv H | reserved |------. +/// | | (local) | | | (remote) | | +/// | +----------+ v +----------+ | +/// | | +--------+ | | +/// | | recv ES | | send ES | | +/// | send H | ,-------| open |-------. 
| recv H | +/// | | / | | \ | | +/// | v v +--------+ v v | +/// | +----------+ | +----------+ | +/// | | half | | | half | | +/// | | closed | | send R / | closed | | +/// | | (remote) | | recv R | (local) | | +/// | +----------+ | +----------+ | +/// | | | | | +/// | | send ES / | recv ES / | | +/// | | send R / v send R / | | +/// | | recv R +--------+ recv R | | +/// | send R / `----------->| |<-----------' send R / | +/// | recv R | closed | recv R | +/// `----------------------->| |<----------------------' +/// +--------+ +/// +/// send: endpoint sends this frame +/// recv: endpoint receives this frame +/// +/// H: HEADERS frame (with implied CONTINUATIONs) +/// PP: PUSH_PROMISE frame (with implied CONTINUATIONs) +/// ES: END_STREAM flag +/// R: RST_STREAM frame +/// ``` +#[derive(Debug, Clone)] +pub struct State { + inner: Inner, +} + +#[derive(Debug, Clone, Copy)] +enum Inner { + Idle, + // TODO: these states shouldn't count against concurrency limits: + //ReservedLocal, + ReservedRemote, + Open { local: Peer, remote: Peer }, + HalfClosedLocal(Peer), // TODO: explicitly name this value + HalfClosedRemote(Peer), + Closed(Cause), +} + +#[derive(Debug, Copy, Clone)] +enum Peer { + AwaitingHeaders, + Streaming, +} + +#[derive(Debug, Copy, Clone)] +enum Cause { + EndStream, + Proto(Reason), + LocallyReset(Reason), + Io, + + /// This indicates to the connection that a reset frame must be sent out + /// once the send queue has been flushed. + /// + /// Examples of when this could happen: + /// - User drops all references to a stream, so we want to CANCEL the it. + /// - Header block size was too large, so we want to REFUSE, possibly + /// after sending a 431 response frame. + Scheduled(Reason), +} + +impl State { + /// Opens the send-half of a stream if it is not already open. 
+ pub fn send_open(&mut self, eos: bool) -> Result<(), UserError> { + let local = Streaming; + + self.inner = match self.inner { + Idle => if eos { + HalfClosedLocal(AwaitingHeaders) + } else { + Open { + local, + remote: AwaitingHeaders, + } + }, + Open { + local: AwaitingHeaders, + remote, + } => if eos { + HalfClosedLocal(remote) + } else { + Open { + local, + remote, + } + }, + HalfClosedRemote(AwaitingHeaders) => if eos { + Closed(Cause::EndStream) + } else { + HalfClosedRemote(local) + }, + _ => { + // All other transitions result in a protocol error + return Err(UnexpectedFrameType); + }, + }; + + return Ok(()); + } + + /// Opens the receive-half of the stream when a HEADERS frame is received. + /// + /// Returns true if this transitions the state to Open. + pub fn recv_open(&mut self, eos: bool) -> Result { + let remote = Streaming; + let mut initial = false; + + self.inner = match self.inner { + Idle => { + initial = true; + + if eos { + HalfClosedRemote(AwaitingHeaders) + } else { + Open { + local: AwaitingHeaders, + remote, + } + } + }, + ReservedRemote => { + initial = true; + + if eos { + Closed(Cause::EndStream) + } else { + Open { + local: AwaitingHeaders, + remote, + } + } + }, + Open { + local, + remote: AwaitingHeaders, + } => if eos { + HalfClosedRemote(local) + } else { + Open { + local, + remote, + } + }, + HalfClosedLocal(AwaitingHeaders) => if eos { + Closed(Cause::EndStream) + } else { + HalfClosedLocal(remote) + }, + _ => { + // All other transitions result in a protocol error + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + }, + }; + + return Ok(initial); + } + + /// Transition from Idle -> ReservedRemote + pub fn reserve_remote(&mut self) -> Result<(), RecvError> { + match self.inner { + Idle => { + self.inner = ReservedRemote; + Ok(()) + }, + _ => Err(RecvError::Connection(Reason::PROTOCOL_ERROR)), + } + } + + /// Indicates that the remote side will not send more data to the local. 
+ pub fn recv_close(&mut self) -> Result<(), RecvError> { + match self.inner { + Open { + local, .. + } => { + // The remote side will continue to receive data. + trace!("recv_close: Open => HalfClosedRemote({:?})", local); + self.inner = HalfClosedRemote(local); + Ok(()) + }, + HalfClosedLocal(..) => { + trace!("recv_close: HalfClosedLocal => Closed"); + self.inner = Closed(Cause::EndStream); + Ok(()) + }, + _ => Err(RecvError::Connection(Reason::PROTOCOL_ERROR)), + } + } + + /// The remote explicitly sent a RST_STREAM. + /// + /// # Arguments + /// - `reason`: the reason field of the received RST_STREAM frame. + /// - `queued`: true if this stream has frames in the pending send queue. + pub fn recv_reset(&mut self, reason: Reason, queued: bool) { + match self.inner { + // If the stream is already in a `Closed` state, do nothing, + // provided that there are no frames still in the send queue. + Closed(..) if !queued => {}, + // A notionally `Closed` stream may still have queued frames in + // the following cases: + // + // - if the cause is `Cause::Scheduled(..)` (i.e. we have not + // actually closed the stream yet). + // - if the cause is `Cause::EndStream`: we transition to this + // state when an EOS frame is *enqueued* (so that it's invalid + // to enqueue more frames), not when the EOS frame is *sent*; + // therefore, there may still be frames ahead of the EOS frame + // in the send queue. + // + // In either of these cases, we want to overwrite the stream's + // previous state with the received RST_STREAM, so that the queue + // will be cleared by `Prioritize::pop_frame`. + state => { + trace!( + "recv_reset; reason={:?}; state={:?}; queued={:?}", + reason, state, queued + ); + self.inner = Closed(Cause::Proto(reason)); + }, + + } + } + + /// We noticed a protocol error. + pub fn recv_err(&mut self, err: &proto::Error) { + use proto::Error::*; + + match self.inner { + Closed(..) 
=> {}, + _ => { + trace!("recv_err; err={:?}", err); + self.inner = Closed(match *err { + Proto(reason) => Cause::LocallyReset(reason), + Io(..) => Cause::Io, + }); + }, + } + } + + pub fn recv_eof(&mut self) { + match self.inner { + Closed(..) => {}, + s => { + trace!("recv_eof; state={:?}", s); + self.inner = Closed(Cause::Io); + } + } + } + + /// Indicates that the local side will not send more data to the local. + pub fn send_close(&mut self) { + match self.inner { + Open { + remote, .. + } => { + // The remote side will continue to receive data. + trace!("send_close: Open => HalfClosedLocal({:?})", remote); + self.inner = HalfClosedLocal(remote); + }, + HalfClosedRemote(..) => { + trace!("send_close: HalfClosedRemote => Closed"); + self.inner = Closed(Cause::EndStream); + }, + _ => panic!("transition send_close on unexpected state"), + } + } + + /// Set the stream state to reset locally. + pub fn set_reset(&mut self, reason: Reason) { + self.inner = Closed(Cause::LocallyReset(reason)); + } + + /// Set the stream state to a scheduled reset. + pub fn set_scheduled_reset(&mut self, reason: Reason) { + debug_assert!(!self.is_closed()); + self.inner = Closed(Cause::Scheduled(reason)); + } + + pub fn get_scheduled_reset(&self) -> Option { + match self.inner { + Closed(Cause::Scheduled(reason)) => Some(reason), + _ => None, + } + } + + pub fn is_scheduled_reset(&self) -> bool { + match self.inner { + Closed(Cause::Scheduled(..)) => true, + _ => false, + } + } + + pub fn is_local_reset(&self) -> bool { + match self.inner { + Closed(Cause::LocallyReset(_)) => true, + Closed(Cause::Scheduled(..)) => true, + _ => false, + } + } + + /// Returns true if the stream is already reset. + pub fn is_reset(&self) -> bool { + match self.inner { + Closed(Cause::EndStream) => false, + Closed(_) => true, + _ => false, + } + } + + pub fn is_send_streaming(&self) -> bool { + match self.inner { + Open { + local: Streaming, + .. 
+ } => true, + HalfClosedRemote(Streaming) => true, + _ => false, + } + } + + /// Returns true when the stream is in a state to receive headers + pub fn is_recv_headers(&self) -> bool { + match self.inner { + Idle => true, + Open { + remote: AwaitingHeaders, + .. + } => true, + HalfClosedLocal(AwaitingHeaders) => true, + ReservedRemote => true, + _ => false, + } + } + + pub fn is_recv_streaming(&self) -> bool { + match self.inner { + Open { + remote: Streaming, + .. + } => true, + HalfClosedLocal(Streaming) => true, + _ => false, + } + } + + pub fn is_closed(&self) -> bool { + match self.inner { + Closed(_) => true, + _ => false, + } + } + + pub fn is_recv_closed(&self) -> bool { + match self.inner { + Closed(..) | HalfClosedRemote(..) => true, + _ => false, + } + } + + pub fn is_send_closed(&self) -> bool { + match self.inner { + Closed(..) | HalfClosedLocal(..) | ReservedRemote => true, + _ => false, + } + } + + pub fn is_idle(&self) -> bool { + match self.inner { + Idle => true, + _ => false, + } + } + + pub fn ensure_recv_open(&self) -> Result { + // TODO: Is this correct? + match self.inner { + Closed(Cause::Proto(reason)) | + Closed(Cause::LocallyReset(reason)) | + Closed(Cause::Scheduled(reason)) => Err(proto::Error::Proto(reason)), + Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into())), + Closed(Cause::EndStream) | + HalfClosedRemote(..) => Ok(false), + _ => Ok(true), + } + } + + /// Returns a reason if the stream has been reset. + pub(super) fn ensure_reason(&self, mode: PollReset) -> Result, ::Error> { + match self.inner { + Closed(Cause::Proto(reason)) | + Closed(Cause::LocallyReset(reason)) | + Closed(Cause::Scheduled(reason)) => Ok(Some(reason)), + Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into()).into()), + Open { local: Streaming, .. 
} | + HalfClosedRemote(Streaming) => match mode { + PollReset::AwaitingHeaders => { + Err(UserError::PollResetAfterSendResponse.into()) + }, + PollReset::Streaming => Ok(None), + }, + _ => Ok(None), + } + } +} + +impl Default for State { + fn default() -> State { + State { + inner: Inner::Idle, + } + } +} + +impl Default for Peer { + fn default() -> Self { + AwaitingHeaders + } +} diff --git a/third_party/rust/h2/src/proto/streams/store.rs b/third_party/rust/h2/src/proto/streams/store.rs new file mode 100644 index 000000000000..f66906a60902 --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/store.rs @@ -0,0 +1,405 @@ +use super::*; + +use slab; + +use indexmap::{self, IndexMap}; + +use std::fmt; +use std::marker::PhantomData; +use std::ops; + +/// Storage for streams +#[derive(Debug)] +pub(super) struct Store { + slab: slab::Slab<(StoreId, Stream)>, + ids: IndexMap, + counter: StoreId, +} + +/// "Pointer" to an entry in the store +pub(super) struct Ptr<'a> { + key: Key, + store: &'a mut Store, +} + +/// References an entry in the store. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct Key { + index: usize, + store_id: StoreId, +} + +type StoreId = usize; + +#[derive(Debug)] +pub(super) struct Queue { + indices: Option, + _p: PhantomData, +} + +pub(super) trait Next { + fn next(stream: &Stream) -> Option; + + fn set_next(stream: &mut Stream, key: Option); + + fn take_next(stream: &mut Stream) -> Option; + + fn is_queued(stream: &Stream) -> bool; + + fn set_queued(stream: &mut Stream, val: bool); +} + +/// A linked list +#[derive(Debug, Clone, Copy)] +struct Indices { + pub head: Key, + pub tail: Key, +} + +pub(super) enum Entry<'a> { + Occupied(OccupiedEntry<'a>), + Vacant(VacantEntry<'a>), +} + +pub(super) struct OccupiedEntry<'a> { + ids: indexmap::map::OccupiedEntry<'a, StreamId, (usize, StoreId)>, +} + +pub(super) struct VacantEntry<'a> { + ids: indexmap::map::VacantEntry<'a, StreamId, (usize, StoreId)>, + slab: &'a mut slab::Slab<(StoreId, Stream)>, + counter: &'a mut usize, +} + +pub(super) trait Resolve { + fn resolve(&mut self, key: Key) -> Ptr; +} + +// ===== impl Store ===== + +impl Store { + pub fn new() -> Self { + Store { + slab: slab::Slab::new(), + ids: IndexMap::new(), + counter: 0, + } + } + + pub fn find_mut(&mut self, id: &StreamId) -> Option { + let key = match self.ids.get(id) { + Some(key) => *key, + None => return None, + }; + + Some(Ptr { + key: Key { + index: key.0, + store_id: key.1, + }, + store: self, + }) + } + + pub fn insert(&mut self, id: StreamId, val: Stream) -> Ptr { + let store_id = self.counter; + self.counter = self.counter.wrapping_add(1); + let key = self.slab.insert((store_id, val)); + assert!(self.ids.insert(id, (key, store_id)).is_none()); + + Ptr { + key: Key { + index: key, + store_id, + }, + store: self, + } + } + + pub fn find_entry(&mut self, id: StreamId) -> Entry { + use self::indexmap::map::Entry::*; + + match self.ids.entry(id) { + Occupied(e) => Entry::Occupied(OccupiedEntry { + ids: e, + }), + Vacant(e) => 
Entry::Vacant(VacantEntry { + ids: e, + slab: &mut self.slab, + counter: &mut self.counter, + }), + } + } + + pub fn for_each(&mut self, mut f: F) -> Result<(), E> + where + F: FnMut(Ptr) -> Result<(), E>, + { + let mut len = self.ids.len(); + let mut i = 0; + + while i < len { + // Get the key by index, this makes the borrow checker happy + let key = *self.ids.get_index(i).unwrap().1; + + f(Ptr { + key: Key { + index: key.0, + store_id: key.1, + }, + store: self, + })?; + + // TODO: This logic probably could be better... + let new_len = self.ids.len(); + + if new_len < len { + debug_assert!(new_len == len - 1); + len -= 1; + } else { + i += 1; + } + } + + Ok(()) + } +} + +impl Resolve for Store { + fn resolve(&mut self, key: Key) -> Ptr { + Ptr { + key: key, + store: self, + } + } +} + +impl ops::Index for Store { + type Output = Stream; + + fn index(&self, key: Key) -> &Self::Output { + let slot = self.slab.index(key.index); + assert_eq!(slot.0, key.store_id); + &slot.1 + } +} + +impl ops::IndexMut for Store { + fn index_mut(&mut self, key: Key) -> &mut Self::Output { + let slot = self.slab.index_mut(key.index); + assert_eq!(slot.0, key.store_id); + &mut slot.1 + } +} + +impl Store { + pub fn num_active_streams(&self) -> usize { + self.ids.len() + } + + #[cfg(feature = "unstable")] + pub fn num_wired_streams(&self) -> usize { + self.slab.len() + } +} + +impl Drop for Store { + fn drop(&mut self) { + use std::thread; + + if !thread::panicking() { + debug_assert!(self.slab.is_empty()); + } + } +} + +// ===== impl Queue ===== + +impl Queue +where + N: Next, +{ + pub fn new() -> Self { + Queue { + indices: None, + _p: PhantomData, + } + } + + pub fn take(&mut self) -> Self { + Queue { + indices: self.indices.take(), + _p: PhantomData, + } + } + + /// Queue the stream. + /// + /// If the stream is already contained by the list, return `false`. 
+ pub fn push(&mut self, stream: &mut store::Ptr) -> bool { + trace!("Queue::push"); + + if N::is_queued(stream) { + trace!(" -> already queued"); + return false; + } + + N::set_queued(stream, true); + + // The next pointer shouldn't be set + debug_assert!(N::next(stream).is_none()); + + // Queue the stream + match self.indices { + Some(ref mut idxs) => { + trace!(" -> existing entries"); + + // Update the current tail node to point to `stream` + let key = stream.key(); + N::set_next(&mut stream.resolve(idxs.tail), Some(key)); + + // Update the tail pointer + idxs.tail = stream.key(); + }, + None => { + trace!(" -> first entry"); + self.indices = Some(store::Indices { + head: stream.key(), + tail: stream.key(), + }); + }, + } + + true + } + + pub fn pop<'a, R>(&mut self, store: &'a mut R) -> Option> + where + R: Resolve, + { + if let Some(mut idxs) = self.indices { + let mut stream = store.resolve(idxs.head); + + if idxs.head == idxs.tail { + assert!(N::next(&*stream).is_none()); + self.indices = None; + } else { + idxs.head = N::take_next(&mut *stream).unwrap(); + self.indices = Some(idxs); + } + + debug_assert!(N::is_queued(&*stream)); + N::set_queued(&mut *stream, false); + + return Some(stream); + } + + None + } + + pub fn pop_if<'a, R, F>(&mut self, store: &'a mut R, f: F) -> Option> + where + R: Resolve, + F: Fn(&Stream) -> bool, + { + if let Some(idxs) = self.indices { + let should_pop = f(&store.resolve(idxs.head)); + if should_pop { + return self.pop(store); + } + } + + None + } +} + +// ===== impl Ptr ===== + +impl<'a> Ptr<'a> { + /// Returns the Key associated with the stream + pub fn key(&self) -> Key { + self.key + } + + pub fn store_mut(&mut self) -> &mut Store { + &mut self.store + } + + /// Remove the stream from the store + pub fn remove(self) -> StreamId { + // The stream must have been unlinked before this point + debug_assert!(!self.store.ids.contains_key(&self.id)); + + // Remove the stream state + self.store.slab.remove(self.key.index).1.id + 
} + + /// Remove the StreamId -> stream state association. + /// + /// This will effectively remove the stream as far as the H2 protocol is + /// concerned. + pub fn unlink(&mut self) { + let id = self.id; + self.store.ids.remove(&id); + } +} + +impl<'a> Resolve for Ptr<'a> { + fn resolve(&mut self, key: Key) -> Ptr { + Ptr { + key: key, + store: &mut *self.store, + } + } +} + +impl<'a> ops::Deref for Ptr<'a> { + type Target = Stream; + + fn deref(&self) -> &Stream { + &self.store.slab[self.key.index].1 + } +} + +impl<'a> ops::DerefMut for Ptr<'a> { + fn deref_mut(&mut self) -> &mut Stream { + &mut self.store.slab[self.key.index].1 + } +} + +impl<'a> fmt::Debug for Ptr<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + (**self).fmt(fmt) + } +} + +// ===== impl OccupiedEntry ===== + +impl<'a> OccupiedEntry<'a> { + pub fn key(&self) -> Key { + let tup = self.ids.get(); + Key { + index: tup.0, + store_id: tup.1, + } + } +} + +// ===== impl VacantEntry ===== + +impl<'a> VacantEntry<'a> { + pub fn insert(self, value: Stream) -> Key { + // Insert the value in the slab + let store_id = *self.counter; + *self.counter = store_id.wrapping_add(1); + let index = self.slab.insert((store_id, value)); + + // Insert the handle in the ID map + self.ids.insert((index, store_id)); + + Key { + index, + store_id, + } + } +} diff --git a/third_party/rust/h2/src/proto/streams/stream.rs b/third_party/rust/h2/src/proto/streams/stream.rs new file mode 100644 index 000000000000..b4594ef821e6 --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/stream.rs @@ -0,0 +1,453 @@ +use super::*; + +use std::time::Instant; +use std::usize; + +/// Tracks Stream related state +/// +/// # Reference counting +/// +/// There can be a number of outstanding handles to a single Stream. These are +/// tracked using reference counting. The `ref_count` field represents the +/// number of outstanding userspace handles that can reach this stream. 
+/// +/// It's important to note that when the stream is placed in an internal queue +/// (such as an accept queue), this is **not** tracked by a reference count. +/// Thus, `ref_count` can be zero and the stream still has to be kept around. +#[derive(Debug)] +pub(super) struct Stream { + /// The h2 stream identifier + pub id: StreamId, + + /// Current state of the stream + pub state: State, + + /// Set to `true` when the stream is counted against the connection's max + /// concurrent streams. + pub is_counted: bool, + + /// Number of outstanding handles pointing to this stream + pub ref_count: usize, + + // ===== Fields related to sending ===== + /// Next node in the accept linked list + pub next_pending_send: Option, + + /// Set to true when the stream is pending accept + pub is_pending_send: bool, + + /// Send data flow control + pub send_flow: FlowControl, + + /// Amount of send capacity that has been requested, but not yet allocated. + pub requested_send_capacity: WindowSize, + + /// Amount of data buffered at the prioritization layer. + /// TODO: Technically this could be greater than the window size... + pub buffered_send_data: WindowSize, + + /// Task tracking additional send capacity (i.e. window updates). + send_task: Option, + + /// Frames pending for this stream being sent to the socket + pub pending_send: buffer::Deque, + + /// Next node in the linked list of streams waiting for additional + /// connection level capacity. 
+ pub next_pending_send_capacity: Option, + + /// True if the stream is waiting for outbound connection capacity + pub is_pending_send_capacity: bool, + + /// Set to true when the send capacity has been incremented + pub send_capacity_inc: bool, + + /// Next node in the open linked list + pub next_open: Option, + + /// Set to true when the stream is pending to be opened + pub is_pending_open: bool, + + // ===== Fields related to receiving ===== + /// Next node in the accept linked list + pub next_pending_accept: Option, + + /// Set to true when the stream is pending accept + pub is_pending_accept: bool, + + /// Receive data flow control + pub recv_flow: FlowControl, + + pub in_flight_recv_data: WindowSize, + + /// Next node in the linked list of streams waiting to send window updates. + pub next_window_update: Option, + + /// True if the stream is waiting to send a window update + pub is_pending_window_update: bool, + + /// The time when this stream may have been locally reset. + pub reset_at: Option, + + /// Next node in list of reset streams that should expire eventually + pub next_reset_expire: Option, + + /// Frames pending for this stream to read + pub pending_recv: buffer::Deque, + + /// Task tracking receiving frames + pub recv_task: Option, + + /// The stream's pending push promises + pub pending_push_promises: store::Queue, + + /// Validate content-length headers + pub content_length: ContentLength, +} + +/// State related to validating a stream's content-length +#[derive(Debug)] +pub enum ContentLength { + Omitted, + Head, + Remaining(u64), +} + +#[derive(Debug)] +pub(super) struct NextAccept; + +#[derive(Debug)] +pub(super) struct NextSend; + +#[derive(Debug)] +pub(super) struct NextSendCapacity; + +#[derive(Debug)] +pub(super) struct NextWindowUpdate; + +#[derive(Debug)] +pub(super) struct NextOpen; + +#[derive(Debug)] +pub(super) struct NextResetExpire; + +impl Stream { + pub fn new( + id: StreamId, + init_send_window: WindowSize, + init_recv_window: 
WindowSize, + ) -> Stream { + let mut send_flow = FlowControl::new(); + let mut recv_flow = FlowControl::new(); + + recv_flow + .inc_window(init_recv_window) + .ok() + .expect("invalid initial receive window"); + recv_flow.assign_capacity(init_recv_window); + + send_flow + .inc_window(init_send_window) + .ok() + .expect("invalid initial send window size"); + + Stream { + id, + state: State::default(), + ref_count: 0, + is_counted: false, + + // ===== Fields related to sending ===== + next_pending_send: None, + is_pending_send: false, + send_flow: send_flow, + requested_send_capacity: 0, + buffered_send_data: 0, + send_task: None, + pending_send: buffer::Deque::new(), + is_pending_send_capacity: false, + next_pending_send_capacity: None, + send_capacity_inc: false, + is_pending_open: false, + next_open: None, + + // ===== Fields related to receiving ===== + next_pending_accept: None, + is_pending_accept: false, + recv_flow: recv_flow, + in_flight_recv_data: 0, + next_window_update: None, + is_pending_window_update: false, + reset_at: None, + next_reset_expire: None, + pending_recv: buffer::Deque::new(), + recv_task: None, + pending_push_promises: store::Queue::new(), + content_length: ContentLength::Omitted, + } + } + + /// Increment the stream's ref count + pub fn ref_inc(&mut self) { + assert!(self.ref_count < usize::MAX); + self.ref_count += 1; + } + + /// Decrements the stream's ref count + pub fn ref_dec(&mut self) { + assert!(self.ref_count > 0); + self.ref_count -= 1; + } + + /// Returns true if stream is currently being held for some time because of + /// a local reset. + pub fn is_pending_reset_expiration(&self) -> bool { + self.reset_at.is_some() + } + + /// Returns true if the stream is closed + pub fn is_closed(&self) -> bool { + // The state has fully transitioned to closed. + self.state.is_closed() && + // Because outbound frames transition the stream state before being + // buffered, we have to ensure that all frames have been flushed. 
+ self.pending_send.is_empty() && + // Sometimes large data frames are sent out in chunks. After a chunk + // of the frame is sent, the remainder is pushed back onto the send + // queue to be rescheduled. + // + // Checking for additional buffered data lets us catch this case. + self.buffered_send_data == 0 + } + + /// Returns true if the stream is no longer in use + pub fn is_released(&self) -> bool { + // The stream is closed and fully flushed + self.is_closed() && + // There are no more outstanding references to the stream + self.ref_count == 0 && + // The stream is not in any queue + !self.is_pending_send && !self.is_pending_send_capacity && + !self.is_pending_accept && !self.is_pending_window_update && + !self.is_pending_open && !self.reset_at.is_some() + } + + /// Returns true when the consumer of the stream has dropped all handles + /// (indicating no further interest in the stream) and the stream state is + /// not actually closed. + /// + /// In this case, a reset should be sent. + pub fn is_canceled_interest(&self) -> bool { + self.ref_count == 0 && !self.state.is_closed() + } + + pub fn assign_capacity(&mut self, capacity: WindowSize) { + debug_assert!(capacity > 0); + self.send_capacity_inc = true; + self.send_flow.assign_capacity(capacity); + + trace!(" assigned capacity to stream; available={}; buffered={}; id={:?}", + self.send_flow.available(), self.buffered_send_data, self.id); + + // Only notify if the capacity exceeds the amount of buffered data + if self.send_flow.available() > self.buffered_send_data { + trace!(" notifying task"); + self.notify_send(); + } + } + + /// Returns `Err` when the decrement cannot be completed due to overflow. 
+ pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> { + match self.content_length { + ContentLength::Remaining(ref mut rem) => match rem.checked_sub(len as u64) { + Some(val) => *rem = val, + None => return Err(()), + }, + ContentLength::Head => return Err(()), + _ => {}, + } + + Ok(()) + } + + pub fn ensure_content_length_zero(&self) -> Result<(), ()> { + match self.content_length { + ContentLength::Remaining(0) => Ok(()), + ContentLength::Remaining(_) => Err(()), + _ => Ok(()), + } + } + + pub fn notify_send(&mut self) { + if let Some(task) = self.send_task.take() { + task.notify(); + } + } + + pub fn wait_send(&mut self) { + self.send_task = Some(task::current()); + } + + pub fn notify_recv(&mut self) { + if let Some(task) = self.recv_task.take() { + task.notify(); + } + } +} + +impl store::Next for NextAccept { + fn next(stream: &Stream) -> Option { + stream.next_pending_accept + } + + fn set_next(stream: &mut Stream, key: Option) { + stream.next_pending_accept = key; + } + + fn take_next(stream: &mut Stream) -> Option { + stream.next_pending_accept.take() + } + + fn is_queued(stream: &Stream) -> bool { + stream.is_pending_accept + } + + fn set_queued(stream: &mut Stream, val: bool) { + stream.is_pending_accept = val; + } +} + +impl store::Next for NextSend { + fn next(stream: &Stream) -> Option { + stream.next_pending_send + } + + fn set_next(stream: &mut Stream, key: Option) { + stream.next_pending_send = key; + } + + fn take_next(stream: &mut Stream) -> Option { + stream.next_pending_send.take() + } + + fn is_queued(stream: &Stream) -> bool { + stream.is_pending_send + } + + fn set_queued(stream: &mut Stream, val: bool) { + if val { + // ensure that stream is not queued for being opened + // if it's being put into queue for sending data + debug_assert_eq!(stream.is_pending_open, false); + } + stream.is_pending_send = val; + } +} + +impl store::Next for NextSendCapacity { + fn next(stream: &Stream) -> Option { + 
stream.next_pending_send_capacity + } + + fn set_next(stream: &mut Stream, key: Option) { + stream.next_pending_send_capacity = key; + } + + fn take_next(stream: &mut Stream) -> Option { + stream.next_pending_send_capacity.take() + } + + fn is_queued(stream: &Stream) -> bool { + stream.is_pending_send_capacity + } + + fn set_queued(stream: &mut Stream, val: bool) { + stream.is_pending_send_capacity = val; + } +} + +impl store::Next for NextWindowUpdate { + fn next(stream: &Stream) -> Option { + stream.next_window_update + } + + fn set_next(stream: &mut Stream, key: Option) { + stream.next_window_update = key; + } + + fn take_next(stream: &mut Stream) -> Option { + stream.next_window_update.take() + } + + fn is_queued(stream: &Stream) -> bool { + stream.is_pending_window_update + } + + fn set_queued(stream: &mut Stream, val: bool) { + stream.is_pending_window_update = val; + } +} + +impl store::Next for NextOpen { + fn next(stream: &Stream) -> Option { + stream.next_open + } + + fn set_next(stream: &mut Stream, key: Option) { + stream.next_open = key; + } + + fn take_next(stream: &mut Stream) -> Option { + stream.next_open.take() + } + + fn is_queued(stream: &Stream) -> bool { + stream.is_pending_open + } + + fn set_queued(stream: &mut Stream, val: bool) { + if val { + // ensure that stream is not queued for being sent + // if it's being put into queue for opening the stream + debug_assert_eq!(stream.is_pending_send, false); + } + stream.is_pending_open = val; + } +} + +impl store::Next for NextResetExpire { + fn next(stream: &Stream) -> Option { + stream.next_reset_expire + } + + fn set_next(stream: &mut Stream, key: Option) { + stream.next_reset_expire = key; + } + + fn take_next(stream: &mut Stream) -> Option { + stream.next_reset_expire.take() + } + + fn is_queued(stream: &Stream) -> bool { + stream.reset_at.is_some() + } + + fn set_queued(stream: &mut Stream, val: bool) { + if val { + stream.reset_at = Some(Instant::now()); + } else { + stream.reset_at = None; 
+ } + } +} + +// ===== impl ContentLength ===== + +impl ContentLength { + pub fn is_head(&self) -> bool { + match *self { + ContentLength::Head => true, + _ => false, + } + } +} diff --git a/third_party/rust/h2/src/proto/streams/streams.rs b/third_party/rust/h2/src/proto/streams/streams.rs new file mode 100644 index 000000000000..de595761546d --- /dev/null +++ b/third_party/rust/h2/src/proto/streams/streams.rs @@ -0,0 +1,1182 @@ +use {client, proto, server}; +use codec::{Codec, RecvError, SendError, UserError}; +use frame::{self, Frame, Reason}; +use proto::{peer, Peer, Open, WindowSize}; +use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId}; +use super::recv::RecvHeaderBlockError; +use super::store::{self, Entry, Resolve, Store}; + +use bytes::{Buf, Bytes}; +use futures::{task, Async, Poll}; +use http::{HeaderMap, Request, Response}; +use tokio_io::AsyncWrite; + +use std::{fmt, io}; +use std::sync::{Arc, Mutex}; + +#[derive(Debug)] +pub(crate) struct Streams +where + P: Peer, +{ + /// Holds most of the connection and stream related state for processing + /// HTTP/2.0 frames associated with streams. + inner: Arc>, + + /// This is the queue of frames to be written to the wire. This is split out + /// to avoid requiring a `B` generic on all public API types even if `B` is + /// not technically required. + /// + /// Currently, splitting this out requires a second `Arc` + `Mutex`. + /// However, it should be possible to avoid this duplication with a little + /// bit of unsafe code. This optimization has been postponed until it has + /// been shown to be necessary. + send_buffer: Arc>, + + _p: ::std::marker::PhantomData

, +} + +/// Reference to the stream state +#[derive(Debug)] +pub(crate) struct StreamRef { + opaque: OpaqueStreamRef, + send_buffer: Arc>, +} + +/// Reference to the stream state that hides the send data chunk generic +pub(crate) struct OpaqueStreamRef { + inner: Arc>, + key: store::Key, +} + +/// Fields needed to manage state related to managing the set of streams. This +/// is mostly split out to make ownership happy. +/// +/// TODO: better name +#[derive(Debug)] +struct Inner { + /// Tracks send & recv stream concurrency. + counts: Counts, + + /// Connection level state and performs actions on streams + actions: Actions, + + /// Stores stream state + store: Store, +} + +#[derive(Debug)] +struct Actions { + /// Manages state transitions initiated by receiving frames + recv: Recv, + + /// Manages state transitions initiated by sending frames + send: Send, + + /// Task that calls `poll_complete`. + task: Option, + + /// If the connection errors, a copy is kept for any StreamRefs. + conn_error: Option, +} + +/// Contains the buffer of frames to be written to the wire. 
+#[derive(Debug)] +struct SendBuffer { + inner: Mutex>>, +} + +// ===== impl Streams ===== + +impl Streams +where + B: Buf, + P: Peer, +{ + pub fn new(config: Config) -> Self { + let peer = P::dyn(); + + Streams { + inner: Arc::new(Mutex::new(Inner { + counts: Counts::new(peer, &config), + actions: Actions { + recv: Recv::new(peer, &config), + send: Send::new(&config), + task: None, + conn_error: None, + }, + store: Store::new(), + })), + send_buffer: Arc::new(SendBuffer::new()), + _p: ::std::marker::PhantomData, + } + } + + pub fn set_target_connection_window_size(&mut self, size: WindowSize) { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + me.actions + .recv + .set_target_connection_window(size, &mut me.actions.task) + } + + /// Process inbound headers + pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), RecvError> { + let id = frame.stream_id(); + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + // The GOAWAY process has begun. All streams with a greater ID than + // specified as part of GOAWAY should be ignored. + if id > me.actions.recv.max_stream_id() { + trace!("id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id()); + return Ok(()); + } + + let key = match me.store.find_entry(id) { + Entry::Occupied(e) => e.key(), + Entry::Vacant(e) => match me.actions.recv.open(id, Open::Headers, &mut me.counts)? { + Some(stream_id) => { + let stream = Stream::new( + stream_id, + me.actions.send.init_window_sz(), + me.actions.recv.init_window_sz(), + ); + + e.insert(stream) + }, + None => return Ok(()), + }, + }; + + let stream = me.store.resolve(key); + + if stream.state.is_local_reset() { + // Locally reset streams must ignore frames "for some time". + // This is because the remote may have sent trailers before + // receiving the RST_STREAM frame. 
+ trace!("recv_headers; ignoring trailers on {:?}", stream.id); + return Ok(()); + } + + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + trace!( + "recv_headers; stream={:?}; state={:?}", + stream.id, + stream.state + ); + + let res = if stream.state.is_recv_headers() { + match actions.recv.recv_headers(frame, stream, counts) { + Ok(()) => Ok(()), + Err(RecvHeaderBlockError::Oversize(resp)) => { + if let Some(resp) = resp { + let _ = actions.send.send_headers( + resp, send_buffer, stream, counts, &mut actions.task); + + actions.send.schedule_implicit_reset( + stream, + Reason::REFUSED_STREAM, + counts, + &mut actions.task); + + actions.recv.enqueue_reset_expiration(stream, counts); + + Ok(()) + } else { + Err(RecvError::Stream { + id: stream.id, + reason: Reason::REFUSED_STREAM, + }) + } + }, + Err(RecvHeaderBlockError::State(err)) => Err(err), + } + } else { + if !frame.is_end_stream() { + // TODO: Is this the right error + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + actions.recv.recv_trailers(frame, stream) + }; + + actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) + }) + } + + pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let id = frame.stream_id(); + + let stream = match me.store.find_mut(&id) { + Some(stream) => stream, + None => { + // The GOAWAY process has begun. All streams with a greater ID + // than specified as part of GOAWAY should be ignored. 
+ if id > me.actions.recv.max_stream_id() { + trace!("id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id()); + return Ok(()); + } + + trace!("recv_data; stream not found: {:?}", id); + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + }, + }; + + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + let sz = frame.payload().len(); + let res = actions.recv.recv_data(frame, stream); + + // Any stream error after receiving a DATA frame means + // we won't give the data to the user, and so they can't + // release the capacity. We do it automatically. + if let Err(RecvError::Stream { .. }) = res { + actions.recv.release_connection_capacity(sz as WindowSize, &mut None); + } + + actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) + }) + } + + pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let id = frame.stream_id(); + + if id.is_zero() { + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + // The GOAWAY process has begun. All streams with a greater ID than + // specified as part of GOAWAY should be ignored. + if id > me.actions.recv.max_stream_id() { + trace!("id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id()); + return Ok(()); + } + + let stream = match me.store.find_mut(&id) { + Some(stream) => stream, + None => { + // TODO: Are there other error cases? 
+ me.actions + .ensure_not_idle(me.counts.peer(), id) + .map_err(RecvError::Connection)?; + + return Ok(()); + }, + }; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + let actions = &mut me.actions; + + me.counts.transition(stream, |_, stream| { + actions.recv.recv_reset(frame, stream); + actions.send.recv_reset(send_buffer, stream); + assert!(stream.state.is_closed()); + Ok(()) + }) + } + + /// Handle a received error and return the ID of the last processed stream. + pub fn recv_err(&mut self, err: &proto::Error) -> StreamId { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let actions = &mut me.actions; + let counts = &mut me.counts; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + let last_processed_id = actions.recv.last_processed_id(); + + me.store + .for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.recv_err(err, &mut *stream); + actions.send.recv_err(send_buffer, stream, counts); + Ok::<_, ()>(()) + }) + }) + .unwrap(); + + actions.conn_error = Some(err.shallow_clone()); + + last_processed_id + } + + pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let actions = &mut me.actions; + let counts = &mut me.counts; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + let last_stream_id = frame.last_stream_id(); + let err = frame.reason().into(); + + if last_stream_id > actions.recv.max_stream_id() { + // The remote endpoint sent a `GOAWAY` frame indicating a stream + // that we never sent, or that we have already terminated on account + // of previous `GOAWAY` frame. In either case, that is illegal. 
+ // (When sending multiple `GOAWAY`s, "Endpoints MUST NOT increase + // the value they send in the last stream identifier, since the + // peers might already have retried unprocessed requests on another + // connection.") + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + actions.recv.go_away(last_stream_id); + + me.store + .for_each(|stream| if stream.id > last_stream_id { + counts.transition(stream, |counts, stream| { + actions.recv.recv_err(&err, &mut *stream); + actions.send.recv_err(send_buffer, stream, counts); + Ok::<_, ()>(()) + }) + } else { + Ok::<_, ()>(()) + }) + .unwrap(); + + actions.conn_error = Some(err); + + Ok(()) + } + + pub fn last_processed_id(&self) -> StreamId { + self.inner.lock().unwrap().actions.recv.last_processed_id() + } + + pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), RecvError> { + let id = frame.stream_id(); + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + if id.is_zero() { + me.actions + .send + .recv_connection_window_update(frame, &mut me.store, &mut me.counts) + .map_err(RecvError::Connection)?; + } else { + // The remote may send window updates for streams that the local now + // considers closed. It's ok... + if let Some(mut stream) = me.store.find_mut(&id) { + // This result is ignored as there is nothing to do when there + // is an error. The stream is reset by the function on error and + // the error is informational. 
+ let _ = me.actions.send.recv_stream_window_update( + frame.size_increment(), + send_buffer, + &mut stream, + &mut me.counts, + &mut me.actions.task, + ); + } else { + me.actions + .recv + .ensure_not_idle(id) + .map_err(RecvError::Connection)?; + } + } + + Ok(()) + } + + pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let id = frame.stream_id(); + let promised_id = frame.promised_id(); + + // First, ensure that the initiating stream is still in a valid state. + let parent_key = match me.store.find_mut(&id) { + Some(stream) => { + // The GOAWAY process has begun. All streams with a greater ID + // than specified as part of GOAWAY should be ignored. + if id > me.actions.recv.max_stream_id() { + trace!("id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id()); + return Ok(()); + } + + // The stream must be receive open + stream.state.ensure_recv_open()?; + stream.key() + } + None => return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)), + }; + + // Ensure that we can reserve streams + me.actions.recv.ensure_can_reserve()?; + + // Next, open the stream. + // + // If `None` is returned, then the stream is being refused. There is no + // further work to be done. + if me.actions.recv.open(promised_id, Open::PushPromise, &mut me.counts)?.is_none() { + return Ok(()); + } + + // Create a scope + let child_key = { + // Create state for the stream + let stream = me.store.insert(promised_id, { + Stream::new( + promised_id, + me.actions.send.init_window_sz(), + me.actions.recv.init_window_sz()) + }); + + let actions = &mut me.actions; + + me.counts.transition(stream, |counts, stream| { + let res = actions.recv.recv_push_promise(frame, stream); + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + actions.reset_on_recv_stream_err(&mut *send_buffer, stream, counts, res) + .map(|_| stream.key()) + })? 
+ }; + + // Push the stream... this requires a bit of indirection to make + // the borrow checker happy. + let mut ppp = me.store[parent_key].pending_push_promises.take(); + ppp.push(&mut me.store.resolve(child_key)); + + let parent = &mut me.store[parent_key]; + + parent.pending_push_promises = ppp; + parent.notify_recv(); + + Ok(()) + } + + pub fn next_incoming(&mut self) -> Option> { + let key = { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + match me.actions.recv.next_incoming(&mut me.store) { + Some(key) => { + let mut stream = me.store.resolve(key); + trace!("next_incoming; id={:?}, state={:?}", stream.id, stream.state); + // Increment the ref count + stream.ref_inc(); + + // Return the key + Some(key) + }, + None => None, + } + }; + + key.map(|key| { + StreamRef { + opaque: OpaqueStreamRef { + inner: self.inner.clone(), + key, + }, + send_buffer: self.send_buffer.clone(), + } + }) + } + + pub fn send_pending_refusal( + &mut self, + dst: &mut Codec>, + ) -> Poll<(), io::Error> + where + T: AsyncWrite, + { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + me.actions.recv.send_pending_refusal(dst) + } + + pub fn clear_expired_reset_streams(&mut self) { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + me.actions.recv.clear_expired_reset_streams(&mut me.store, &mut me.counts); + } + + pub fn poll_complete(&mut self, dst: &mut Codec>) -> Poll<(), io::Error> + where + T: AsyncWrite, + { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + // Send WINDOW_UPDATE frames first + // + // TODO: It would probably be better to interleave updates w/ data + // frames. 
+ try_ready!(me.actions.recv.poll_complete(&mut me.store, &mut me.counts, dst)); + + // Send any other pending frames + try_ready!(me.actions.send.poll_complete( + send_buffer, + &mut me.store, + &mut me.counts, + dst + )); + + // Nothing else to do, track the task + me.actions.task = Some(task::current()); + + Ok(().into()) + } + + pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.apply_remote_settings(frame); + + me.actions.send.apply_remote_settings( + frame, send_buffer, &mut me.store, &mut me.counts, &mut me.actions.task) + } + + pub fn send_request( + &mut self, + request: Request<()>, + end_of_stream: bool, + pending: Option<&OpaqueStreamRef>, + ) -> Result, SendError> { + use http::Method; + use super::stream::ContentLength; + + // TODO: There is a hazard with assigning a stream ID before the + // prioritize layer. If prioritization reorders new streams, this + // implicitly closes the earlier stream IDs. + // + // See: carllerche/h2#11 + let key = { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.actions.ensure_no_conn_error()?; + me.actions.send.ensure_next_stream_id()?; + + // The `pending` argument is provided by the `Client`, and holds + // a store `Key` of a `Stream` that may have been not been opened + // yet. + // + // If that stream is still pending, the Client isn't allowed to + // queue up another pending stream. They should use `poll_ready`. + if let Some(stream) = pending { + if me.store.resolve(stream.key).is_pending_open { + return Err(UserError::Rejected.into()); + } + } + + if me.counts.peer().is_server() { + // Servers cannot open streams. PushPromise must first be reserved. 
+ return Err(UserError::UnexpectedFrameType.into()); + } + + let stream_id = me.actions.send.open()?; + + let mut stream = Stream::new( + stream_id, + me.actions.send.init_window_sz(), + me.actions.recv.init_window_sz(), + ); + + if *request.method() == Method::HEAD { + stream.content_length = ContentLength::Head; + } + + // Convert the message + let headers = client::Peer::convert_send_message( + stream_id, request, end_of_stream)?; + + let mut stream = me.store.insert(stream.id, stream); + + let sent = me.actions.send.send_headers( + headers, + send_buffer, + &mut stream, + &mut me.counts, + &mut me.actions.task, + ); + + // send_headers can return a UserError, if it does, + // we should forget about this stream. + if let Err(err) = sent { + stream.unlink(); + stream.remove(); + return Err(err.into()); + } + + // Given that the stream has been initialized, it should not be in the + // closed state. + debug_assert!(!stream.state.is_closed()); + + // Increment the stream ref count as we will be returning a handle. 
+ stream.ref_inc(); + + stream.key() + }; + + Ok(StreamRef { + opaque: OpaqueStreamRef { + inner: self.inner.clone(), + key: key, + }, + send_buffer: self.send_buffer.clone(), + }) + } + + pub fn send_reset(&mut self, id: StreamId, reason: Reason) { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let key = match me.store.find_entry(id) { + Entry::Occupied(e) => e.key(), + Entry::Vacant(e) => match me.actions.recv.open(id, Open::Headers, &mut me.counts) { + Ok(Some(stream_id)) => { + let stream = Stream::new(stream_id, 0, 0); + + e.insert(stream) + }, + _ => return, + }, + }; + + let stream = me.store.resolve(key); + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + actions.send.send_reset( + reason, send_buffer, stream, counts, &mut actions.task); + actions.recv.enqueue_reset_expiration(stream, counts) + }) + } + + pub fn send_go_away(&mut self, last_processed_id: StreamId) { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + let actions = &mut me.actions; + actions.recv.go_away(last_processed_id); + } +} + +impl Streams +where + B: Buf, +{ + pub fn poll_pending_open(&mut self, pending: Option<&OpaqueStreamRef>) -> Poll<(), ::Error> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + me.actions.ensure_no_conn_error()?; + me.actions.send.ensure_next_stream_id()?; + + if let Some(pending) = pending { + let mut stream = me.store.resolve(pending.key); + trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); + if stream.is_pending_open { + stream.wait_send(); + return Ok(Async::NotReady); + } + } + Ok(().into()) + } +} + +impl Streams +where + P: Peer, +{ + /// This function is safe to call multiple times. + /// + /// A `Result` is returned to avoid panicking if the mutex is poisoned. 
+ pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { + let mut me = self.inner.lock().map_err(|_| ())?; + let me = &mut *me; + + let actions = &mut me.actions; + let counts = &mut me.counts; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + if actions.conn_error.is_none() { + actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); + } + + trace!("Streams::recv_eof"); + + me.store + .for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.recv_eof(stream); + + // This handles resetting send state associated with the + // stream + actions.send.recv_err(send_buffer, stream, counts); + Ok::<_, ()>(()) + }) + }) + .expect("recv_eof"); + + actions.clear_queues(clear_pending_accept, &mut me.store, counts); + Ok(()) + } + + pub fn num_active_streams(&self) -> usize { + let me = self.inner.lock().unwrap(); + me.store.num_active_streams() + } + + pub fn has_streams_or_other_references(&self) -> bool { + if Arc::strong_count(&self.inner) > 1 { + return true; + } + + if Arc::strong_count(&self.send_buffer) > 1 { + return true; + } + + let me = self.inner.lock().unwrap(); + me.counts.has_streams() + } + + #[cfg(feature = "unstable")] + pub fn num_wired_streams(&self) -> usize { + let me = self.inner.lock().unwrap(); + me.store.num_wired_streams() + } +} + +// no derive because we don't need B and P to be Clone. 
+impl Clone for Streams +where + P: Peer, +{ + fn clone(&self) -> Self { + Streams { + inner: self.inner.clone(), + send_buffer: self.send_buffer.clone(), + _p: ::std::marker::PhantomData, + } + } +} + +// ===== impl StreamRef ===== + +impl StreamRef { + pub fn send_data(&mut self, data: B, end_stream: bool) -> Result<(), UserError> + where + B: Buf, + { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let stream = me.store.resolve(self.opaque.key); + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + // Create the data frame + let mut frame = frame::Data::new(stream.id, data); + frame.set_end_stream(end_stream); + + // Send the data frame + actions.send.send_data( + frame, + send_buffer, + stream, + counts, + &mut actions.task) + }) + } + + pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), UserError> { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let stream = me.store.resolve(self.opaque.key); + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + // Create the trailers frame + let frame = frame::Headers::trailers(stream.id, trailers); + + // Send the trailers frame + actions.send.send_trailers( + frame, send_buffer, stream, counts, &mut actions.task) + }) + } + + pub fn send_reset(&mut self, reason: Reason) { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let stream = me.store.resolve(self.opaque.key); + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + actions.send.send_reset( + reason, send_buffer, stream, counts, &mut actions.task) + }) + } + + pub fn 
send_response( + &mut self, + response: Response<()>, + end_of_stream: bool, + ) -> Result<(), UserError> { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let stream = me.store.resolve(self.opaque.key); + let actions = &mut me.actions; + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.transition(stream, |counts, stream| { + let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream); + + actions.send.send_headers( + frame, send_buffer, stream, counts, &mut actions.task) + }) + } + + /// Called by the server after the stream is accepted. Given that clients + /// initialize streams by sending HEADERS, the request will always be + /// available. + /// + /// # Panics + /// + /// This function panics if the request isn't present. + pub fn take_request(&self) -> Request<()> { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.opaque.key); + me.actions.recv.take_request(&mut stream) + } + + /// Called by a client to see if the current stream is pending open + pub fn is_pending_open(&self) -> bool { + let mut me = self.opaque.inner.lock().unwrap(); + me.store.resolve(self.opaque.key).is_pending_open + } + + /// Request capacity to send data + pub fn reserve_capacity(&mut self, capacity: WindowSize) { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.opaque.key); + + me.actions.send.reserve_capacity(capacity, &mut stream, &mut me.counts) + } + + /// Returns the stream's current send capacity. 
+ pub fn capacity(&self) -> WindowSize { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.opaque.key); + + me.actions.send.capacity(&mut stream) + } + + /// Request to be notified when the stream's capacity increases + pub fn poll_capacity(&mut self) -> Poll, UserError> { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.opaque.key); + + me.actions.send.poll_capacity(&mut stream) + } + + /// Request to be notified for if a `RST_STREAM` is received for this stream. + pub(crate) fn poll_reset(&mut self, mode: proto::PollReset) -> Poll { + let mut me = self.opaque.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.opaque.key); + + me.actions.send.poll_reset(&mut stream, mode) + .map_err(From::from) + } + + pub fn clone_to_opaque(&self) -> OpaqueStreamRef + where B: 'static, + { + self.opaque.clone() + } + + pub fn stream_id(&self) -> StreamId { + self.opaque.stream_id() + } +} + +impl Clone for StreamRef { + fn clone(&self) -> Self { + StreamRef { + opaque: self.opaque.clone(), + send_buffer: self.send_buffer.clone(), + } + } +} + +// ===== impl OpaqueStreamRef ===== + +impl OpaqueStreamRef { + /// Called by a client to check for a received response. 
+ pub fn poll_response(&mut self) -> Poll, proto::Error> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.key); + + me.actions.recv.poll_response(&mut stream) + } + + pub fn body_is_empty(&self) -> bool { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let stream = me.store.resolve(self.key); + + me.actions.recv.body_is_empty(&stream) + } + + pub fn is_end_stream(&self) -> bool { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let stream = me.store.resolve(self.key); + + me.actions.recv.is_end_stream(&stream) + } + + pub fn poll_data(&mut self) -> Poll, proto::Error> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.key); + + me.actions.recv.poll_data(&mut stream) + } + + pub fn poll_trailers(&mut self) -> Poll, proto::Error> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.key); + + me.actions.recv.poll_trailers(&mut stream) + } + + /// Releases recv capacity back to the peer. This may result in sending + /// WINDOW_UPDATE frames on both the stream and connection. 
+ pub fn release_capacity(&mut self, capacity: WindowSize) -> Result<(), UserError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut stream = me.store.resolve(self.key); + + me.actions + .recv + .release_capacity(capacity, &mut stream, &mut me.actions.task) + } + + pub fn stream_id(&self) -> StreamId { + self.inner.lock() + .unwrap() + .store[self.key] + .id + } +} + +impl fmt::Debug for OpaqueStreamRef { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self.inner.lock() { + Ok(me) => { + let stream = &me.store[self.key]; + fmt.debug_struct("OpaqueStreamRef") + .field("stream_id", &stream.id) + .field("ref_count", &stream.ref_count) + .finish() + }, + Err(_poisoned) => fmt.debug_struct("OpaqueStreamRef") + .field("inner", &"") + .finish(), + } + } +} + +impl Clone for OpaqueStreamRef { + fn clone(&self) -> Self { + // Increment the ref count + self.inner.lock().unwrap().store.resolve(self.key).ref_inc(); + + OpaqueStreamRef { + inner: self.inner.clone(), + key: self.key.clone(), + } + } +} + +impl Drop for OpaqueStreamRef { + fn drop(&mut self) { + drop_stream_ref(&self.inner, self.key); + } +} + +// TODO: Move back in fn above +fn drop_stream_ref(inner: &Mutex, key: store::Key) { + let mut me = match inner.lock() { + Ok(inner) => inner, + Err(_) => if ::std::thread::panicking() { + trace!("StreamRef::drop; mutex poisoned"); + return; + } else { + panic!("StreamRef::drop; mutex poisoned"); + }, + }; + + let me = &mut *me; + + let mut stream = me.store.resolve(key); + + trace!("drop_stream_ref; stream={:?}", stream); + + // decrement the stream's ref count by 1. 
+ stream.ref_dec(); + + let actions = &mut me.actions; + + // If the stream is not referenced and it is already + // closed (does not have to go through logic below + // of canceling the stream), we should notify the task + // (connection) so that it can close properly + if stream.ref_count == 0 && stream.is_closed() { + if let Some(task) = actions.task.take() { + task.notify(); + } + } + + me.counts.transition(stream, |counts, stream| { + maybe_cancel(stream, actions, counts); + + if stream.ref_count == 0 { + let mut ppp = stream.pending_push_promises.take(); + while let Some(promise) = ppp.pop(stream.store_mut()) { + counts.transition(promise, |counts, stream| { + maybe_cancel(stream, actions, counts); + }); + } + } + }); +} + +fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) { + if stream.is_canceled_interest() { + actions.send.schedule_implicit_reset( + stream, + Reason::CANCEL, + counts, + &mut actions.task); + actions.recv.enqueue_reset_expiration(stream, counts); + } +} + +// ===== impl SendBuffer ===== + +impl SendBuffer { + fn new() -> Self { + let inner = Mutex::new(Buffer::new()); + SendBuffer { inner } + } +} + +// ===== impl Actions ===== + +impl Actions { + fn reset_on_recv_stream_err( + &mut self, + buffer: &mut Buffer>, + stream: &mut store::Ptr, + counts: &mut Counts, + res: Result<(), RecvError>, + ) -> Result<(), RecvError> { + if let Err(RecvError::Stream { + reason, .. + }) = res + { + // Reset the stream. 
+ self.send.send_reset(reason, buffer, stream, counts, &mut self.task); + Ok(()) + } else { + res + } + } + + fn ensure_not_idle(&mut self, peer: peer::Dyn, id: StreamId) -> Result<(), Reason> { + if peer.is_local_init(id) { + self.send.ensure_not_idle(id) + } else { + self.recv.ensure_not_idle(id) + } + } + + fn ensure_no_conn_error(&self) -> Result<(), proto::Error> { + if let Some(ref err) = self.conn_error { + Err(err.shallow_clone()) + } else { + Ok(()) + } + } + + fn clear_queues(&mut self, + clear_pending_accept: bool, + store: &mut Store, + counts: &mut Counts) + { + self.recv.clear_queues(clear_pending_accept, store, counts); + self.send.clear_queues(store, counts); + } +} diff --git a/third_party/rust/h2/src/server.rs b/third_party/rust/h2/src/server.rs new file mode 100644 index 000000000000..e07946e99561 --- /dev/null +++ b/third_party/rust/h2/src/server.rs @@ -0,0 +1,1322 @@ +//! Server implementation of the HTTP/2.0 protocol. +//! +//! # Getting started +//! +//! Running an HTTP/2.0 server requires the caller to manage accepting the +//! connections as well as getting the connections to a state that is ready to +//! begin the HTTP/2.0 handshake. See [here](../index.html#handshake) for more +//! details. +//! +//! This could be as basic as using Tokio's [`TcpListener`] to accept +//! connections, but usually it means using either ALPN or HTTP/1.1 protocol +//! upgrades. +//! +//! Once a connection is obtained, it is passed to [`handshake`], +//! which will begin the [HTTP/2.0 handshake]. This returns a future that +//! completes once the handshake process is performed and HTTP/2.0 streams may +//! be received. +//! +//! [`handshake`] uses default configuration values. There are a number of +//! settings that can be changed by using [`Builder`] instead. +//! +//! # Inbound streams +//! +//! The [`Connection`] instance is used to accept inbound HTTP/2.0 streams. It +//! does this by implementing [`futures::Stream`]. When a new stream is +//! 
received, a call to [`Connection::poll`] will return `(request, response)`. +//! The `request` handle (of type [`http::Request`]) contains the +//! HTTP request head as well as provides a way to receive the inbound data +//! stream and the trailers. The `response` handle (of type [`SendStream`]) +//! allows responding to the request, stream the response payload, send +//! trailers, and send push promises. +//! +//! The send ([`SendStream`]) and receive ([`RecvStream`]) halves of the stream +//! can be operated independently. +//! +//! # Managing the connection +//! +//! The [`Connection`] instance is used to manage connection state. The caller +//! is required to call either [`Connection::poll`] or +//! [`Connection::poll_close`] in order to advance the connection state. Simply +//! operating on [`SendStream`] or [`RecvStream`] will have no effect unless the +//! connection state is advanced. +//! +//! It is not required to call **both** [`Connection::poll`] and +//! [`Connection::poll_close`]. If the caller is ready to accept a new stream, +//! then only [`Connection::poll`] should be called. When the caller **does +//! not** want to accept a new stream, [`Connection::poll_close`] should be +//! called. +//! +//! The [`Connection`] instance should only be dropped once +//! [`Connection::poll_close`] returns `Ready`. Once [`Connection::poll`] +//! returns `Ready(None)`, there will no longer be any more inbound streams. At +//! this point, only [`Connection::poll_close`] should be called. +//! +//! # Shutting down the server +//! +//! Graceful shutdown of the server is [not yet +//! implemented](https://github.com/carllerche/h2/issues/69). +//! +//! # Example +//! +//! A basic HTTP/2.0 server example that runs over TCP and assumes [prior +//! knowledge], i.e. both the client and the server assume that the TCP socket +//! will use the HTTP/2.0 protocol without prior negotiation. +//! +//! ```rust +//! extern crate futures; +//! extern crate h2; +//! 
extern crate http; +//! extern crate tokio_core; +//! +//! use futures::{Future, Stream}; +//! # use futures::future::ok; +//! use h2::server; +//! use http::{Response, StatusCode}; +//! use tokio_core::reactor; +//! use tokio_core::net::TcpListener; +//! +//! pub fn main () { +//! let mut core = reactor::Core::new().unwrap(); +//! let handle = core.handle(); +//! +//! let addr = "127.0.0.1:5928".parse().unwrap(); +//! let listener = TcpListener::bind(&addr, &handle).unwrap(); +//! +//! core.run({ +//! // Accept all incoming TCP connections. +//! listener.incoming().for_each(move |(socket, _)| { +//! // Spawn a new task to process each connection. +//! handle.spawn({ +//! // Start the HTTP/2.0 connection handshake +//! server::handshake(socket) +//! .and_then(|h2| { +//! // Accept all inbound HTTP/2.0 streams sent over the +//! // connection. +//! h2.for_each(|(request, mut respond)| { +//! println!("Received request: {:?}", request); +//! +//! // Build a response with no body +//! let response = Response::builder() +//! .status(StatusCode::OK) +//! .body(()) +//! .unwrap(); +//! +//! // Send the response back to the client +//! respond.send_response(response, true) +//! .unwrap(); +//! +//! Ok(()) +//! }) +//! }) +//! .map_err(|e| panic!("unexpected error = {:?}", e)) +//! }); +//! +//! Ok(()) +//! }) +//! # .select(ok(())) +//! }).ok().expect("failed to run HTTP/2.0 server"); +//! } +//! ``` +//! +//! [prior knowledge]: http://httpwg.org/specs/rfc7540.html#known-http +//! [`handshake`]: fn.handshake.html +//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +//! [`Builder`]: struct.Builder.html +//! [`Connection`]: struct.Connection.html +//! [`Connection::poll`]: struct.Connection.html#method.poll +//! [`Connection::poll_close`]: struct.Connection.html#method.poll_close +//! [`futures::Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html +//! [`http::Request`]: ../struct.RecvStream.html +//! 
[`RecvStream`]: ../struct.RecvStream.html +//! [`SendStream`]: ../struct.SendStream.html +//! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html + +use {SendStream, RecvStream, ReleaseCapacity}; +use codec::{Codec, RecvError}; +use frame::{self, Reason, Settings, StreamId}; +use proto::{self, Config, Prioritized}; + +use bytes::{Buf, Bytes, IntoBuf}; +use futures::{self, Async, Future, Poll}; +use http::{Request, Response}; +use std::{convert, fmt, io, mem}; +use std::time::Duration; +use tokio_io::{AsyncRead, AsyncWrite}; + +/// In progress HTTP/2.0 connection handshake future. +/// +/// This type implements `Future`, yielding a `Connection` instance once the +/// handshake has completed. +/// +/// The handshake is completed once the connection preface is fully received +/// from the client **and** the initial settings frame is sent to the client. +/// +/// The handshake future does not wait for the initial settings frame from the +/// client. +/// +/// See [module] level docs for more details. +/// +/// [module]: index.html +#[must_use = "futures do nothing unless polled"] +pub struct Handshake { + /// The config to pass to Connection::new after handshake succeeds. + builder: Builder, + /// The current state of the handshake. + state: Handshaking +} + +/// Accepts inbound HTTP/2.0 streams on a connection. +/// +/// A `Connection` is backed by an I/O resource (usually a TCP socket) and +/// implements the HTTP/2.0 server logic for that connection. It is responsible +/// for receiving inbound streams initiated by the client as well as driving the +/// internal state forward. +/// +/// `Connection` values are created by calling [`handshake`]. Once a +/// `Connection` value is obtained, the caller must call [`poll`] or +/// [`poll_close`] in order to drive the internal connection state forward. 
+/// +/// See [module level] documentation for more details +/// +/// [module level]: index.html +/// [`handshake`]: struct.Connection.html#method.handshake +/// [`poll`]: struct.Connection.html#method.poll +/// [`poll_close`]: struct.Connection.html#method.poll_close +/// +/// # Examples +/// +/// ``` +/// # extern crate futures; +/// # extern crate h2; +/// # extern crate tokio_io; +/// # use futures::{Future, Stream}; +/// # use tokio_io::*; +/// # use h2::server; +/// # use h2::server::*; +/// # +/// # fn doc(my_io: T) { +/// server::handshake(my_io) +/// .and_then(|server| { +/// server.for_each(|(request, respond)| { +/// // Process the request and send the response back to the client +/// // using `respond`. +/// # Ok(()) +/// }) +/// }) +/// # .wait().unwrap(); +/// # } +/// # +/// # pub fn main() {} +/// ``` +#[must_use = "streams do nothing unless polled"] +pub struct Connection { + connection: proto::Connection, +} + +/// Builds server connections with custom configuration values. +/// +/// Methods can be chained in order to set the configuration values. +/// +/// The server is constructed by calling [`handshake`] and passing the I/O +/// handle that will back the HTTP/2.0 server. +/// +/// New instances of `Builder` are obtained via [`Builder::new`]. +/// +/// See function level documentation for details on the various server +/// configuration settings. +/// +/// [`Builder::new`]: struct.Builder.html#method.new +/// [`handshake`]: struct.Builder.html#method.handshake +/// +/// # Examples +/// +/// ``` +/// # extern crate h2; +/// # extern crate tokio_io; +/// # use tokio_io::*; +/// # use h2::server::*; +/// # +/// # fn doc(my_io: T) +/// # -> Handshake +/// # { +/// // `server_fut` is a future representing the completion of the HTTP/2.0 +/// // handshake. 
+/// let server_fut = Builder::new() +/// .initial_window_size(1_000_000) +/// .max_concurrent_streams(1000) +/// .handshake(my_io); +/// # server_fut +/// # } +/// # +/// # pub fn main() {} +/// ``` +#[derive(Clone, Debug)] +pub struct Builder { + /// Time to keep locally reset streams around before reaping. + reset_stream_duration: Duration, + + /// Maximum number of locally reset streams to keep at a time. + reset_stream_max: usize, + + /// Initial `Settings` frame to send as part of the handshake. + settings: Settings, + + /// Initial target window size for new connections. + initial_target_connection_window_size: Option, +} + +/// Send a response back to the client +/// +/// A `SendResponse` instance is provided when receiving a request and is used +/// to send the associated response back to the client. It is also used to +/// explicitly reset the stream with a custom reason. +/// +/// It will also be used to initiate push promises linked with the associated +/// stream. This is [not yet +/// implemented](https://github.com/carllerche/h2/issues/185). +/// +/// If the `SendResponse` instance is dropped without sending a response, then +/// the HTTP/2.0 stream will be reset. +/// +/// See [module] level docs for more details. +/// +/// [module]: index.html +#[derive(Debug)] +pub struct SendResponse { + inner: proto::StreamRef, +} + +/// Stages of an in-progress handshake. +enum Handshaking { + /// State 1. Connection is flushing pending SETTINGS frame. + Flushing(Flush>), + /// State 2. Connection is waiting for the client preface. + ReadingPreface(ReadPreface>), + /// Dummy state for `mem::replace`. 
+ Empty, +} + +/// Flush a Sink +struct Flush { + codec: Option>, +} + +/// Read the client connection preface +struct ReadPreface { + codec: Option>, + pos: usize, +} + +#[derive(Debug)] +pub(crate) struct Peer; + +const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/// Creates a new configured HTTP/2.0 server with default configuration +/// values backed by `io`. +/// +/// It is expected that `io` already be in an appropriate state to commence +/// the [HTTP/2.0 handshake]. See [Handshake] for more details. +/// +/// Returns a future which resolves to the [`Connection`] instance once the +/// HTTP/2.0 handshake has been completed. The returned [`Connection`] +/// instance will be using default configuration values. Use [`Builder`] to +/// customize the configuration values used by a [`Connection`] instance. +/// +/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +/// [Handshake]: ../index.html#handshake +/// [`Connection`]: struct.Connection.html +/// +/// # Examples +/// +/// ``` +/// # extern crate futures; +/// # extern crate h2; +/// # extern crate tokio_io; +/// # use tokio_io::*; +/// # use futures::*; +/// # use h2::server; +/// # use h2::server::*; +/// # +/// # fn doc(my_io: T) +/// # { +/// server::handshake(my_io) +/// .and_then(|connection| { +/// // The HTTP/2.0 handshake has completed, now use `connection` to +/// // accept inbound HTTP/2.0 streams. +/// # Ok(()) +/// }) +/// # .wait().unwrap(); +/// # } +/// # +/// # pub fn main() {} +/// ``` +pub fn handshake(io: T) -> Handshake +where T: AsyncRead + AsyncWrite, +{ + Builder::new().handshake(io) +} + +// ===== impl Connection ===== + +impl Connection +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + fn handshake2(io: T, builder: Builder) -> Handshake { + // Create the codec. 
+ let mut codec = Codec::new(io); + + if let Some(max) = builder.settings.max_frame_size() { + codec.set_max_recv_frame_size(max as usize); + } + + if let Some(max) = builder.settings.max_header_list_size() { + codec.set_max_recv_header_list_size(max as usize); + } + + // Send initial settings frame. + codec + .buffer(builder.settings.clone().into()) + .expect("invalid SETTINGS frame"); + + // Create the handshake future. + let state = Handshaking::from(codec); + + Handshake { builder, state } + } + + /// Sets the target window size for the whole connection. + /// + /// If `size` is greater than the current value, then a `WINDOW_UPDATE` + /// frame will be immediately sent to the remote, increasing the connection + /// level window by `size - current_value`. + /// + /// If `size` is less than the current value, nothing will happen + /// immediately. However, as window capacity is released by + /// [`ReleaseCapacity`] instances, no `WINDOW_UPDATE` frames will be sent + /// out until the number of "in flight" bytes drops below `size`. + /// + /// The default value is 65,535. + /// + /// See [`ReleaseCapacity`] documentation for more details. + /// + /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html + /// [library level]: ../index.html#flow-control + pub fn set_target_window_size(&mut self, size: u32) { + assert!(size <= proto::MAX_WINDOW_SIZE); + self.connection.set_target_window_size(size); + } + + /// Returns `Ready` when the underlying connection has closed. + /// + /// If any new inbound streams are received during a call to `poll_close`, + /// they will be queued and returned on the next call to [`poll`]. + /// + /// This function will advance the internal connection state, driving + /// progress on all the other handles (e.g. [`RecvStream`] and [`SendStream`]). + /// + /// See [here](index.html#managing-the-connection) for more details. 
+ /// + /// [`poll`]: struct.Connection.html#method.poll + /// [`RecvStream`]: ../struct.RecvStream.html + /// [`SendStream`]: ../struct.SendStream.html + pub fn poll_close(&mut self) -> Poll<(), ::Error> { + self.connection.poll().map_err(Into::into) + } + + #[deprecated(note="use abrupt_shutdown or graceful_shutdown instead", since="0.1.4")] + #[doc(hidden)] + pub fn close_connection(&mut self) { + self.graceful_shutdown(); + } + + /// Sets the connection to a GOAWAY state. + /// + /// Does not terminate the connection. Must continue being polled to close + /// connection. + /// + /// After flushing the GOAWAY frame, the connection is closed. Any + /// outstanding streams do not prevent the connection from closing. This + /// should usually be reserved for shutting down when something bad + /// external to `h2` has happened, and open streams cannot be properly + /// handled. + /// + /// For graceful shutdowns, see [`graceful_shutdown`](Connection::graceful_shutdown). + pub fn abrupt_shutdown(&mut self, reason: Reason) { + self.connection.go_away_now(reason); + } + + /// Starts a [graceful shutdown][1] process. + /// + /// Must continue being polled to close connection. + /// + /// It's possible to receive more requests after calling this method, since + /// they might have been in-flight from the client already. After about + /// 1 RTT, no new requests should be accepted. Once all active streams + /// have completed, the connection is closed. + /// + /// [1]: http://httpwg.org/specs/rfc7540.html#GOAWAY + pub fn graceful_shutdown(&mut self) { + self.connection.go_away_gracefully(); + } +} + +impl futures::Stream for Connection +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, + B::Buf: 'static, +{ + type Item = (Request, SendResponse); + type Error = ::Error; + + fn poll(&mut self) -> Poll, ::Error> { + // Always try to advance the internal state. Getting NotReady also is + // needed to allow this function to return NotReady. + match self.poll_close()? 
{ + Async::Ready(_) => { + // If the socket is closed, don't return anything + // TODO: drop any pending streams + return Ok(None.into()); + }, + _ => {}, + } + + if let Some(inner) = self.connection.next_incoming() { + trace!("received incoming"); + let (head, _) = inner.take_request().into_parts(); + let body = RecvStream::new(ReleaseCapacity::new(inner.clone_to_opaque())); + + let request = Request::from_parts(head, body); + let respond = SendResponse { inner }; + + return Ok(Some((request, respond)).into()); + } + + Ok(Async::NotReady) + } +} + +impl fmt::Debug for Connection +where + T: fmt::Debug, + B: fmt::Debug + IntoBuf, + B::Buf: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Connection") + .field("connection", &self.connection) + .finish() + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Returns a new server builder instance initialized with default + /// configuration values. + /// + /// Configuration methods can be chained on the return value. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let server_fut = Builder::new() + /// .initial_window_size(1_000_000) + /// .max_concurrent_streams(1000) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn new() -> Builder { + Builder { + reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), + reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, + settings: Settings::default(), + initial_target_connection_window_size: None, + } + } + + /// Indicates the initial window size (in octets) for stream-level + /// flow control for received data. + /// + /// The initial window of a stream is used as part of flow control. 
For more + /// details, see [`ReleaseCapacity`]. + /// + /// The default value is 65,535. + /// + /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let server_fut = Builder::new() + /// .initial_window_size(1_000_000) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn initial_window_size(&mut self, size: u32) -> &mut Self { + self.settings.set_initial_window_size(Some(size)); + self + } + + /// Indicates the initial window size (in octets) for connection-level flow control + /// for received data. + /// + /// The initial window of a connection is used as part of flow control. For more details, + /// see [`ReleaseCapacity`]. + /// + /// The default value is 65,535. + /// + /// [`ReleaseCapacity`]: ../struct.ReleaseCapacity.html + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let server_fut = Builder::new() + /// .initial_connection_window_size(1_000_000) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self { + self.initial_target_connection_window_size = Some(size); + self + } + + /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the + /// configured server is able to accept. 
+ /// + /// The sender may send data frames that are **smaller** than this value, + /// but any data larger than `max` will be broken up into multiple `DATA` + /// frames. + /// + /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let server_fut = Builder::new() + /// .max_frame_size(1_000_000) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + /// + /// # Panics + /// + /// This function panics if `max` is not within the legal range specified + /// above. + pub fn max_frame_size(&mut self, max: u32) -> &mut Self { + self.settings.set_max_frame_size(Some(max)); + self + } + + /// Sets the max size of received header frames. + /// + /// This advisory setting informs a peer of the maximum size of header list + /// that the sender is prepared to accept, in octets. The value is based on + /// the uncompressed size of header fields, including the length of the name + /// and value in octets plus an overhead of 32 octets for each header field. + /// + /// This setting is also used to limit the maximum amount of data that is + /// buffered to decode HEADERS frames. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. 
+ /// let server_fut = Builder::new() + /// .max_header_list_size(16 * 1024) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.settings.set_max_header_list_size(Some(max)); + self + } + + /// Sets the maximum number of concurrent streams. + /// + /// The maximum concurrent streams setting only controls the maximum number + /// of streams that can be initiated by the remote peer. In other words, + /// when this setting is set to 100, this does not limit the number of + /// concurrent streams that can be created by the caller. + /// + /// It is recommended that this value be no smaller than 100, so as to not + /// unnecessarily limit parallelism. However, any value is legal, including + /// 0. If `max` is set to 0, then the remote will not be permitted to + /// initiate streams. + /// + /// Note that streams in the reserved state, i.e., push promises that have + /// been reserved but the stream has not started, do not count against this + /// setting. + /// + /// Also note that if the remote *does* exceed the value set here, it is not + /// a protocol level error. Instead, the `h2` library will immediately reset + /// the stream. + /// + /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// + /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. 
+ /// let server_fut = Builder::new() + /// .max_concurrent_streams(1000) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self { + self.settings.set_max_concurrent_streams(Some(max)); + self + } + + /// Sets the maximum number of concurrent locally reset streams. + /// + /// When a stream is explicitly reset by either calling + /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance + /// before completing the stream, the HTTP/2.0 specification requires that + /// any further frames received for that stream must be ignored for "some + /// time". + /// + /// In order to satisfy the specification, internal state must be maintained + /// to implement the behavior. This state grows linearly with the number of + /// streams that are locally reset. + /// + /// The `max_concurrent_reset_streams` setting configures sets an upper + /// bound on the amount of state that is maintained. When this max value is + /// reached, the oldest reset stream is purged from memory. + /// + /// Once the stream has been fully purged from memory, any additional frames + /// received for that stream will result in a connection level protocol + /// error, forcing the connection to terminate. + /// + /// The default value is 10. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. 
+ /// let server_fut = Builder::new() + /// .max_concurrent_reset_streams(1000) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.reset_stream_max = max; + self + } + + /// Sets the maximum number of concurrent locally reset streams. + /// + /// When a stream is explicitly reset by either calling + /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance + /// before completing the stream, the HTTP/2.0 specification requires that + /// any further frames received for that stream must be ignored for "some + /// time". + /// + /// In order to satisfy the specification, internal state must be maintained + /// to implement the behavior. This state grows linearly with the number of + /// streams that are locally reset. + /// + /// The `reset_stream_duration` setting configures the max amount of time + /// this state will be maintained in memory. Once the duration elapses, the + /// stream state is purged from memory. + /// + /// Once the stream has been fully purged from memory, any additional frames + /// received for that stream will result in a connection level protocol + /// error, forcing the connection to terminate. + /// + /// The default value is 30 seconds. + /// + /// # Examples + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # use std::time::Duration; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. 
+ /// let server_fut = Builder::new() + /// .reset_stream_duration(Duration::from_secs(10)) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self { + self.reset_stream_duration = dur; + self + } + + /// Creates a new configured HTTP/2.0 server backed by `io`. + /// + /// It is expected that `io` already be in an appropriate state to commence + /// the [HTTP/2.0 handshake]. See [Handshake] for more details. + /// + /// Returns a future which resolves to the [`Connection`] instance once the + /// HTTP/2.0 handshake has been completed. + /// + /// This function also allows the caller to configure the send payload data + /// type. See [Outbound data type] for more details. + /// + /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader + /// [Handshake]: ../index.html#handshake + /// [`Connection`]: struct.Connection.html + /// [Outbound data type]: ../index.html#outbound-data-type. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. + /// let server_fut = Builder::new() + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + /// + /// Configures the send-payload data type. In this case, the outbound data + /// type will be `&'static [u8]`. + /// + /// ``` + /// # extern crate h2; + /// # extern crate tokio_io; + /// # use tokio_io::*; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // handshake. 
+ /// let server_fut: Handshake<_, &'static [u8]> = Builder::new() + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn handshake(&self, io: T) -> Handshake + where + T: AsyncRead + AsyncWrite, + B: IntoBuf, + B::Buf: 'static, + { + Connection::handshake2(io, self.clone()) + } +} + +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} + +// ===== impl SendResponse ===== + +impl SendResponse { + /// Send a response to a client request. + /// + /// On success, a [`SendStream`] instance is returned. This instance can be + /// used to stream the response body and send trailers. + /// + /// If a body or trailers will be sent on the returned [`SendStream`] + /// instance, then `end_of_stream` must be set to `false` when calling this + /// function. + /// + /// The [`SendResponse`] instance is already associated with a received + /// request. This function may only be called once per instance and only if + /// [`send_reset`] has not been previously called. + /// + /// [`SendResponse`]: # + /// [`SendStream`]: ../struct.SendStream.html + /// [`send_reset`]: #method.send_reset + pub fn send_response( + &mut self, + response: Response<()>, + end_of_stream: bool, + ) -> Result, ::Error> { + self.inner + .send_response(response, end_of_stream) + .map(|_| SendStream::new(self.inner.clone())) + .map_err(Into::into) + } + + /// Send a stream reset to the peer. + /// + /// This essentially cancels the stream, including any inbound or outbound + /// data streams. + /// + /// If this function is called before [`send_response`], a call to + /// [`send_response`] will result in an error. + /// + /// If this function is called while a [`SendStream`] instance is active, + /// any further use of the instance will result in an error. + /// + /// This function should only be called once. 
+ /// + /// [`send_response`]: #method.send_response + /// [`SendStream`]: ../struct.SendStream.html + pub fn send_reset(&mut self, reason: Reason) { + self.inner.send_reset(reason) + } + + /// Polls to be notified when the client resets this stream. + /// + /// If stream is still open, this returns `Ok(Async::NotReady)`, and + /// registers the task to be notified if a `RST_STREAM` is received. + /// + /// If a `RST_STREAM` frame is received for this stream, calling this + /// method will yield the `Reason` for the reset. + /// + /// # Error + /// + /// Calling this method after having called `send_response` will return + /// a user error. + pub fn poll_reset(&mut self) -> Poll { + self.inner.poll_reset(proto::PollReset::AwaitingHeaders) + } + + /// Returns the stream ID of the response stream. + /// + /// # Panics + /// + /// If the lock on the strean store has been poisoned. + pub fn stream_id(&self) -> ::StreamId { + ::StreamId::from_internal(self.inner.stream_id()) + } + + // TODO: Support reserving push promises. 
+} + +// ===== impl Flush ===== + +impl Flush { + fn new(codec: Codec) -> Self { + Flush { + codec: Some(codec), + } + } +} + +impl Future for Flush +where + T: AsyncWrite, + B: Buf, +{ + type Item = Codec; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + // Flush the codec + try_ready!(self.codec.as_mut().unwrap().flush()); + + // Return the codec + Ok(Async::Ready(self.codec.take().unwrap())) + } +} + +impl ReadPreface { + fn new(codec: Codec) -> Self { + ReadPreface { + codec: Some(codec), + pos: 0, + } + } + + fn inner_mut(&mut self) -> &mut T { + self.codec.as_mut().unwrap().get_mut() + } +} + +impl Future for ReadPreface +where + T: AsyncRead, + B: Buf, +{ + type Item = Codec; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + let mut buf = [0; 24]; + let mut rem = PREFACE.len() - self.pos; + + while rem > 0 { + let n = try_nb!(self.inner_mut().read(&mut buf[..rem])); + if n == 0 { + return Err(io::Error::new( + io::ErrorKind::ConnectionReset, + "connection closed unexpectedly", + ).into()); + } + + if PREFACE[self.pos..self.pos + n] != buf[..n] { + // TODO: Should this just write the GO_AWAY frame directly? + return Err(Reason::PROTOCOL_ERROR.into()); + } + + self.pos += n; + rem -= n; // TODO test + } + + Ok(Async::Ready(self.codec.take().unwrap())) + } +} + +// ===== impl Handshake ===== + +impl Future for Handshake + where T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + type Item = Connection; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + trace!("Handshake::poll(); state={:?};", self.state); + use server::Handshaking::*; + + self.state = if let Flushing(ref mut flush) = self.state { + // We're currently flushing a pending SETTINGS frame. Poll the + // flush future, and, if it's completed, advance our state to wait + // for the client preface. + let codec = match flush.poll()? 
{ + Async::NotReady => { + trace!("Handshake::poll(); flush.poll()=NotReady"); + return Ok(Async::NotReady); + }, + Async::Ready(flushed) => { + trace!("Handshake::poll(); flush.poll()=Ready"); + flushed + } + }; + Handshaking::from(ReadPreface::new(codec)) + } else { + // Otherwise, we haven't actually advanced the state, but we have + // to replace it with itself, because we have to return a value. + // (note that the assignment to `self.state` has to be outside of + // the `if let` block above in order to placate the borrow checker). + mem::replace(&mut self.state, Handshaking::Empty) + }; + let poll = if let ReadingPreface(ref mut read) = self.state { + // We're now waiting for the client preface. Poll the `ReadPreface` + // future. If it has completed, we will create a `Connection` handle + // for the connection. + read.poll() + // Actually creating the `Connection` has to occur outside of this + // `if let` block, because we've borrowed `self` mutably in order + // to poll the state and won't be able to borrow the SETTINGS frame + // as well until we release the borrow for `poll()`. 
+ } else { + unreachable!("Handshake::poll() state was not advanced completely!") + }; + let server = poll?.map(|codec| { + let connection = proto::Connection::new(codec, Config { + next_stream_id: 2.into(), + // Server does not need to locally initiate any streams + initial_max_send_streams: 0, + reset_stream_duration: self.builder.reset_stream_duration, + reset_stream_max: self.builder.reset_stream_max, + settings: self.builder.settings.clone(), + }); + + trace!("Handshake::poll(); connection established!"); + let mut c = Connection { connection }; + if let Some(sz) = self.builder.initial_target_connection_window_size { + c.set_target_window_size(sz); + } + c + }); + Ok(server) + } +} + +impl fmt::Debug for Handshake + where T: AsyncRead + AsyncWrite + fmt::Debug, + B: fmt::Debug + IntoBuf, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "server::Handshake") + } +} + +impl Peer { + pub fn convert_send_message( + id: StreamId, + response: Response<()>, + end_of_stream: bool) -> frame::Headers + { + use http::response::Parts; + + // Extract the components of the HTTP request + let ( + Parts { + status, + headers, + .. + }, + _, + ) = response.into_parts(); + + // Build the set pseudo header set. All requests will include `method` + // and `path`. + let pseudo = frame::Pseudo::response(status); + + // Create the HEADERS frame + let mut frame = frame::Headers::new(id, pseudo, headers); + + if end_of_stream { + frame.set_end_stream() + } + + frame + } +} + +impl proto::Peer for Peer { + type Poll = Request<()>; + + fn is_server() -> bool { + true + } + + fn dyn() -> proto::DynPeer { + proto::DynPeer::Server + } + + fn convert_poll_message(headers: frame::Headers) -> Result { + use http::{uri, Version}; + + let mut b = Request::builder(); + + let stream_id = headers.stream_id(); + let (pseudo, fields) = headers.into_parts(); + + macro_rules! 
malformed { + ($($arg:tt)*) => {{ + debug!($($arg)*); + return Err(RecvError::Stream { + id: stream_id, + reason: Reason::PROTOCOL_ERROR, + }); + }} + }; + + b.version(Version::HTTP_2); + + if let Some(method) = pseudo.method { + b.method(method); + } else { + malformed!("malformed headers: missing method"); + } + + // Specifying :status for a request is a protocol error + if pseudo.status.is_some() { + return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + } + + // Convert the URI + let mut parts = uri::Parts::default(); + + if let Some(scheme) = pseudo.scheme { + parts.scheme = Some(uri::Scheme::from_shared(scheme.into_inner()) + .or_else(|_| malformed!("malformed headers: malformed scheme"))?); + } else { + malformed!("malformed headers: missing scheme"); + } + + if let Some(authority) = pseudo.authority { + parts.authority = Some(uri::Authority::from_shared(authority.into_inner()) + .or_else(|_| malformed!("malformed headers: malformed authority"))?); + } + + if let Some(path) = pseudo.path { + // This cannot be empty + if path.is_empty() { + malformed!("malformed headers: missing path"); + } + + parts.path_and_query = Some(uri::PathAndQuery::from_shared(path.into_inner()) + .or_else(|_| malformed!("malformed headers: malformed path"))?); + } + + b.uri(parts); + + let mut request = match b.body(()) { + Ok(request) => request, + Err(_) => { + // TODO: Should there be more specialized handling for different + // kinds of errors + return Err(RecvError::Stream { + id: stream_id, + reason: Reason::PROTOCOL_ERROR, + }); + }, + }; + + *request.headers_mut() = fields; + + Ok(request) + } +} + +// ===== impl Handshaking ===== + +impl fmt::Debug for Handshaking +where + B: IntoBuf +{ + #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Handshaking::Flushing(_) => + write!(f, "Handshaking::Flushing(_)"), + Handshaking::ReadingPreface(_) => + write!(f, "Handshaking::ReadingPreface(_)"), + Handshaking::Empty => + write!(f, 
"Handshaking::Empty"), + } + + } +} + +impl convert::From>> for Handshaking +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + #[inline] fn from(flush: Flush>) -> Self { + Handshaking::Flushing(flush) + } +} + +impl convert::From>> for + Handshaking +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + #[inline] fn from(read: ReadPreface>) -> Self { + Handshaking::ReadingPreface(read) + } +} + +impl convert::From>> for Handshaking +where + T: AsyncRead + AsyncWrite, + B: IntoBuf, +{ + #[inline] fn from(codec: Codec>) -> Self { + Handshaking::from(Flush::new(codec)) + } +} diff --git a/third_party/rust/h2/src/share.rs b/third_party/rust/h2/src/share.rs new file mode 100644 index 000000000000..d98a592ddbc7 --- /dev/null +++ b/third_party/rust/h2/src/share.rs @@ -0,0 +1,479 @@ +use codec::UserError; +use frame::Reason; +use proto::{self, WindowSize}; + +use bytes::{Bytes, IntoBuf}; +use futures::{self, Poll, Async}; +use http::{HeaderMap}; + +use std::fmt; + +/// Sends the body stream and trailers to the remote peer. +/// +/// # Overview +/// +/// A `SendStream` is provided by [`SendRequest`] and [`SendResponse`] once the +/// HTTP/2.0 message header has been sent sent. It is used to stream the message +/// body and send the message trailers. See method level documentation for more +/// details. +/// +/// The `SendStream` instance is also used to manage outbound flow control. +/// +/// If a `SendStream` is dropped without explicitly closing the send stream, a +/// `RST_STREAM` frame will be sent. This essentially cancels the request / +/// response exchange. +/// +/// The ways to explicitly close the send stream are: +/// +/// * Set `end_of_stream` to true when calling [`send_request`], +/// [`send_response`], or [`send_data`]. +/// * Send trailers with [`send_trailers`]. +/// * Explicitly reset the stream with [`send_reset`]. 
+/// +/// # Flow control +/// +/// In HTTP/2.0, data cannot be sent to the remote peer unless there is +/// available window capacity on both the stream and the connection. When a data +/// frame is sent, both the stream window and the connection window are +/// decremented. When the stream level window reaches zero, no further data can +/// be sent on that stream. When the connection level window reaches zero, no +/// further data can be sent on any stream for that connection. +/// +/// When the remote peer is ready to receive more data, it sends `WINDOW_UPDATE` +/// frames. These frames increment the windows. See the [specification] for more +/// details on the principles of HTTP/2.0 flow control. +/// +/// The implications for sending data are that the caller **should** ensure that +/// both the stream and the connection has available window capacity before +/// loading the data to send into memory. The `SendStream` instance provides the +/// necessary APIs to perform this logic. This, however, is not an obligation. +/// If the caller attempts to send data on a stream when there is no available +/// window capacity, the library will buffer the data until capacity becomes +/// available, at which point the buffer will be flushed to the connection. +/// +/// **NOTE**: There is no bound on the amount of data that the library will +/// buffer. If you are sending large amounts of data, you really should hook +/// into the flow control lifecycle. Otherwise, you risk using up significant +/// amounts of memory. +/// +/// To hook into the flow control lifecycle, the caller signals to the library +/// that it intends to send data by calling [`reserve_capacity`], specifying the +/// amount of data, in octets, that the caller intends to send. After this, +/// `poll_capacity` is used to be notified when the requested capacity is +/// assigned to the stream. 
Once [`poll_capacity`] returns `Ready` with the number +/// of octets available to the stream, the caller is able to actually send the +/// data using [`send_data`]. +/// +/// Because there is also a connection level window that applies to **all** +/// streams on a connection, when capacity is assigned to a stream (indicated by +/// `poll_capacity` returning `Ready`), this capacity is reserved on the +/// connection and will **not** be assigned to any other stream. If data is +/// never written to the stream, that capacity is effectively lost to other +/// streams and this introduces the risk of deadlocking a connection. +/// +/// To avoid throttling data on a connection, the caller should not reserve +/// capacity until ready to send data and once any capacity is assigned to the +/// stream, the caller should immediately send data consuming this capacity. +/// There is no guarantee as to when the full capacity requested will become +/// available. For example, if the caller requests 64 KB of data and 512 bytes +/// become available, the caller should immediately send 512 bytes of data. +/// +/// See [`reserve_capacity`] documentation for more details. +/// +/// [`SendRequest`]: client/struct.SendRequest.html +/// [`SendResponse`]: server/struct.SendResponse.html +/// [specification]: http://httpwg.org/specs/rfc7540.html#FlowControl +/// [`reserve_capacity`]: #method.reserve_capacity +/// [`poll_capacity`]: #method.poll_capacity +/// [`send_data`]: #method.send_data +/// [`send_request`]: client/struct.SendRequest.html#method.send_request +/// [`send_response`]: server/struct.SendResponse.html#method.send_response +/// [`send_data`]: #method.send_data +/// [`send_trailers`]: #method.send_trailers +/// [`send_reset`]: #method.send_reset +#[derive(Debug)] +pub struct SendStream { + inner: proto::StreamRef, +} + +/// A stream identifier, as described in [Section 5.1.1] of RFC 7540. +/// +/// Streams are identified with an unsigned 31-bit integer. 
Streams +/// initiated by a client MUST use odd-numbered stream identifiers; those +/// initiated by the server MUST use even-numbered stream identifiers. A +/// stream identifier of zero (0x0) is used for connection control +/// messages; the stream identifier of zero cannot be used to establish a +/// new stream. +/// +/// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct StreamId(u32); + +/// Receives the body stream and trailers from the remote peer. +/// +/// A `RecvStream` is provided by [`client::ResponseFuture`] and +/// [`server::Connection`] with the received HTTP/2.0 message head (the response +/// and request head respectively). +/// +/// A `RecvStream` instance is used to receive the streaming message body and +/// any trailers from the remote peer. It is also used to manage inbound flow +/// control. +/// +/// See method level documentation for more details on receiving data. See +/// [`ReleaseCapacity`] for more details on inbound flow control. +/// +/// Note that this type implements [`Stream`], yielding the received data frames. +/// When this implementation is used, the capacity is immediately released when +/// the data is yielded. It is recommended to only use this API when the data +/// will not be retained in memory for extended periods of time. +/// +/// [`client::ResponseFuture`]: client/struct.ResponseFuture.html +/// [`server::Connection`]: server/struct.Connection.html +/// [`ReleaseCapacity`]: struct.ReleaseCapacity.html +/// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html +#[must_use = "streams do nothing unless polled"] +pub struct RecvStream { + inner: ReleaseCapacity, +} + +/// A handle to release window capacity to a remote stream. +/// +/// This type allows the caller to manage inbound data [flow control]. The +/// caller is expected to call [`release_capacity`] after dropping data frames. 
+/// +/// # Overview +/// +/// Each stream has a window size. This window size is the maximum amount of +/// inbound data that can be in-flight. In-flight data is defined as data that +/// has been received, but not yet released. +/// +/// When a stream is created, the window size is set to the connection's initial +/// window size value. When a data frame is received, the window size is then +/// decremented by size of the data frame before the data is provided to the +/// caller. As the caller finishes using the data, [`release_capacity`] must be +/// called. This will then increment the window size again, allowing the peer to +/// send more data. +/// +/// There is also a connection level window as well as the stream level window. +/// Received data counts against the connection level window as well and calls +/// to [`release_capacity`] will also increment the connection level window. +/// +/// # Sending `WINDOW_UPDATE` frames +/// +/// `WINDOW_UPDATE` frames will not be sent out for **every** call to +/// `release_capacity`, as this would end up slowing down the protocol. Instead, +/// `h2` waits until the window size is increased to a certain threshold and +/// then sends out a single `WINDOW_UPDATE` frame representing all the calls to +/// `release_capacity` since the last `WINDOW_UPDATE` frame. +/// +/// This essentially batches window updating. +/// +/// # Scenarios +/// +/// Following is a basic scenario with an HTTP/2.0 connection containing a +/// single active stream. +/// +/// * A new stream is activated. The receive window is initialized to 1024 (the +/// value of the initial window size for this connection). +/// * A `DATA` frame is received containing a payload of 400 bytes. +/// * The receive window size is reduced to 424 bytes. +/// * [`release_capacity`] is called with 200. +/// * The receive window size is now 624 bytes. The peer may send no more than +/// this. +/// * A `DATA` frame is received with a payload of 624 bytes. 
+/// * The window size is now 0 bytes. The peer may not send any more data. +/// * [`release_capacity`] is called with 1024. +/// * The receive window size is now 1024 bytes. The peer may now send more +/// data. +/// +/// [flow control]: ../index.html#flow-control +/// [`release_capacity`]: struct.ReleaseCapacity.html#method.release_capacity +#[derive(Debug)] +pub struct ReleaseCapacity { + inner: proto::OpaqueStreamRef, +} + +// ===== impl SendStream ===== + +impl SendStream { + pub(crate) fn new(inner: proto::StreamRef) -> Self { + SendStream { inner } + } + + /// Requests capacity to send data. + /// + /// This function is used to express intent to send data. This requests + /// connection level capacity. Once the capacity is available, it is + /// assigned to the stream and not reused by other streams. + /// + /// This function may be called repeatedly. The `capacity` argument is the + /// **total** amount of requested capacity. Sequential calls to + /// `reserve_capacity` are *not* additive. Given the following: + /// + /// ```rust + /// # use h2::*; + /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { + /// send_stream.reserve_capacity(100); + /// send_stream.reserve_capacity(200); + /// # } + /// ``` + /// + /// After the second call to `reserve_capacity`, the *total* requested + /// capacity will be 200. + /// + /// `reserve_capacity` is also used to cancel previous capacity requests. + /// Given the following: + /// + /// ```rust + /// # use h2::*; + /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { + /// send_stream.reserve_capacity(100); + /// send_stream.reserve_capacity(0); + /// # } + /// ``` + /// + /// After the second call to `reserve_capacity`, the *total* requested + /// capacity will be 0, i.e. there is no requested capacity for the stream. 
+ /// + /// If `reserve_capacity` is called with a lower value than the amount of + /// capacity **currently** assigned to the stream, this capacity will be + /// returned to the connection to be re-assigned to other streams. + /// + /// Also, the amount of capacity that is reserved gets decremented as data + /// is sent. For example: + /// + /// ```rust + /// # use h2::*; + /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { + /// send_stream.reserve_capacity(100); + /// + /// let capacity = send_stream.poll_capacity(); + /// // capacity == 5; + /// + /// send_stream.send_data(b"hello", false).unwrap(); + /// // At this point, the total amount of requested capacity is 95 bytes. + /// + /// // Calling `reserve_capacity` with `100` again essentially requests an + /// // additional 5 bytes. + /// send_stream.reserve_capacity(100); + /// # } + /// ``` + /// + /// See [Flow control](struct.SendStream.html#flow-control) for an overview + /// of how send flow control works. + pub fn reserve_capacity(&mut self, capacity: usize) { + // TODO: Check for overflow + self.inner.reserve_capacity(capacity as WindowSize) + } + + /// Returns the stream's current send capacity. + /// + /// This allows the caller to check the current amount of available capacity + /// before sending data. + pub fn capacity(&self) -> usize { + self.inner.capacity() as usize + } + + /// Requests to be notified when the stream's capacity increases. + /// + /// Before calling this, capacity should be requested with + /// [`reserve_capacity`]. Once capacity is requested, the connection will + /// assign capacity to the stream **as it becomes available**. There is no + /// guarantee as to when and in what increments capacity gets assigned to + /// the stream. + /// + /// To get notified when the available capacity increases, the caller calls + /// `poll_capacity`, which returns `Ready(Some(n))` when `n` has been + /// increased by the connection. 
Note that `n` here represents the **total** + /// amount of assigned capacity at that point in time. It is also possible + /// that `n` is lower than the previous call if, since then, the caller has + /// sent data. + pub fn poll_capacity(&mut self) -> Poll, ::Error> { + let res = try_ready!(self.inner.poll_capacity()); + Ok(Async::Ready(res.map(|v| v as usize))) + } + + /// Sends a single data frame to the remote peer. + /// + /// This function may be called repeatedly as long as `end_of_stream` is set + /// to `false`. Setting `end_of_stream` to `true` sets the end stream flag + /// on the data frame. Any further calls to `send_data` or `send_trailers` + /// will return an [`Error`]. + /// + /// `send_data` can be called without reserving capacity. In this case, the + /// data is buffered and the capacity is implicitly requested. Once the + /// capacity becomes available, the data is flushed to the connection. + /// However, this buffering is unbounded. As such, sending large amounts of + /// data without reserving capacity before hand could result in large + /// amounts of data being buffered in memory. + /// + /// [`Error`]: struct.Error.html + pub fn send_data(&mut self, data: B, end_of_stream: bool) -> Result<(), ::Error> { + self.inner + .send_data(data.into_buf(), end_of_stream) + .map_err(Into::into) + } + + /// Sends trailers to the remote peer. + /// + /// Sending trailers implicitly closes the send stream. Once the send stream + /// is closed, no more data can be sent. + pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), ::Error> { + self.inner.send_trailers(trailers).map_err(Into::into) + } + + /// Resets the stream. + /// + /// This cancels the request / response exchange. If the response has not + /// yet been received, the associated `ResponseFuture` will return an + /// [`Error`] to reflect the canceled exchange. 
+ /// + /// [`Error`]: struct.Error.html + pub fn send_reset(&mut self, reason: Reason) { + self.inner.send_reset(reason) + } + + /// Polls to be notified when the client resets this stream. + /// + /// If stream is still open, this returns `Ok(Async::NotReady)`, and + /// registers the task to be notified if a `RST_STREAM` is received. + /// + /// If a `RST_STREAM` frame is received for this stream, calling this + /// method will yield the `Reason` for the reset. + /// + /// # Error + /// + /// If connection sees an error, this returns that error instead of a + /// `Reason`. + pub fn poll_reset(&mut self) -> Poll { + self.inner.poll_reset(proto::PollReset::Streaming) + } + + /// Returns the stream ID of this `SendStream`. + /// + /// # Panics + /// + /// If the lock on the stream store has been poisoned. + pub fn stream_id(&self) -> StreamId { + StreamId::from_internal(self.inner.stream_id()) + } +} + +// ===== impl StreamId ===== + +impl StreamId { + pub(crate) fn from_internal(id: ::frame::StreamId) -> Self { + StreamId(id.into()) + } +} +// ===== impl RecvStream ===== + +impl RecvStream { + pub(crate) fn new(inner: ReleaseCapacity) -> Self { + RecvStream { inner } + } + + #[deprecated(since = "0.0.0")] + #[doc(hidden)] + pub fn is_empty(&self) -> bool { + // If the recv side is closed and the receive queue is empty, the body is empty. + self.inner.inner.body_is_empty() + } + + /// Returns true if the receive half has reached the end of stream. + /// + /// A return value of `true` means that calls to `poll` and `poll_trailers` + /// will both return `None`. + pub fn is_end_stream(&self) -> bool { + self.inner.inner.is_end_stream() + } + + /// Get a mutable reference to this streams `ReleaseCapacity`. + /// + /// It can be used immediately, or cloned to be used later. + pub fn release_capacity(&mut self) -> &mut ReleaseCapacity { + &mut self.inner + } + + /// Returns received trailers. 
+ pub fn poll_trailers(&mut self) -> Poll, ::Error> { + self.inner.inner.poll_trailers().map_err(Into::into) + } + + /// Returns the stream ID of this stream. + /// + /// # Panics + /// + /// If the lock on the stream store has been poisoned. + pub fn stream_id(&self) -> StreamId { + self.inner.stream_id() + } +} + +impl futures::Stream for RecvStream { + type Item = Bytes; + type Error = ::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + self.inner.inner.poll_data().map_err(Into::into) + } +} + +impl fmt::Debug for RecvStream { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("RecvStream") + .field("inner", &self.inner) + .finish() + } +} + +// ===== impl ReleaseCapacity ===== + +impl ReleaseCapacity { + pub(crate) fn new(inner: proto::OpaqueStreamRef) -> Self { + ReleaseCapacity { inner } + } + + /// Returns the stream ID of the stream whose capacity will + /// be released by this `ReleaseCapacity`. + /// + /// # Panics + /// + /// If the lock on the stream store has been poisoned. + pub fn stream_id(&self) -> StreamId { + StreamId::from_internal(self.inner.stream_id()) + } + + /// Release window capacity back to remote stream. + /// + /// This releases capacity back to the stream level and the connection level + /// windows. Both window sizes will be increased by `sz`. + /// + /// See [struct level] documentation for more details. + /// + /// # Panics + /// + /// This function panics if increasing the receive window size by `sz` would + /// result in a window size greater than the target window size set by + /// [`set_target_window_size`]. In other words, the caller cannot release + /// more capacity than data has been received. If 1024 bytes of data have + /// been received, at most 1024 bytes can be released. 
+ /// + /// [struct level]: # + /// [`set_target_window_size`]: server/struct.Server.html#method.set_target_window_size + pub fn release_capacity(&mut self, sz: usize) -> Result<(), ::Error> { + if sz > proto::MAX_WINDOW_SIZE as usize { + return Err(UserError::ReleaseCapacityTooBig.into()); + } + self.inner + .release_capacity(sz as proto::WindowSize) + .map_err(Into::into) + } +} + +impl Clone for ReleaseCapacity { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + ReleaseCapacity { inner } + } +} diff --git a/third_party/rust/http/.cargo-checksum.json b/third_party/rust/http/.cargo-checksum.json new file mode 100644 index 000000000000..438552a9dc15 --- /dev/null +++ b/third_party/rust/http/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".travis.yml":"a3e83cca9fce4b586ed9d810b22855bb3a8879d6303608d21161bbde2fb63fbc","CHANGELOG.md":"58184edd80e6c281bff0397f3dc8b46154339ae882849fea5d9409485bb9ce12","Cargo.toml":"4c6f18c58eafb596a68557c9586f4c886858b4288a77a14ff89b8292c8fe5cd4","LICENSE-APACHE":"8bb1b50b0e5c9399ae33bd35fab2769010fa6c14e8860c729a52295d84896b7a","LICENSE-MIT":"dc91f8200e4b2a1f9261035d4c18c33c246911a6c0f7b543d75347e61b249cff","README.md":"37ea8a27cc63bef2b875fc0220cdb71b3287068365713f5a1b46626fcc79a54c","benches/header_map/basic.rs":"7e91c67c5ee59bb0f12bba735e4b2ed0b0fc2a4c0007c3920dfc66d266eeaeeb","benches/header_map/mod.rs":"601ab90cdb234f0d1e15c98d9d407f1dd44984d6abb0ecaccd9f64f1fc5255e2","benches/header_map/vec_map.rs":"6cee49372f891c21355d04c336ebd26971c42b78a2edb7e8281174d771856d2f","benches/header_value.rs":"53c765dff1ad2c7e7f9bfcb35bdbb3f9b42e3e0a537401dc691226488389062c","src/byte_str.rs":"a88516a7c54e5830ef9e07382baf76151cf4d3829736656ab2688ef03848c8ae","src/convert.rs":"0fb7d797d45177728aa754a909869259d7748616da2fcfaf189cdc8365bad376","src/error.rs":"0e4df207f49be5af249870837607f8fc24102a4641a7b775f907688deff6930a","src/extensions.rs":"7e57d0ff5251bb8fb212c36168849a3f9b14ee8aa1aeed8483153908562aa07a","src/header/map.rs":"b0f06ec118
7c824c0f78867aca406a2d0d306078511684ca8846f6babc1f114b","src/header/mod.rs":"941ff93360aa16e6f0cef1be5502e26b34da24a7c00077ac23366a2535b028b6","src/header/name.rs":"6f8c8e111aad66b1c8c3614e96ccc14867b3db32d980bc84ddc1ab3adb1e03fc","src/header/value.rs":"343e97de3ba2e3a0280968b978427688214ea51a15d5e97ea5de332f5b106e33","src/lib.rs":"888ec90491a7075dee84e7d07da77ac790d9dd227521a970aafb18f5a5581a98","src/method.rs":"493643a0d52300d0dcb4e6853d922c11d64bff151e4c2da9de536139ad869770","src/request.rs":"8d8a594068312ce7311ea813d9c658fae8a13c0e7b1853dd2567ce9f1ad55b60","src/response.rs":"90cb51393bb27ef589c4b29fa438d4d3488ae74178a24be169bfba33700e557d","src/status.rs":"ececb9f5f49b8e33eb0a9a9168fa813cc17df237bdc886de598e3d2f0345a31e","src/uri/authority.rs":"18d9ebeaaaa0f22f4fe32f63b406b1018536efead38af434b3b5e17e911ba71c","src/uri/mod.rs":"04138bed38f1c3872314634bb1d16ed9b92a32282cc442d47621e11611303443","src/uri/path.rs":"cef057399db0c86aafe381b8f002c8207f85d7c77835d1560c61987b60db05d2","src/uri/scheme.rs":"15bc1075699f663a17688c230792ccf645105e5d1439c494714df14b74db5d8c","src/uri/tests.rs":"e7034c41831fb223b4e2964170669e1a7f52d922109181c6067c55e4409312c0","src/version.rs":"d7cea6976335425a1fd10766083f9ac74b9f8c6667c2e71abf5f0886c0a93186","tests/header_map.rs":"7ff5fd9785ed6f9d03b57df10393fef523e56891520b76710c85ff01b2c48376","tests/header_map_fuzz.rs":"9fc39a62f89dc7794d1e29dbf70ac352b1ddf981501a59cfcca02f6fd11acd98","tests/status_code.rs":"112e27e367e2adc16d5734b6a523d6dd5208e69fd6b9e596d95111051881d10b"},"package":"dca621d0fa606a5ff2850b6e337b57ad6137ee4d67e940449643ff45af6874c6"} \ No newline at end of file diff --git a/third_party/rust/http/.travis.yml b/third_party/rust/http/.travis.yml new file mode 100644 index 000000000000..053ba1f5968e --- /dev/null +++ b/third_party/rust/http/.travis.yml @@ -0,0 +1,26 @@ +language: rust +sudo: false + +cache: cargo + +matrix: + include: + - rust: stable + - rust: beta + - rust: nightly + # ensure wasm always builds + - rust: 
stable + script: + - rustup target add wasm32-unknown-unknown + - cargo build --target=wasm32-unknown-unknown + # minimum rustc version + - rust: 1.20.0 + script: cargo build + +script: + - cargo test + - 'if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo test --benches; fi' + +notifications: + email: + on_success: never diff --git a/third_party/rust/http/CHANGELOG.md b/third_party/rust/http/CHANGELOG.md new file mode 100644 index 000000000000..9b821c9a0d8e --- /dev/null +++ b/third_party/rust/http/CHANGELOG.md @@ -0,0 +1,66 @@ +# 0.1.10 (August, 8 2018) + +* impl HttpTryFrom for HeaderValue (#236) + +# 0.1.9 (August 7, 2018) + +* Fix double percent encoding (#233) +* Add additional HttpTryFrom impls (#234) + +# 0.1.8 (July 23, 2018) + +* Add fuller set of `PartialEq` for `Method` (#221) +* Reduce size of `HeaderMap` by using `Box<[Entry]>` instea of `Vec` (#224) +* Reduce size of `Extensions` by storing as `Option>` (#227) +* Implement `Iterator::size_hint` for most iterators in `header` (#226) + +# 0.1.7 (June 22, 2018) + +* Add `From for HeaderValue` for most integer types (#218). +* Add `Uri::into_parts()` inherent method (same as `Parts::from(uri)`) (#214). +* Fix converting `Uri`s in authority-form to `Parts` and then back into `Uri` (#216). +* Fix `Authority` parsing to reject multiple port sections (#215). +* Fix parsing 1 character authority-form `Uri`s into illegal forms (#220). + +# 0.1.6 (June 13, 2018) + +* Add `HeaderName::from_static()` constructor (#195). +* Add `Authority::from_static()` constructor (#186). +* Implement `From` for `HeaderValue` (#184). +* Fix duplicate keys when iterating over `header::Keys` (#201). + +# 0.1.5 (February 28, 2018) + +* Add websocket handshake related header constants (#162). +* Parsing `Authority` with an empty string now returns an error (#164). +* Implement `PartialEq` for `StatusCode` (#153). +* Implement `HttpTryFrom<&Uri>` for `Uri` (#165). +* Implement `FromStr` for `Method` (#167). 
+* Implement `HttpTryFrom` for `Uri` (#171). +* Add `into_body` fns to `Request` and `Response` (#172). +* Fix `Request::options` (#177). + +# 0.1.4 (January 4, 2018) + +* Add PathAndQuery::from_static (#148). +* Impl PartialOrd / PartialEq for Authority and PathAndQuery (#150). +* Add `map` fn to `Request` and `Response` (#151). + +# 0.1.3 (December 11, 2017) + +* Add `Scheme` associated consts for common protos. + +# 0.1.2 (November 29, 2017) + +* Add Uri accessor for scheme part. +* Fix Uri parsing bug (#134) + +# 0.1.1 (October 9, 2017) + +* Provide Uri accessors for parts (#129) +* Add Request builder helpers. (#123) +* Misc performance improvements (#126) + +# 0.1.0 (September 8, 2017) + +* Initial release. diff --git a/third_party/rust/http/Cargo.toml b/third_party/rust/http/Cargo.toml new file mode 100644 index 000000000000..0b19c6d42979 --- /dev/null +++ b/third_party/rust/http/Cargo.toml @@ -0,0 +1,57 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "http" +version = "0.1.10" +authors = ["Alex Crichton ", "Carl Lerche ", "Sean McArthur "] +description = "A set of types for representing HTTP requests and responses.\n" +homepage = "https://github.com/hyperium/http" +documentation = "https://docs.rs/http" +readme = "README.md" +keywords = ["http"] +categories = ["web-programming"] +license = "MIT/Apache-2.0" +repository = "https://github.com/hyperium/http" + +[[bench]] +name = "header_map" +path = "benches/header_map/mod.rs" + +[[bench]] +name = "header_value" +path = "benches/header_value.rs" +[dependencies.bytes] +version = "0.4" + +[dependencies.fnv] +version = "1.0.5" + +[dependencies.itoa] +version = "0.4.1" +[dev-dependencies.indexmap] +version = "1.0" + +[dev-dependencies.quickcheck] +version = "0.6" + +[dev-dependencies.rand] +version = "0.4" + +[dev-dependencies.seahash] +version = "3.0.5" + +[dev-dependencies.serde] +version = "1.0" + +[dev-dependencies.serde_json] +version = "1.0" diff --git a/third_party/rust/bytes/LICENSE-APACHE b/third_party/rust/http/LICENSE-APACHE similarity index 99% rename from third_party/rust/bytes/LICENSE-APACHE rename to third_party/rust/http/LICENSE-APACHE index 87d73e7f9191..80176c2b2399 100644 --- a/third_party/rust/bytes/LICENSE-APACHE +++ b/third_party/rust/http/LICENSE-APACHE @@ -186,7 +186,7 @@ APPENDIX: How to apply the Apache License to your work. same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2017 Carl Lerche +Copyright 2017 http-rs authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/third_party/rust/http/LICENSE-MIT b/third_party/rust/http/LICENSE-MIT new file mode 100644 index 000000000000..0cbc55049230 --- /dev/null +++ b/third_party/rust/http/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2017 http-rs authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/http/README.md b/third_party/rust/http/README.md new file mode 100644 index 000000000000..04f2fb3ecf55 --- /dev/null +++ b/third_party/rust/http/README.md @@ -0,0 +1,80 @@ +# HTTP + +A general purpose library of common HTTP types + +[![Build Status](https://travis-ci.org/hyperium/http.svg?branch=master)](https://travis-ci.org/hyperium/http) +[![Crates.io](https://img.shields.io/crates/v/http.svg?maxAge=2592000)](https://crates.io/crates/http) +[![Documentation](https://docs.rs/http/badge.svg)][dox] + +More information about this crate can be found in the [crate +documentation][dox]. 
+ +[dox]: https://docs.rs/http + +## Usage + +To use `http`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +http = "0.1" +``` + +Next, add this to your crate: + +```rust +extern crate http; + +use http::{Request, Response}; + +fn main() { + // ... +} +``` + +## Examples + +Create an HTTP request: + +```rust +extern crate http; + +use http::Request; + +fn main() { + let request = Request::builder() + .uri("https://www.rust-lang.org/") + .header("User-Agent", "awesome/1.0") + .body(()) + .unwrap(); +} +``` + +Create an HTTP response: + +```rust +extern crate http; + +use http::{Response, StatusCode}; + +fn main() { + let response = Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header("Location", "https://www.rust-lang.org/install.html") + .body(()) + .unwrap(); +} +``` + +# License + +Licensed under either of + +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +# Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/http/benches/header_map/basic.rs b/third_party/rust/http/benches/header_map/basic.rs new file mode 100644 index 000000000000..4ce7ed628737 --- /dev/null +++ b/third_party/rust/http/benches/header_map/basic.rs @@ -0,0 +1,586 @@ +macro_rules! 
bench { + ($name:ident($map:ident, $b:ident) $body:expr) => { + mod $name { + #[allow(unused_imports)] + use test::{self, Bencher}; + use seahash::SeaHasher; + use fnv::FnvHasher; + use std::hash::BuildHasherDefault; + use http::header::*; + #[allow(unused_imports)] + use super::custom_hdr; + + #[bench] + fn header_map($b: &mut Bencher) { + let $map = || HeaderMap::default(); + $body + } + + #[bench] + fn order_map_fnv($b: &mut Bencher) { + use indexmap::IndexMap; + let $map = || IndexMap::<_, _, BuildHasherDefault>::default(); + $body + } + + #[bench] + fn vec_map($b: &mut Bencher) { + use vec_map::VecMap; + + let $map = || VecMap::with_capacity(0); + $body + } + + #[bench] + fn order_map_seahash($b: &mut Bencher) { + use indexmap::IndexMap; + let $map = || IndexMap::<_, _, BuildHasherDefault>::default(); + $body + } + + /* + #[bench] + fn order_map_siphash($b: &mut Bencher) { + use indexmap::IndexMap; + let $map = || IndexMap::new(); + $body + } + + #[bench] + fn std_map_siphash($b: &mut Bencher) { + use std::collections::HashMap; + let $map = || HashMap::new(); + $body + } + */ + } + }; +} + +bench!(new_insert_get_host(new_map, b) { + b.iter(|| { + let mut h = new_map(); + h.insert(HOST, "hyper.rs"); + test::black_box(h.get(&HOST)); + }) +}); + +bench!(insert_4_std_get_30(new_map, b) { + + b.iter(|| { + let mut h = new_map(); + + for i in 0..4 { + h.insert(super::STD[i].clone(), "foo"); + } + + for i in 0..30 { + test::black_box(h.get(&super::STD[i % 4])); + } + }) +}); + +bench!(insert_6_std_get_6(new_map, b) { + + b.iter(|| { + let mut h = new_map(); + + for i in 0..6 { + h.insert(super::STD[i].clone(), "foo"); + } + + for i in 0..6 { + test::black_box(h.get(&super::STD[i % 4])); + } + }) +}); + +/* +bench!(insert_remove_host(new_map, b) { + let mut h = new_map(); + + b.iter(|| { + test::black_box(h.insert(HOST, "hyper.rs")); + test::black_box(h.remove(&HOST)); + }) +}); + +bench!(insert_insert_host(new_map, b) { + let mut h = new_map(); + + b.iter(|| { + 
test::black_box(h.insert(HOST, "hyper.rs")); + test::black_box(h.insert(HOST, "hyper.rs")); + }) +}); +*/ + +bench!(get_10_of_20_std(new_map, b) { + let mut h = new_map(); + + for hdr in super::STD[10..30].iter() { + h.insert(hdr.clone(), hdr.as_str().to_string()); + } + + b.iter(|| { + for hdr in &super::STD[10..20] { + test::black_box(h.get(hdr)); + } + }) +}); + +bench!(get_100_std(new_map, b) { + let mut h = new_map(); + + for hdr in super::STD.iter() { + h.insert(hdr.clone(), hdr.as_str().to_string()); + } + + b.iter(|| { + for i in 0..100 { + test::black_box(h.get(&super::STD[i % super::STD.len()])); + } + }) +}); + +bench!(set_8_get_1_std(new_map, b) { + b.iter(|| { + let mut h = new_map(); + + for hdr in &super::STD[0..8] { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&super::STD[0])); + }) +}); + +bench!(set_10_get_1_std(new_map, b) { + b.iter(|| { + let mut h = new_map(); + + for hdr in &super::STD[0..10] { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&super::STD[0])); + }) +}); + +bench!(set_20_get_1_std(new_map, b) { + b.iter(|| { + let mut h = new_map(); + + for hdr in &super::STD[0..20] { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&super::STD[0])); + }) +}); + +bench!(get_10_custom_short(new_map, b) { + let hdrs = custom_hdr(20); + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), hdr.as_str().to_string()); + } + + b.iter(|| { + for hdr in &hdrs[..10] { + test::black_box(h.get(hdr)); + } + }) +}); + +bench!(set_10_get_1_custom_short(new_map, b) { + let hdrs = custom_hdr(10); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + + +bench!(set_10_get_1_custom_med(new_map, b) { + let hdrs = super::med_custom_hdr(10); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + 
+bench!(set_10_get_1_custom_long(new_map, b) { + let hdrs = super::long_custom_hdr(10); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + +bench!(set_10_get_1_custom_very_long(new_map, b) { + let hdrs = super::very_long_custom_hdr(10); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + +bench!(set_20_get_1_custom_short(new_map, b) { + let hdrs = custom_hdr(20); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + +bench!(set_20_get_1_custom_med(new_map, b) { + let hdrs = super::med_custom_hdr(20); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + +bench!(set_20_get_1_custom_long(new_map, b) { + let hdrs = super::long_custom_hdr(20); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + +bench!(set_20_get_1_custom_very_long(new_map, b) { + let hdrs = super::very_long_custom_hdr(20); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + + test::black_box(h.get(&hdrs[0])); + }) +}); + +bench!(insert_all_std_headers(new_map, b) { + b.iter(|| { + let mut h = new_map(); + + for hdr in super::STD { + test::black_box(h.insert(hdr.clone(), "foo")); + } + }) +}); + +bench!(insert_79_custom_std_headers(new_map, b) { + let hdrs = super::custom_std(79); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + h.insert(hdr.clone(), "foo"); + } + }) +}); + +bench!(insert_100_custom_headers(new_map, b) { + let hdrs = custom_hdr(100); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + test::black_box(h.insert(hdr.clone(), 
"foo")); + } + }) +}); + +bench!(insert_500_custom_headers(new_map, b) { + let hdrs = custom_hdr(500); + + b.iter(|| { + let mut h = new_map(); + + for hdr in &hdrs { + test::black_box(h.insert(hdr.clone(), "foo")); + } + }) +}); + +bench!(insert_one_15_char_header(new_map, b) { + let hdr: HeaderName = "abcd-abcd-abcde" + .parse().unwrap(); + + b.iter(|| { + let mut h = new_map(); + h.insert(hdr.clone(), "hello"); + test::black_box(h); + }) +}); + +bench!(insert_one_25_char_header(new_map, b) { + let hdr: HeaderName = "abcd-abcd-abcd-abcd-abcde" + .parse().unwrap(); + + b.iter(|| { + let mut h = new_map(); + h.insert(hdr.clone(), "hello"); + test::black_box(h); + }) +}); + +bench!(insert_one_50_char_header(new_map, b) { + let hdr: HeaderName = "abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcde" + .parse().unwrap(); + + b.iter(|| { + let mut h = new_map(); + h.insert(hdr.clone(), "hello"); + test::black_box(h); + }) +}); + +bench!(insert_one_100_char_header(new_map, b) { + let hdr: HeaderName = "abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcdeabcd-abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcd-abcde" + .parse().unwrap(); + + b.iter(|| { + let mut h = new_map(); + h.insert(hdr.clone(), "hello"); + test::black_box(h); + }) +}); + +const HN_HDRS: [(&'static str, &'static str); 11] = [ + ("Date", "Fri, 27 Jan 2017 23:00:00 GMT"), + ("Content-Type", "text/html; charset=utf-8"), + ("Transfer-Encoding", "chunked"), + ("Connection", "keep-alive"), + ("Set-Cookie", "__cfduid=dbdfbbe3822b61cb8750ba37d894022151485558000; expires=Sat, 27-Jan-18 23:00:00 GMT; path=/; domain=.ycombinator.com; HttpOnly"), + ("Vary", "Accept-Encoding"), + ("Cache-Control", "private"), + ("X-Frame-Options", "DENY"), + ("Strict-Transport-Security", "max-age=31556900; includeSubDomains"), + ("Server", "cloudflare-nginx"), + ("CF-RAY", "327fd1809f3c1baf-SEA"), +]; + +bench!(hn_hdrs_set_8_get_many(new_map, b) { + let hdrs: Vec<(HeaderName, &'static str)> = super::HN_HDRS[..8].iter() + .map(|&(name, val)| 
(name.parse().unwrap(), val)) + .collect(); + + b.iter(|| { + let mut h = new_map(); + + for &(ref name, val) in hdrs.iter() { + h.insert(name.clone(), val); + } + + for _ in 0..15 { + test::black_box(h.get(&CONTENT_LENGTH)); + test::black_box(h.get(&VARY)); + } + }); +}); + +bench!(hn_hdrs_set_8_get_miss(new_map, b) { + let hdrs: Vec<(HeaderName, &'static str)> = super::HN_HDRS[..8].iter() + .map(|&(name, val)| (name.parse().unwrap(), val)) + .collect(); + + let miss: HeaderName = "x-wat".parse().unwrap(); + + b.iter(|| { + let mut h = new_map(); + + for &(ref name, val) in hdrs.iter() { + h.insert(name.clone(), val); + } + + test::black_box(h.get(&CONTENT_LENGTH)); + test::black_box(h.get(&miss)); + }); +}); + +bench!(hn_hdrs_set_11_get_with_miss(new_map, b) { + let hdrs: Vec<(HeaderName, &'static str)> = super::HN_HDRS.iter() + .map(|&(name, val)| (name.parse().unwrap(), val)) + .collect(); + + let miss: HeaderName = "x-wat".parse().unwrap(); + + b.iter(|| { + let mut h = new_map(); + + for &(ref name, val) in hdrs.iter() { + h.insert(name.clone(), val); + } + + for _ in 0..10 { + test::black_box(h.get(&CONTENT_LENGTH)); + test::black_box(h.get(&VARY)); + test::black_box(h.get(&miss)); + } + }); +}); + +use http::header::*; + +fn custom_hdr(n: usize) -> Vec { + (0..n).map(|i| { + let s = format!("x-custom-{}", i); + s.parse().unwrap() + }).collect() +} + +fn med_custom_hdr(n: usize) -> Vec { + (0..n).map(|i| { + let s = format!("content-length-{}", i); + s.parse().unwrap() + }).collect() +} + +fn long_custom_hdr(n: usize) -> Vec { + (0..n).map(|i| { + let s = format!("access-control-allow-headers-{}", i); + s.parse().unwrap() + }).collect() +} + +fn very_long_custom_hdr(n: usize) -> Vec { + (0..n).map(|i| { + let s = format!("access-control-allow-access-control-allow-headers-{}", i); + s.parse().unwrap() + }).collect() +} + +fn custom_std(n: usize) -> Vec { + (0..n).map(|i| { + let s = format!("{}-{}", STD[i % STD.len()].as_str(), i); + s.parse().unwrap() + 
}).collect() +} + +const STD: &'static [HeaderName] = &[ + ACCEPT, + ACCEPT_CHARSET, + ACCEPT_ENCODING, + ACCEPT_LANGUAGE, + ACCEPT_RANGES, + ACCESS_CONTROL_ALLOW_CREDENTIALS, + ACCESS_CONTROL_ALLOW_HEADERS, + ACCESS_CONTROL_ALLOW_METHODS, + ACCESS_CONTROL_ALLOW_ORIGIN, + ACCESS_CONTROL_EXPOSE_HEADERS, + ACCESS_CONTROL_MAX_AGE, + ACCESS_CONTROL_REQUEST_HEADERS, + ACCESS_CONTROL_REQUEST_METHOD, + AGE, + ALLOW, + ALT_SVC, + AUTHORIZATION, + CACHE_CONTROL, + CONNECTION, + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LANGUAGE, + CONTENT_LENGTH, + CONTENT_LOCATION, + CONTENT_RANGE, + CONTENT_SECURITY_POLICY, + CONTENT_SECURITY_POLICY_REPORT_ONLY, + CONTENT_TYPE, + COOKIE, + DNT, + DATE, + ETAG, + EXPECT, + EXPIRES, + FORWARDED, + FROM, + HOST, + IF_MATCH, + IF_MODIFIED_SINCE, + IF_NONE_MATCH, + IF_RANGE, + IF_UNMODIFIED_SINCE, + LAST_MODIFIED, + LINK, + LOCATION, + MAX_FORWARDS, + ORIGIN, + PRAGMA, + PROXY_AUTHENTICATE, + PROXY_AUTHORIZATION, + PUBLIC_KEY_PINS, + PUBLIC_KEY_PINS_REPORT_ONLY, + RANGE, + REFERER, + REFERRER_POLICY, + REFRESH, + RETRY_AFTER, + SERVER, + SET_COOKIE, + STRICT_TRANSPORT_SECURITY, + TE, + TRAILER, + TRANSFER_ENCODING, + USER_AGENT, + UPGRADE, + UPGRADE_INSECURE_REQUESTS, + VARY, + VIA, + WARNING, + WWW_AUTHENTICATE, + X_CONTENT_TYPE_OPTIONS, + X_DNS_PREFETCH_CONTROL, + X_FRAME_OPTIONS, + X_XSS_PROTECTION, +]; diff --git a/third_party/rust/http/benches/header_map/mod.rs b/third_party/rust/http/benches/header_map/mod.rs new file mode 100644 index 000000000000..50a97f7a420a --- /dev/null +++ b/third_party/rust/http/benches/header_map/mod.rs @@ -0,0 +1,10 @@ +#![feature(test)] + +extern crate http; +extern crate test; +extern crate indexmap; +extern crate seahash; +extern crate fnv; + +mod basic; +mod vec_map; diff --git a/third_party/rust/hyper/src/header/internals/vec_map.rs b/third_party/rust/http/benches/header_map/vec_map.rs similarity index 73% rename from third_party/rust/hyper/src/header/internals/vec_map.rs rename to 
third_party/rust/http/benches/header_map/vec_map.rs index 25ed2a6cd4e9..995e8062334a 100644 --- a/third_party/rust/hyper/src/header/internals/vec_map.rs +++ b/third_party/rust/http/benches/header_map/vec_map.rs @@ -1,15 +1,19 @@ +#![allow(dead_code)] + #[derive(Clone)] pub struct VecMap { vec: Vec<(K, V)>, } impl VecMap { - pub fn new() -> VecMap { + #[inline] + pub fn with_capacity(cap: usize) -> VecMap { VecMap { - vec: Vec::new() + vec: Vec::with_capacity(cap) } } + #[inline] pub fn insert(&mut self, key: K, value: V) { match self.find(&key) { Some(pos) => self.vec[pos] = (key, value), @@ -17,6 +21,7 @@ impl VecMap { } } + #[inline] pub fn entry(&mut self, key: K) -> Entry { match self.find(&key) { Some(pos) => Entry::Occupied(OccupiedEntry { @@ -30,30 +35,39 @@ impl VecMap { } } - pub fn get(&self, key: &K) -> Option<&V> { + #[inline] + pub fn get + ?Sized>(&self, key: &K2) -> Option<&V> { self.find(key).map(move |pos| &self.vec[pos].1) } - pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + #[inline] + pub fn get_mut + ?Sized>(&mut self, key: &K2) -> Option<&mut V> { self.find(key).map(move |pos| &mut self.vec[pos].1) } - pub fn contains_key(&self, key: &K) -> bool { + #[inline] + pub fn contains_key + ?Sized>(&self, key: &K2) -> bool { self.find(key).is_some() } + #[inline] pub fn len(&self) -> usize { self.vec.len() } + + #[inline] pub fn iter(&self) -> ::std::slice::Iter<(K, V)> { self.vec.iter() } - pub fn remove(&mut self, key: &K) -> Option { + #[inline] + pub fn remove + ?Sized>(&mut self, key: &K2) -> Option { self.find(key).map(|pos| self.vec.remove(pos)).map(|(_, v)| v) } + #[inline] pub fn clear(&mut self) { self.vec.clear(); } - fn find(&self, key: &K) -> Option { + #[inline] + fn find + ?Sized>(&self, key: &K2) -> Option { self.vec.iter().position(|entry| key == &entry.0) } } diff --git a/third_party/rust/http/benches/header_value.rs b/third_party/rust/http/benches/header_value.rs new file mode 100644 index 000000000000..d9d2055c1508 --- 
/dev/null +++ b/third_party/rust/http/benches/header_value.rs @@ -0,0 +1,54 @@ +#![feature(test)] + +extern crate bytes; +extern crate http; +extern crate test; + +use bytes::Bytes; +use http::header::HeaderValue; +use test::Bencher; + +static SHORT: &'static [u8] = b"localhost"; +static LONG: &'static [u8] = b"Mozilla/5.0 (X11; CrOS x86_64 9592.71.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.80 Safari/537.36"; + + +#[bench] +fn from_shared_short(b: &mut Bencher) { + b.bytes = SHORT.len() as u64; + let bytes = Bytes::from_static(SHORT); + b.iter(|| { + HeaderValue::from_shared(bytes.clone()).unwrap(); + }); +} + +#[bench] +fn from_shared_long(b: &mut Bencher) { + b.bytes = LONG.len() as u64; + let bytes = Bytes::from_static(LONG); + b.iter(|| { + HeaderValue::from_shared(bytes.clone()).unwrap(); + }); +} + + +#[bench] +fn from_shared_unchecked_short(b: &mut Bencher) { + b.bytes = SHORT.len() as u64; + let bytes = Bytes::from_static(SHORT); + b.iter(|| { + unsafe { + HeaderValue::from_shared_unchecked(bytes.clone()); + } + }); +} + +#[bench] +fn from_shared_unchecked_long(b: &mut Bencher) { + b.bytes = LONG.len() as u64; + let bytes = Bytes::from_static(LONG); + b.iter(|| { + unsafe { + HeaderValue::from_shared_unchecked(bytes.clone()); + } + }); +} diff --git a/third_party/rust/http/src/byte_str.rs b/third_party/rust/http/src/byte_str.rs new file mode 100644 index 000000000000..828f5e1c58ad --- /dev/null +++ b/third_party/rust/http/src/byte_str.rs @@ -0,0 +1,55 @@ +use bytes::Bytes; + +use std::{ops, str}; + +#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct ByteStr { + bytes: Bytes, +} + +impl ByteStr { + #[inline] + pub fn new() -> ByteStr { + ByteStr { bytes: Bytes::new() } + } + + #[inline] + pub fn from_static(val: &'static str) -> ByteStr { + ByteStr { bytes: Bytes::from_static(val.as_bytes()) } + } + + #[inline] + pub unsafe fn from_utf8_unchecked(bytes: Bytes) -> ByteStr { + ByteStr { bytes: bytes } + } +} + +impl 
ops::Deref for ByteStr { + type Target = str; + + #[inline] + fn deref(&self) -> &str { + let b: &[u8] = self.bytes.as_ref(); + unsafe { str::from_utf8_unchecked(b) } + } +} + +impl From for ByteStr { + #[inline] + fn from(src: String) -> ByteStr { + ByteStr { bytes: Bytes::from(src) } + } +} + +impl<'a> From<&'a str> for ByteStr { + #[inline] + fn from(src: &'a str) -> ByteStr { + ByteStr { bytes: Bytes::from(src) } + } +} + +impl From for Bytes { + fn from(src: ByteStr) -> Self { + src.bytes + } +} diff --git a/third_party/rust/http/src/convert.rs b/third_party/rust/http/src/convert.rs new file mode 100644 index 000000000000..2f901e6fdfe5 --- /dev/null +++ b/third_party/rust/http/src/convert.rs @@ -0,0 +1,45 @@ +use Error; +use header::{HeaderName, HeaderValue}; +use method::Method; +use sealed::Sealed; +use status::StatusCode; +use uri::Uri; + +/// Private trait for the `http` crate to have generic methods with fallible +/// conversions. +/// +/// This trait is similar to the `TryFrom` trait proposed in the standard +/// library, except this is specialized for the `http` crate and isn't intended +/// for general consumption. +/// +/// This trait cannot be implemented types outside of the `http` crate, and is +/// only intended for use as a generic bound on methods in the `http` crate. +pub trait HttpTryFrom: Sized + Sealed { + /// Associated error with the conversion this implementation represents. + type Error: Into; + + #[doc(hidden)] + fn try_from(t: T) -> Result; +} + +macro_rules! reflexive { + ($($t:ty,)*) => ($( + impl HttpTryFrom<$t> for $t { + type Error = Error; + + fn try_from(t: Self) -> Result { + Ok(t) + } + } + + impl Sealed for $t {} + )*) +} + +reflexive! 
{ + Uri, + Method, + StatusCode, + HeaderName, + HeaderValue, +} diff --git a/third_party/rust/http/src/error.rs b/third_party/rust/http/src/error.rs new file mode 100644 index 000000000000..8834059fcb37 --- /dev/null +++ b/third_party/rust/http/src/error.rs @@ -0,0 +1,144 @@ +use std::error; +use std::fmt; +use std::result; + +use header; +use method; +use status; +use uri; + +/// A generic "error" for HTTP connections +/// +/// This error type is less specific than the error returned from other +/// functions in this crate, but all other errors can be converted to this +/// error. Consumers of this crate can typically consume and work with this form +/// of error for conversions with the `?` operator. +#[derive(Debug)] +pub struct Error { + inner: ErrorKind, +} + +/// A `Result` typedef to use with the `http::Error` type +pub type Result = result::Result; + +#[derive(Debug)] +enum ErrorKind { + StatusCode(status::InvalidStatusCode), + Method(method::InvalidMethod), + Uri(uri::InvalidUri), + UriShared(uri::InvalidUriBytes), + UriParts(uri::InvalidUriParts), + HeaderName(header::InvalidHeaderName), + HeaderNameShared(header::InvalidHeaderNameBytes), + HeaderValue(header::InvalidHeaderValue), + HeaderValueShared(header::InvalidHeaderValueBytes), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + error::Error::description(self).fmt(f) + } +} + +impl error::Error for Error { + fn description(&self) -> &str { + use self::ErrorKind::*; + + match self.inner { + StatusCode(ref e) => e.description(), + Method(ref e) => e.description(), + Uri(ref e) => e.description(), + UriShared(ref e) => e.description(), + UriParts(ref e) => e.description(), + HeaderName(ref e) => e.description(), + HeaderNameShared(ref e) => e.description(), + HeaderValue(ref e) => e.description(), + HeaderValueShared(ref e) => e.description(), + } + } +} + +impl From for Error { + fn from(err: status::InvalidStatusCode) -> Error { + Error { inner: 
ErrorKind::StatusCode(err) } + } +} + +impl From for Error { + fn from(err: method::InvalidMethod) -> Error { + Error { inner: ErrorKind::Method(err) } + } +} + +impl From for Error { + fn from(err: uri::InvalidUri) -> Error { + Error { inner: ErrorKind::Uri(err) } + } +} + +impl From for Error { + fn from(err: uri::InvalidUriBytes) -> Error { + Error { inner: ErrorKind::UriShared(err) } + } +} + +impl From for Error { + fn from(err: uri::InvalidUriParts) -> Error { + Error { inner: ErrorKind::UriParts(err) } + } +} + +impl From for Error { + fn from(err: header::InvalidHeaderName) -> Error { + Error { inner: ErrorKind::HeaderName(err) } + } +} + +impl From for Error { + fn from(err: header::InvalidHeaderNameBytes) -> Error { + Error { inner: ErrorKind::HeaderNameShared(err) } + } +} + +impl From for Error { + fn from(err: header::InvalidHeaderValue) -> Error { + Error { inner: ErrorKind::HeaderValue(err) } + } +} + +impl From for Error { + fn from(err: header::InvalidHeaderValueBytes) -> Error { + Error { inner: ErrorKind::HeaderValueShared(err) } + } +} + +// A crate-private type until we can use !. +// +// Being crate-private, we should be able to swap the type out in a +// backwards compatible way. 
+pub enum Never {} + +impl From for Error { + fn from(never: Never) -> Error { + match never {} + } +} + +impl fmt::Debug for Never { + fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } +} + +impl fmt::Display for Never { + fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } +} + +impl error::Error for Never { + fn description(&self) -> &str { + match *self {} + } +} + diff --git a/third_party/rust/http/src/extensions.rs b/third_party/rust/http/src/extensions.rs new file mode 100644 index 000000000000..6cbf1e3ae2f5 --- /dev/null +++ b/third_party/rust/http/src/extensions.rs @@ -0,0 +1,195 @@ +use std::any::{Any, TypeId}; +use std::collections::HashMap; +use std::hash::{BuildHasherDefault, Hasher}; +use std::fmt; + +type AnyMap = HashMap, BuildHasherDefault>; + +// With TypeIds as keys, there's no need to hash them. They are already hashes +// themselves, coming from the compiler. The IdHasher just holds the u64 of +// the TypeId, and then returns it, instead of doing any bit fiddling. +#[derive(Default)] +struct IdHasher(u64); + +impl Hasher for IdHasher { + fn write(&mut self, _: &[u8]) { + unreachable!("TypeId calls write_u64"); + } + + #[inline] + fn write_u64(&mut self, id: u64) { + self.0 = id; + } + + #[inline] + fn finish(&self) -> u64 { + self.0 + } +} + + + +/// A type map of protocol extensions. +/// +/// `Extensions` can be used by `Request` and `Response` to store +/// extra data derived from the underlying protocol. +#[derive(Default)] +pub struct Extensions { + // If extensions are never used, no need to carry around an empty HashMap. + // That's 3 words. Instead, this is only 1 word. + map: Option>, +} + +impl Extensions { + /// Create an empty `Extensions`. + #[inline] + pub fn new() -> Extensions { + Extensions { + map: None, + } + } + + /// Insert a type into this `Extensions`. + /// + /// If a extension of this type already existed, it will + /// be returned. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// assert!(ext.insert(5i32).is_none()); + /// assert!(ext.insert(4u8).is_none()); + /// assert_eq!(ext.insert(9i32), Some(5i32)); + /// ``` + pub fn insert(&mut self, val: T) -> Option { + self + .map + .get_or_insert_with(|| Box::new(HashMap::default())) + .insert(TypeId::of::(), Box::new(val)) + .and_then(|boxed| { + //TODO: we can use unsafe and remove double checking the type id + (boxed as Box) + .downcast() + .ok() + .map(|boxed| *boxed) + }) + } + + /// Get a reference to a type previously inserted on this `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// assert!(ext.get::().is_none()); + /// ext.insert(5i32); + /// + /// assert_eq!(ext.get::(), Some(&5i32)); + /// ``` + pub fn get(&self) -> Option<&T> { + self + .map + .as_ref() + .and_then(|map| map.get(&TypeId::of::())) + //TODO: we can use unsafe and remove double checking the type id + .and_then(|boxed| (&**boxed as &(Any + 'static)).downcast_ref()) + } + + /// Get a mutable reference to a type previously inserted on this `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// ext.insert(String::from("Hello")); + /// ext.get_mut::().unwrap().push_str(" World"); + /// + /// assert_eq!(ext.get::().unwrap(), "Hello World"); + /// ``` + pub fn get_mut(&mut self) -> Option<&mut T> { + self + .map + .as_mut() + .and_then(|map| map.get_mut(&TypeId::of::())) + //TODO: we can use unsafe and remove double checking the type id + .and_then(|boxed| (&mut **boxed as &mut (Any + 'static)).downcast_mut()) + } + + + /// Remove a type from this `Extensions`. + /// + /// If a extension of this type existed, it will be returned. 
+ /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// ext.insert(5i32); + /// assert_eq!(ext.remove::(), Some(5i32)); + /// assert!(ext.get::().is_none()); + /// ``` + pub fn remove(&mut self) -> Option { + self + .map + .as_mut() + .and_then(|map| map.remove(&TypeId::of::())) + .and_then(|boxed| { + //TODO: we can use unsafe and remove double checking the type id + (boxed as Box) + .downcast() + .ok() + .map(|boxed| *boxed) + }) + } + + /// Clear the `Extensions` of all inserted extensions. + /// + /// # Example + /// + /// ``` + /// # use http::Extensions; + /// let mut ext = Extensions::new(); + /// ext.insert(5i32); + /// ext.clear(); + /// + /// assert!(ext.get::().is_none()); + /// ``` + #[inline] + pub fn clear(&mut self) { + if let Some(ref mut map) = self.map { + map.clear(); + } + } +} + +impl fmt::Debug for Extensions { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Extensions") + .finish() + } +} + +#[test] +fn test_extensions() { + #[derive(Debug, PartialEq)] + struct MyType(i32); + + let mut extensions = Extensions::new(); + + extensions.insert(5i32); + extensions.insert(MyType(10)); + + assert_eq!(extensions.get(), Some(&5i32)); + assert_eq!(extensions.get_mut(), Some(&mut 5i32)); + + assert_eq!(extensions.remove::(), Some(5i32)); + assert!(extensions.get::().is_none()); + + assert_eq!(extensions.get::(), None); + assert_eq!(extensions.get(), Some(&MyType(10))); +} diff --git a/third_party/rust/http/src/header/map.rs b/third_party/rust/http/src/header/map.rs new file mode 100644 index 000000000000..2344c659bd26 --- /dev/null +++ b/third_party/rust/http/src/header/map.rs @@ -0,0 +1,3282 @@ +use super::HeaderValue; +use super::name::{HeaderName, HdrName, InvalidHeaderName}; + +use std::{fmt, mem, ops, ptr, vec}; +use std::collections::hash_map::RandomState; +use std::hash::{BuildHasher, Hasher, Hash}; +use std::iter::FromIterator; +use std::marker::PhantomData; + 
+pub use self::as_header_name::AsHeaderName; +pub use self::into_header_name::IntoHeaderName; + +/// A set of HTTP headers +/// +/// `HeaderMap` is an multimap of `HeaderName` to values. +/// +/// # Examples +/// +/// Basic usage +/// +/// ``` +/// # use http::HeaderMap; +/// # use http::header::{CONTENT_LENGTH, HOST, LOCATION}; +/// let mut headers = HeaderMap::new(); +/// +/// headers.insert(HOST, "example.com".parse().unwrap()); +/// headers.insert(CONTENT_LENGTH, "123".parse().unwrap()); +/// +/// assert!(headers.contains_key(HOST)); +/// assert!(!headers.contains_key(LOCATION)); +/// +/// assert_eq!(headers[HOST], "example.com"); +/// +/// headers.remove(HOST); +/// +/// assert!(!headers.contains_key(HOST)); +/// ``` +#[derive(Clone)] +pub struct HeaderMap { + // Used to mask values to get an index + mask: Size, + indices: Box<[Pos]>, + entries: Vec>, + extra_values: Vec>, + danger: Danger, +} + +// # Implementation notes +// +// Below, you will find a fairly large amount of code. Most of this is to +// provide the necessary functions to efficiently manipulate the header +// multimap. The core hashing table is based on robin hood hashing [1]. While +// this is the same hashing algorithm used as part of Rust's `HashMap` in +// stdlib, many implementation details are different. The two primary reasons +// for this divergence are that `HeaderMap` is a multimap and the structure has +// been optimized to take advantage of the characteristics of HTTP headers. +// +// ## Structure Layout +// +// Most of the data contained by `HeaderMap` is *not* stored in the hash table. +// Instead, pairs of header name and *first* associated header value are stored +// in the `entries` vector. If the header name has more than one associated +// header value, then additional values are stored in `extra_values`. The actual +// hash table (`indices`) only maps hash codes to indices in `entries`. 
This +// means that, when an eviction happens, the actual header name and value stay +// put and only a tiny amount of memory has to be copied. +// +// Extra values associated with a header name are tracked using a linked list. +// Links are formed with offsets into `extra_values` and not pointers. +// +// [1]: https://en.wikipedia.org/wiki/Hash_table#Robin_Hood_hashing + +/// `HeaderMap` entry iterator. +/// +/// Yields `(&HeaderName, &value)` tuples. The same header name may be yielded +/// more than once if it has more than one associated value. +#[derive(Debug)] +pub struct Iter<'a, T: 'a> { + inner: IterMut<'a, T>, +} + +/// `HeaderMap` mutable entry iterator +/// +/// Yields `(&HeaderName, &mut value)` tuples. The same header name may be +/// yielded more than once if it has more than one associated value. +#[derive(Debug)] +pub struct IterMut<'a, T: 'a> { + map: *mut HeaderMap, + entry: usize, + cursor: Option, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// An owning iterator over the entries of a `HeaderMap`. +/// +/// This struct is created by the `into_iter` method on `HeaderMap`. +#[derive(Debug)] +pub struct IntoIter { + // If None, pull from `entries` + next: Option, + entries: vec::IntoIter>, + extra_values: Vec>, +} + +/// An iterator over `HeaderMap` keys. +/// +/// Each header name is yielded only once, even if it has more than one +/// associated value. +#[derive(Debug)] +pub struct Keys<'a, T: 'a> { + inner: ::std::slice::Iter<'a, Bucket>, +} + +/// `HeaderMap` value iterator. +/// +/// Each value contained in the `HeaderMap` will be yielded. +#[derive(Debug)] +pub struct Values<'a, T: 'a> { + inner: Iter<'a, T>, +} + +/// `HeaderMap` mutable value iterator +#[derive(Debug)] +pub struct ValuesMut<'a, T: 'a> { + inner: IterMut<'a, T>, +} + +/// A drain iterator for `HeaderMap`. 
+#[derive(Debug)] +pub struct Drain<'a, T: 'a> { + idx: usize, + map: *mut HeaderMap, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// A view to all values stored in a single entry. +/// +/// This struct is returned by `HeaderMap::get_all`. +#[derive(Debug)] +pub struct GetAll<'a, T: 'a> { + map: &'a HeaderMap, + index: Option, +} + +/// A view into a single location in a `HeaderMap`, which may be vacant or occupied. +#[derive(Debug)] +pub enum Entry<'a, T: 'a> { + /// An occupied entry + Occupied(OccupiedEntry<'a, T>), + + /// A vacant entry + Vacant(VacantEntry<'a, T>), +} + +/// A view into a single empty location in a `HeaderMap`. +/// +/// This struct is returned as part of the `Entry` enum. +#[derive(Debug)] +pub struct VacantEntry<'a, T: 'a> { + map: &'a mut HeaderMap, + key: HeaderName, + hash: HashValue, + probe: usize, + danger: bool, +} + +/// A view into a single occupied location in a `HeaderMap`. +/// +/// This struct is returned as part of the `Entry` enum. +#[derive(Debug)] +pub struct OccupiedEntry<'a, T: 'a> { + map: &'a mut HeaderMap, + probe: usize, + index: usize, +} + +/// An iterator of all values associated with a single header name. +#[derive(Debug)] +pub struct ValueIter<'a, T: 'a> { + map: &'a HeaderMap, + index: usize, + front: Option, + back: Option, +} + +/// A mutable iterator of all values associated with a single header name. +#[derive(Debug)] +pub struct ValueIterMut<'a, T: 'a> { + map: *mut HeaderMap, + index: usize, + front: Option, + back: Option, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// An drain iterator of all values associated with a single header name. +#[derive(Debug)] +pub struct ValueDrain<'a, T: 'a> { + map: *mut HeaderMap, + first: Option, + next: Option, + lt: PhantomData<&'a mut HeaderMap>, +} + +/// Tracks the value iterator state +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum Cursor { + Head, + Values(usize), +} + +/// Type used for representing the size of a HeaderMap value. 
+/// +/// 32,768 is more than enough entries for a single header map. Setting this +/// limit enables using `u16` to represent all offsets, which takes 2 bytes +/// instead of 8 on 64 bit processors. +/// +/// Setting this limit is especially benificial for `indices`, making it more +/// cache friendly. More hash codes can fit in a cache line. +/// +/// You may notice that `u16` may represent more than 32,768 values. This is +/// true, but 32,768 should be plenty and it allows us to reserve the top bit +/// for future usage. +type Size = usize; + +/// This limit falls out from above. +const MAX_SIZE: usize = (1 << 15); + +/// An entry in the hash table. This represents the full hash code for an entry +/// as well as the position of the entry in the `entries` vector. +#[derive(Copy, Clone)] +struct Pos { + // Index in the `entries` vec + index: Size, + // Full hash value for the entry. + hash: HashValue, +} + +/// Hash values are limited to u16 as well. While `fast_hash` and `Hasher` +/// return `usize` hash codes, limiting the effective hash code to the lower 16 +/// bits is fine since we know that the `indices` vector will never grow beyond +/// that size. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +struct HashValue(usize); + +/// Stores the data associated with a `HeaderMap` entry. Only the first value is +/// included in this struct. If a header name has more than one associated +/// value, all extra values are stored in the `extra_values` vector. A doubly +/// linked list of entries is maintained. The doubly linked list is used so that +/// removing a value is constant time. This also has the nice property of +/// enabling double ended iteration. +#[derive(Debug, Clone)] +struct Bucket { + hash: HashValue, + key: HeaderName, + value: T, + links: Option, +} + +/// The head and tail of the value linked list. 
+#[derive(Debug, Copy, Clone)] +struct Links { + next: usize, + tail: usize, +} + +/// Node in doubly-linked list of header value entries +#[derive(Debug, Clone)] +struct ExtraValue { + value: T, + prev: Link, + next: Link, +} + +/// A header value node is either linked to another node in the `extra_values` +/// list or it points to an entry in `entries`. The entry in `entries` is the +/// start of the list and holds the associated header name. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum Link { + Entry(usize), + Extra(usize), +} + +/// Tracks the header map danger level! This relates to the adaptive hashing +/// algorithm. A HeaderMap starts in the "green" state, when a large number of +/// collisions are detected, it transitions to the yellow state. At this point, +/// the header map will either grow and switch back to the green state OR it +/// will transition to the red state. +/// +/// When in the red state, a safe hashing algorithm is used and all values in +/// the header map have to be rehashed. +#[derive(Clone)] +enum Danger { + Green, + Yellow, + Red(RandomState), +} + +// Constants related to detecting DOS attacks. +// +// Displacement is the number of entries that get shifted when inserting a new +// value. Forward shift is how far the entry gets stored from the ideal +// position. +// +// The current constant values were picked from another implementation. It could +// be that there are different values better suited to the header map case. +const DISPLACEMENT_THRESHOLD: usize = 128; +const FORWARD_SHIFT_THRESHOLD: usize = 512; + +// The default strategy for handling the yellow danger state is to increase the +// header map capacity in order to (hopefully) reduce the number of collisions. +// If growing the hash map would cause the load factor to drop bellow this +// threshold, then instead of growing, the headermap is switched to the red +// danger state and safe hashing is used instead. 
+const LOAD_FACTOR_THRESHOLD: f32 = 0.2; + +// Macro used to iterate the hash table starting at a given point, looping when +// the end is hit. +macro_rules! probe_loop { + ($label:tt: $probe_var: ident < $len: expr, $body: expr) => { + debug_assert!($len > 0); + $label: + loop { + if $probe_var < $len { + $body + $probe_var += 1; + } else { + $probe_var = 0; + } + } + }; + ($probe_var: ident < $len: expr, $body: expr) => { + debug_assert!($len > 0); + loop { + if $probe_var < $len { + $body + $probe_var += 1; + } else { + $probe_var = 0; + } + } + }; +} + +// First part of the robinhood algorithm. Given a key, find the slot in which it +// will be inserted. This is done by starting at the "ideal" spot. Then scanning +// until the destination slot is found. A destination slot is either the next +// empty slot or the next slot that is occupied by an entry that has a lower +// displacement (displacement is the distance from the ideal spot). +// +// This is implemented as a macro instead of a function that takes a closure in +// order to guarantee that it is "inlined". There is no way to annotate closures +// to guarantee inlining. +macro_rules! insert_phase_one { + ($map:ident, + $key:expr, + $probe:ident, + $pos:ident, + $hash:ident, + $danger:ident, + $vacant:expr, + $occupied:expr, + $robinhood:expr) => + {{ + let $hash = hash_elem_using(&$map.danger, &$key); + let mut $probe = desired_pos($map.mask, $hash); + let mut dist = 0; + let ret; + + // Start at the ideal position, checking all slots + probe_loop!('probe: $probe < $map.indices.len(), { + if let Some(($pos, entry_hash)) = $map.indices[$probe].resolve() { + // The slot is already occupied, but check if it has a lower + // displacement. + let their_dist = probe_distance($map.mask, entry_hash, $probe); + + if their_dist < dist { + // The new key's distance is larger, so claim this spot and + // displace the current entry. + // + // Check if this insertion is above the danger threshold. 
+ let $danger = + dist >= FORWARD_SHIFT_THRESHOLD && !$map.danger.is_red(); + + ret = $robinhood; + break 'probe; + } else if entry_hash == $hash && $map.entries[$pos].key == $key { + // There already is an entry with the same key. + ret = $occupied; + break 'probe; + } + } else { + // The entry is vacant, use it for this key. + let $danger = + dist >= FORWARD_SHIFT_THRESHOLD && !$map.danger.is_red(); + + ret = $vacant; + break 'probe; + } + + dist += 1; + }); + + ret + }} +} + +// ===== impl HeaderMap ===== + +impl HeaderMap { + /// Create an empty `HeaderMap`. + /// + /// The map will be created without any capacity. This function will not + /// allocate. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let map = HeaderMap::new(); + /// + /// assert!(map.is_empty()); + /// assert_eq!(0, map.capacity()); + /// ``` + pub fn new() -> Self { + HeaderMap::with_capacity(0) + } +} + +impl HeaderMap { + /// Create an empty `HeaderMap` with the specified capacity. + /// + /// The returned map will allocate internal storage in order to hold about + /// `capacity` elements without reallocating. However, this is a "best + /// effort" as there are usage patterns that could cause additional + /// allocations before `capacity` headers are stored in the map. + /// + /// More capacity than requested may be allocated. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let map: HeaderMap = HeaderMap::with_capacity(10); + /// + /// assert!(map.is_empty()); + /// assert_eq!(12, map.capacity()); + /// ``` + pub fn with_capacity(capacity: usize) -> HeaderMap { + assert!(capacity <= MAX_SIZE, "requested capacity too large"); + + if capacity == 0 { + HeaderMap { + mask: 0, + indices: Box::new([]), // as a ZST, this doesn't actually allocate anything + entries: Vec::new(), + extra_values: Vec::new(), + danger: Danger::Green, + } + } else { + let raw_cap = to_raw_capacity(capacity).next_power_of_two(); + debug_assert!(raw_cap > 0); + + HeaderMap { + mask: (raw_cap - 1) as Size, + indices: vec![Pos::none(); raw_cap].into_boxed_slice(), + entries: Vec::with_capacity(raw_cap), + extra_values: Vec::new(), + danger: Danger::Green, + } + } + } + + /// Returns the number of headers stored in the map. + /// + /// This number represents the total number of **values** stored in the map. + /// This number can be greater than or equal to the number of **keys** + /// stored given that a single key may have more than one associated value. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{ACCEPT, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(0, map.len()); + /// + /// map.insert(ACCEPT, "text/plain".parse().unwrap()); + /// map.insert(HOST, "localhost".parse().unwrap()); + /// + /// assert_eq!(2, map.len()); + /// + /// map.append(ACCEPT, "text/html".parse().unwrap()); + /// + /// assert_eq!(3, map.len()); + /// ``` + pub fn len(&self) -> usize { + self.entries.len() + self.extra_values.len() + } + + /// Returns the number of keys stored in the map. + /// + /// This number will be less than or equal to `len()` as each key may have + /// more than one associated value. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{ACCEPT, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(0, map.keys_len()); + /// + /// map.insert(ACCEPT, "text/plain".parse().unwrap()); + /// map.insert(HOST, "localhost".parse().unwrap()); + /// + /// assert_eq!(2, map.keys_len()); + /// + /// map.insert(ACCEPT, "text/html".parse().unwrap()); + /// + /// assert_eq!(2, map.keys_len()); + /// ``` + pub fn keys_len(&self) -> usize { + self.entries.len() + } + + /// Returns true if the map contains no elements. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// + /// assert!(map.is_empty()); + /// + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// assert!(!map.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self.entries.len() == 0 + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// map.clear(); + /// assert!(map.is_empty()); + /// assert!(map.capacity() > 0); + /// ``` + pub fn clear(&mut self) { + self.entries.clear(); + self.extra_values.clear(); + self.danger = Danger::Green; + + for e in self.indices.iter_mut() { + *e = Pos::none(); + } + } + + /// Returns the number of headers the map can hold without reallocating. + /// + /// This number is an approximation as certain usage patterns could cause + /// additional allocations before the returned capacity is filled. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(0, map.capacity()); + /// + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// assert_eq!(6, map.capacity()); + /// ``` + pub fn capacity(&self) -> usize { + usable_capacity(self.indices.len()) + } + + /// Reserves capacity for at least `additional` more headers to be inserted + /// into the `HeaderMap`. + /// + /// The header map may reserve more space to avoid frequent reallocations. + /// Like with `with_capacity`, this will be a "best effort" to avoid + /// allocations until `additional` more headers are inserted. Certain usage + /// patterns could cause additional allocations before the number is + /// reached. + /// + /// # Panics + /// + /// Panics if the new allocation size overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.reserve(10); + /// # map.insert(HOST, "bar".parse().unwrap()); + /// ``` + pub fn reserve(&mut self, additional: usize) { + // TODO: This can't overflow if done properly... since the max # of + // elements is u16::MAX. + let cap = self.entries.len() + .checked_add(additional) + .expect("reserve overflow"); + + if cap > self.indices.len() { + let cap = cap.next_power_of_two(); + + if self.entries.len() == 0 { + self.mask = cap - 1; + self.indices = vec![Pos::none(); cap].into_boxed_slice(); + self.entries = Vec::with_capacity(usable_capacity(cap)); + } else { + self.grow(cap); + } + } + } + + /// Returns a reference to the value associated with the key. + /// + /// If there are multiple values associated with the key, then the first one + /// is returned. Use `get_all` to get all values associated with a given + /// key. Returns `None` if there are no values associated with the key. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.get("host").is_none()); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// assert_eq!(map.get(HOST).unwrap(), &"hello"); + /// assert_eq!(map.get("host").unwrap(), &"hello"); + /// + /// map.append(HOST, "world".parse().unwrap()); + /// assert_eq!(map.get("host").unwrap(), &"hello"); + /// ``` + pub fn get(&self, key: K) -> Option<&T> + where K: AsHeaderName + { + match key.find(self) { + Some((_, found)) => { + let entry = &self.entries[found]; + Some(&entry.value) + } + None => None, + } + } + + /// Returns a mutable reference to the value associated with the key. + /// + /// If there are multiple values associated with the key, then the first one + /// is returned. Use `entry` to get all values associated with a given + /// key. Returns `None` if there are no values associated with the key. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "hello".to_string()); + /// map.get_mut("host").unwrap().push_str("-world"); + /// + /// assert_eq!(map.get(HOST).unwrap(), &"hello-world"); + /// ``` + pub fn get_mut(&mut self, key: K) -> Option<&mut T> + where K: AsHeaderName + { + match key.find(self) { + Some((_, found)) => { + let entry = &mut self.entries[found]; + Some(&mut entry.value) + } + None => None, + } + } + + /// Returns a view of all values associated with a key. + /// + /// The returned view does not incur any allocations and allows iterating + /// the values associated with the key. See [`GetAll`] for more details. + /// Returns `None` if there are no values associated with the key. 
+ /// + /// [`GetAll`]: struct.GetAll.html + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// + /// let view = map.get_all("host"); + /// + /// let mut iter = view.iter(); + /// assert_eq!(&"hello", iter.next().unwrap()); + /// assert_eq!(&"goodbye", iter.next().unwrap()); + /// assert!(iter.next().is_none()); + /// ``` + pub fn get_all(&self, key: K) -> GetAll + where K: AsHeaderName + { + GetAll { + map: self, + index: key.find(self).map(|(_, i)| i), + } + } + + /// Returns true if the map contains a value for the specified key. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(!map.contains_key(HOST)); + /// + /// map.insert(HOST, "world".parse().unwrap()); + /// assert!(map.contains_key("host")); + /// ``` + pub fn contains_key(&self, key: K) -> bool + where K: AsHeaderName + { + key.find(self).is_some() + } + + /// An iterator visiting all key-value pairs. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. Each key will be yielded once per associated + /// value. So, if a key has 3 associated values, it will be yielded 3 times. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// for (key, value) in map.iter() { + /// println!("{:?}: {:?}", key, value); + /// } + /// ``` + pub fn iter(&self) -> Iter { + Iter { + inner: IterMut { + map: self as *const _ as *mut _, + entry: 0, + cursor: self.entries.first().map(|_| Cursor::Head), + lt: PhantomData, + } + } + } + + /// An iterator visiting all key-value pairs, with mutable value references. + /// + /// The iterator order is arbitrary, but consistent across platforms for the + /// same crate version. Each key will be yielded once per associated value, + /// so if a key has 3 associated values, it will be yielded 3 times. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::default(); + /// + /// map.insert(HOST, "hello".to_string()); + /// map.append(HOST, "goodbye".to_string()); + /// map.insert(CONTENT_LENGTH, "123".to_string()); + /// + /// for (key, value) in map.iter_mut() { + /// value.push_str("-boop"); + /// } + /// ``` + pub fn iter_mut(&mut self) -> IterMut { + IterMut { + map: self as *mut _, + entry: 0, + cursor: self.entries.first().map(|_| Cursor::Head), + lt: PhantomData, + } + } + + /// An iterator visiting all keys. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. Each key will be yielded only once even if it + /// has multiple associated values. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// for key in map.keys() { + /// println!("{:?}", key); + /// } + /// ``` + pub fn keys(&self) -> Keys { + Keys { inner: self.entries.iter() } + } + + /// An iterator visiting all values. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// for value in map.values() { + /// println!("{:?}", value); + /// } + /// ``` + pub fn values(&self) -> Values { + Values { inner: self.iter() } + } + + /// An iterator visiting all values mutably. + /// + /// The iteration order is arbitrary, but consistent across platforms for + /// the same crate version. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::default(); + /// + /// map.insert(HOST, "hello".to_string()); + /// map.append(HOST, "goodbye".to_string()); + /// map.insert(CONTENT_LENGTH, "123".to_string()); + /// + /// for value in map.values_mut() { + /// value.push_str("-boop"); + /// } + /// ``` + pub fn values_mut(&mut self) -> ValuesMut { + ValuesMut { inner: self.iter_mut() } + } + + /// Clears the map, returning all entries as an iterator. + /// + /// The internal memory is kept for reuse. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::{CONTENT_LENGTH, HOST}; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(HOST, "hello".parse().unwrap()); + /// map.append(HOST, "goodbye".parse().unwrap()); + /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); + /// + /// let mut drain = map.drain(); + /// + /// let (key, mut vals) = drain.next().unwrap(); + /// + /// assert_eq!("host", key); + /// assert_eq!("hello", vals.next().unwrap()); + /// assert_eq!("goodbye", vals.next().unwrap()); + /// assert!(vals.next().is_none()); + /// + /// let (key, mut vals) = drain.next().unwrap(); + /// + /// assert_eq!("content-length", key); + /// assert_eq!("123", vals.next().unwrap()); + /// assert!(vals.next().is_none()); + /// ``` + pub fn drain(&mut self) -> Drain { + for i in self.indices.iter_mut() { + *i = Pos::none(); + } + + Drain { + idx: 0, + map: self as *mut _, + lt: PhantomData, + } + } + + fn value_iter(&self, idx: Option) -> ValueIter { + use self::Cursor::*; + + if let Some(idx) = idx { + let back = { + let entry = &self.entries[idx]; + + entry.links + .map(|l| Values(l.tail)) + .unwrap_or(Head) + }; + + ValueIter { + map: self, + index: idx, + front: Some(Head), + back: Some(back), + } + } else { + ValueIter { + map: self, + index: ::std::usize::MAX, + front: None, + back: None, + } + } + } + + fn value_iter_mut(&mut self, idx: usize) -> ValueIterMut { + use self::Cursor::*; + + let back = { + let entry = &self.entries[idx]; + + entry.links + .map(|l| Values(l.tail)) + .unwrap_or(Head) + }; + + ValueIterMut { + map: self as *mut _, + index: idx, + front: Some(Head), + back: Some(back), + lt: PhantomData, + } + } + + /// Gets the given key's corresponding entry in the map for in-place + /// manipulation. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map: HeaderMap = HeaderMap::default(); + /// + /// let headers = &[ + /// "content-length", + /// "x-hello", + /// "Content-Length", + /// "x-world", + /// ]; + /// + /// for &header in headers { + /// let counter = map.entry(header).unwrap().or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(map["content-length"], 2); + /// assert_eq!(map["x-hello"], 1); + /// ``` + pub fn entry(&mut self, key: K) -> Result, InvalidHeaderName> + where K: AsHeaderName, + { + key.entry(self) + } + + fn entry2(&mut self, key: K) -> Entry + where K: Hash + Into, + HeaderName: PartialEq, + { + // Ensure that there is space in the map + self.reserve_one(); + + insert_phase_one!( + self, + key, + probe, + pos, + hash, + danger, + Entry::Vacant(VacantEntry { + map: self, + hash: hash, + key: key.into(), + probe: probe, + danger: danger, + }), + Entry::Occupied(OccupiedEntry { + map: self, + index: pos, + probe: probe, + }), + Entry::Vacant(VacantEntry { + map: self, + hash: hash, + key: key.into(), + probe: probe, + danger: danger, + })) + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not previously have this key present, then `None` is + /// returned. + /// + /// If the map did have this key present, the new value is associated with + /// the key and all previous values are removed. **Note** that only a single + /// one of the previous values is returned. If there are multiple values + /// that have been previously associated with the key, then the first one is + /// returned. See `insert_mult` on `OccupiedEntry` for an API that returns + /// all values. + /// + /// The key is not updated, though; this matters for types that can be `==` + /// without being identical. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.insert(HOST, "world".parse().unwrap()).is_none()); + /// assert!(!map.is_empty()); + /// + /// let mut prev = map.insert(HOST, "earth".parse().unwrap()).unwrap(); + /// assert_eq!("world", prev); + /// ``` + pub fn insert(&mut self, key: K, val: T) -> Option + where K: IntoHeaderName, + { + key.insert(self, val) + } + + #[inline] + fn insert2(&mut self, key: K, value: T) -> Option + where K: Hash + Into, + HeaderName: PartialEq, + { + self.reserve_one(); + + insert_phase_one!( + self, key, probe, pos, hash, danger, + // Vacant + { + drop(danger); // Make lint happy + let index = self.entries.len(); + self.insert_entry(hash, key.into(), value); + self.indices[probe] = Pos::new(index, hash); + None + }, + // Occupied + Some(self.insert_occupied(pos, value)), + // Robinhood + { + self.insert_phase_two( + key.into(), + value, + hash, + probe, + danger); + None + }) + } + + /// Set an occupied bucket to the given value + #[inline] + fn insert_occupied(&mut self, index: usize, value: T) -> T { + if let Some(links) = self.entries[index].links { + self.remove_all_extra_values(links.next); + } + + let entry = &mut self.entries[index]; + mem::replace(&mut entry.value, value) + } + + fn insert_occupied_mult(&mut self, index: usize, value: T) -> ValueDrain { + let old; + let links; + + { + let entry = &mut self.entries[index]; + + old = mem::replace(&mut entry.value, value); + links = entry.links.take(); + } + + ValueDrain { + map: self as *mut _, + first: Some(old), + next: links.map(|l| l.next), + lt: PhantomData, + } + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not previously have this key present, then `false` is + /// returned. + /// + /// If the map did have this key present, the new value is pushed to the end + /// of the list of values currently associated with the key. 
The key is not + /// updated, though; this matters for types that can be `==` without being + /// identical. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// assert!(map.insert(HOST, "world".parse().unwrap()).is_none()); + /// assert!(!map.is_empty()); + /// + /// map.append(HOST, "earth".parse().unwrap()); + /// + /// let values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!("world", *i.next().unwrap()); + /// assert_eq!("earth", *i.next().unwrap()); + /// ``` + pub fn append(&mut self, key: K, value: T) -> bool + where K: IntoHeaderName, + { + key.append(self, value) + } + + #[inline] + fn append2(&mut self, key: K, value: T) -> bool + where K: Hash + Into, + HeaderName: PartialEq, + { + self.reserve_one(); + + insert_phase_one!( + self, key, probe, pos, hash, danger, + // Vacant + { + drop(danger); + let index = self.entries.len(); + self.insert_entry(hash, key.into(), value); + self.indices[probe] = Pos::new(index, hash); + false + }, + // Occupied + { + append_value(pos, &mut self.entries[pos], &mut self.extra_values, value); + true + }, + // Robinhood + { + self.insert_phase_two( + key.into(), + value, + hash, + probe, + danger); + + false + }) + } + + #[inline] + fn find(&self, key: &K) -> Option<(usize, usize)> + where K: Hash + Into, + HeaderName: PartialEq, + { + if self.entries.is_empty() { + return None; + } + + let hash = hash_elem_using(&self.danger, key); + let mask = self.mask; + let mut probe = desired_pos(mask, hash); + let mut dist = 0; + + probe_loop!(probe < self.indices.len(), { + if let Some((i, entry_hash)) = self.indices[probe].resolve() { + if dist > probe_distance(mask, entry_hash, probe) { + // give up when probe distance is too long + return None; + } else if entry_hash == hash && self.entries[i].key == *key { + return Some((probe, i)); + } + } else { + return None; + } + + dist += 1; + }); + } + + /// phase 2 is 
post-insert where we forward-shift `Pos` in the indices. + #[inline] + fn insert_phase_two(&mut self, + key: HeaderName, + value: T, + hash: HashValue, + probe: usize, + danger: bool) -> usize + { + // Push the value and get the index + let index = self.entries.len(); + self.insert_entry(hash, key, value); + + let num_displaced = do_insert_phase_two( + &mut self.indices, + probe, + Pos::new(index, hash)); + + if danger || num_displaced >= DISPLACEMENT_THRESHOLD { + // Increase danger level + self.danger.to_yellow(); + } + + index + } + + /// Removes a key from the map, returning the value associated with the key. + /// + /// Returns `None` if the map does not contain the key. If there are + /// multiple values associated with the key, then the first one is returned. + /// See `remove_entry_mult` on `OccupiedEntry` for an API that yields all + /// values. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// let prev = map.remove(HOST).unwrap(); + /// assert_eq!("hello.world", prev); + /// + /// assert!(map.remove(HOST).is_none()); + /// ``` + pub fn remove(&mut self, key: K) -> Option + where K: AsHeaderName + { + match key.find(self) { + Some((probe, idx)) => { + if let Some(links) = self.entries[idx].links { + self.remove_all_extra_values(links.next); + } + + let entry = self.remove_found(probe, idx); + + Some(entry.value) + } + None => None, + } + } + + /// Remove an entry from the map. 
+ #[inline] + fn remove_found(&mut self, probe: usize, found: usize) -> Bucket { + // index `probe` and entry `found` is to be removed + // use swap_remove, but then we need to update the index that points + // to the other entry that has to move + self.indices[probe] = Pos::none(); + let entry = self.entries.swap_remove(found); + + // correct index that points to the entry that had to swap places + if let Some(entry) = self.entries.get(found) { + // was not last element + // examine new element in `found` and find it in indices + let mut probe = desired_pos(self.mask, entry.hash); + + probe_loop!(probe < self.indices.len(), { + if let Some((i, _)) = self.indices[probe].resolve() { + if i >= self.entries.len() { + // found it + self.indices[probe] = Pos::new(found, entry.hash); + break; + } + } + }); + + // Update links + if let Some(links) = entry.links { + self.extra_values[links.next].prev = Link::Entry(found); + self.extra_values[links.tail].next = Link::Entry(found); + } + } + + // backward shift deletion in self.indices + // after probe, shift all non-ideally placed indices backward + if self.entries.len() > 0 { + let mut last_probe = probe; + let mut probe = probe + 1; + + probe_loop!(probe < self.indices.len(), { + if let Some((_, entry_hash)) = self.indices[probe].resolve() { + if probe_distance(self.mask, entry_hash, probe) > 0 { + self.indices[last_probe] = self.indices[probe]; + self.indices[probe] = Pos::none(); + } else { + break; + } + } else { + break; + } + + last_probe = probe; + }); + } + + entry + } + + /// Removes the `ExtraValue` at the given index. 
+ #[inline] + fn remove_extra_value(&mut self, idx: usize) -> ExtraValue { + let prev; + let next; + + { + debug_assert!(self.extra_values.len() > idx); + let extra = &self.extra_values[idx]; + prev = extra.prev; + next = extra.next; + } + + // First unlink the extra value + match (prev, next) { + (Link::Entry(prev), Link::Entry(next)) => { + debug_assert_eq!(prev, next); + debug_assert!(self.entries.len() > prev); + + self.entries[prev].links = None; + } + (Link::Entry(prev), Link::Extra(next)) => { + debug_assert!(self.entries.len() > prev); + debug_assert!(self.entries[prev].links.is_some()); + + self.entries[prev].links.as_mut().unwrap() + .next = next; + + debug_assert!(self.extra_values.len() > next); + self.extra_values[next].prev = Link::Entry(prev); + } + (Link::Extra(prev), Link::Entry(next)) => { + debug_assert!(self.entries.len() > next); + debug_assert!(self.entries[next].links.is_some()); + + self.entries[next].links.as_mut().unwrap() + .tail = prev; + + debug_assert!(self.extra_values.len() > prev); + self.extra_values[prev].next = Link::Entry(next); + } + (Link::Extra(prev), Link::Extra(next)) => { + debug_assert!(self.extra_values.len() > next); + debug_assert!(self.extra_values.len() > prev); + + self.extra_values[prev].next = Link::Extra(next); + self.extra_values[next].prev = Link::Extra(prev); + } + } + + // Remove the extra value + let mut extra = self.extra_values.swap_remove(idx); + + // This is the index of the value that was moved (possibly `extra`) + let old_idx = self.extra_values.len(); + + // Update the links + if extra.prev == Link::Extra(old_idx) { + extra.prev = Link::Extra(idx); + } + + if extra.next == Link::Extra(old_idx) { + extra.next = Link::Extra(idx); + } + + // Check if another entry was displaced. If it was, then the links + // need to be fixed. 
+ if idx != old_idx { + let next; + let prev; + + { + debug_assert!(self.extra_values.len() > idx); + let moved = &self.extra_values[idx]; + next = moved.next; + prev = moved.prev; + } + + // An entry was moved, we have to the links + match prev { + Link::Entry(entry_idx) => { + // It is critical that we do not attempt to read the + // header name or value as that memory may have been + // "released" already. + debug_assert!(self.entries.len() > entry_idx); + debug_assert!(self.entries[entry_idx].links.is_some()); + + let links = self.entries[entry_idx].links.as_mut().unwrap(); + links.next = idx; + } + Link::Extra(extra_idx) => { + debug_assert!(self.extra_values.len() > extra_idx); + self.extra_values[extra_idx].next = Link::Extra(idx); + } + } + + match next { + Link::Entry(entry_idx) => { + debug_assert!(self.entries.len() > entry_idx); + debug_assert!(self.entries[entry_idx].links.is_some()); + + let links = self.entries[entry_idx].links.as_mut().unwrap(); + links.tail = idx; + } + Link::Extra(extra_idx) => { + debug_assert!(self.extra_values.len() > extra_idx); + self.extra_values[extra_idx].prev = Link::Extra(idx); + } + } + } + + debug_assert!({ + for v in &self.extra_values { + assert!(v.next != Link::Extra(old_idx)); + assert!(v.prev != Link::Extra(old_idx)); + } + + true + }); + + extra + } + + fn remove_all_extra_values(&mut self, mut head: usize) { + loop { + let extra = self.remove_extra_value(head); + + if let Link::Extra(idx) = extra.next { + head = idx; + } else { + break; + } + } + } + + #[inline] + fn insert_entry(&mut self, hash: HashValue, key: HeaderName, value: T) { + assert!(self.entries.len() < MAX_SIZE, "header map at capacity"); + + self.entries.push(Bucket { + hash: hash, + key: key, + value: value, + links: None, + }); + } + + fn rebuild(&mut self) { + // Loop over all entries and re-insert them into the map + 'outer: + for (index, entry) in self.entries.iter_mut().enumerate() { + let hash = hash_elem_using(&self.danger, &entry.key); + 
let mut probe = desired_pos(self.mask, hash); + let mut dist = 0; + + // Update the entry's hash code + entry.hash = hash; + + probe_loop!(probe < self.indices.len(), { + if let Some((_, entry_hash)) = self.indices[probe].resolve() { + // if existing element probed less than us, swap + let their_dist = probe_distance(self.mask, entry_hash, probe); + + if their_dist < dist { + // Robinhood + break; + } + } else { + // Vacant slot + self.indices[probe] = Pos::new(index, hash); + continue 'outer; + } + + dist += 1; + }); + + do_insert_phase_two( + &mut self.indices, + probe, + Pos::new(index, hash)); + } + } + + fn reinsert_entry_in_order(&mut self, pos: Pos) { + if let Some((_, entry_hash)) = pos.resolve() { + // Find first empty bucket and insert there + let mut probe = desired_pos(self.mask, entry_hash); + + probe_loop!(probe < self.indices.len(), { + if self.indices[probe].resolve().is_none() { + // empty bucket, insert here + self.indices[probe] = pos; + return; + } + }); + } + } + + fn reserve_one(&mut self) { + let len = self.entries.len(); + + if self.danger.is_yellow() { + let load_factor = self.entries.len() as f32 / self.indices.len() as f32; + + if load_factor >= LOAD_FACTOR_THRESHOLD { + // Transition back to green danger level + self.danger.to_green(); + + // Double the capacity + let new_cap = self.indices.len() * 2; + + // Grow the capacity + self.grow(new_cap); + } else { + self.danger.to_red(); + + // Rebuild hash table + for index in self.indices.iter_mut() { + *index = Pos::none(); + } + + self.rebuild(); + } + } else if len == self.capacity() { + if len == 0 { + let new_raw_cap = 8; + self.mask = 8 - 1; + self.indices = vec![Pos::none(); new_raw_cap].into_boxed_slice(); + self.entries = Vec::with_capacity(usable_capacity(new_raw_cap)); + } else { + let raw_cap = self.indices.len(); + self.grow(raw_cap << 1); + } + } + } + + #[inline] + fn grow(&mut self, new_raw_cap: usize) { + // This path can never be reached when handling the first allocation 
in + // the map. + + // find first ideally placed element -- start of cluster + let mut first_ideal = 0; + + for (i, pos) in self.indices.iter().enumerate() { + if let Some((_, entry_hash)) = pos.resolve() { + if 0 == probe_distance(self.mask, entry_hash, i) { + first_ideal = i; + break; + } + } + } + + // visit the entries in an order where we can simply reinsert them + // into self.indices without any bucket stealing. + let old_indices = mem::replace(&mut self.indices, vec![Pos::none(); new_raw_cap].into_boxed_slice()); + self.mask = new_raw_cap.wrapping_sub(1) as Size; + + for &pos in &old_indices[first_ideal..] { + self.reinsert_entry_in_order(pos); + } + + for &pos in &old_indices[..first_ideal] { + self.reinsert_entry_in_order(pos); + } + + // Reserve additional entry slots + let more = self.capacity() - self.entries.len(); + self.entries.reserve_exact(more); + } +} + +impl<'a, T> IntoIterator for &'a HeaderMap { + type Item = (&'a HeaderName, &'a T); + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +impl<'a, T> IntoIterator for &'a mut HeaderMap { + type Item = (&'a HeaderName, &'a mut T); + type IntoIter = IterMut<'a, T>; + + fn into_iter(self) -> IterMut<'a, T> { + self.iter_mut() + } +} + +impl IntoIterator for HeaderMap { + type Item = (Option, T); + type IntoIter = IntoIter; + + /// Creates a consuming iterator, that is, one that moves keys and values + /// out of the map in arbitary order. The map cannot be used after calling + /// this. + /// + /// For each yielded item that has `None` provided for the `HeaderName`, + /// then the associated header name is the same as that of the previously + /// yielded item. The first yielded item will have `HeaderName` set. + /// + /// # Examples + /// + /// Basic usage. 
+ /// + /// ``` + /// # use http::header; + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// map.insert(header::CONTENT_LENGTH, "123".parse().unwrap()); + /// map.insert(header::CONTENT_TYPE, "json".parse().unwrap()); + /// + /// let mut iter = map.into_iter(); + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_LENGTH), "123".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_TYPE), "json".parse().unwrap()))); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// Multiple values per key. + /// + /// ``` + /// # use http::header; + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// map.append(header::CONTENT_LENGTH, "123".parse().unwrap()); + /// map.append(header::CONTENT_LENGTH, "456".parse().unwrap()); + /// + /// map.append(header::CONTENT_TYPE, "json".parse().unwrap()); + /// map.append(header::CONTENT_TYPE, "html".parse().unwrap()); + /// map.append(header::CONTENT_TYPE, "xml".parse().unwrap()); + /// + /// let mut iter = map.into_iter(); + /// + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_LENGTH), "123".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((None, "456".parse().unwrap()))); + /// + /// assert_eq!(iter.next(), Some((Some(header::CONTENT_TYPE), "json".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((None, "html".parse().unwrap()))); + /// assert_eq!(iter.next(), Some((None, "xml".parse().unwrap()))); + /// assert!(iter.next().is_none()); + /// ``` + fn into_iter(self) -> IntoIter { + IntoIter { + next: None, + entries: self.entries.into_iter(), + extra_values: self.extra_values, + } + } +} + +impl FromIterator<(HeaderName, T)> for HeaderMap +{ + fn from_iter(iter: I) -> Self + where I: IntoIterator + { + let mut map = HeaderMap::default(); + map.extend(iter); + map + } +} + +impl Extend<(Option, T)> for HeaderMap { + /// Extend a `HeaderMap` with the contents of another `HeaderMap`. 
+ /// + /// This function expects the yielded items to follow the same structure as + /// `IntoIter`. + /// + /// # Panics + /// + /// This panics if the first yielded item does not have a `HeaderName`. + /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// map.insert(ACCEPT, "text/plain".parse().unwrap()); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// let mut extra = HeaderMap::new(); + /// + /// extra.insert(HOST, "foo.bar".parse().unwrap()); + /// extra.insert(COOKIE, "hello".parse().unwrap()); + /// extra.append(COOKIE, "world".parse().unwrap()); + /// + /// map.extend(extra); + /// + /// assert_eq!(map["host"], "foo.bar"); + /// assert_eq!(map["accept"], "text/plain"); + /// assert_eq!(map["cookie"], "hello"); + /// + /// let v = map.get_all("host"); + /// assert_eq!(1, v.iter().count()); + /// + /// let v = map.get_all("cookie"); + /// assert_eq!(2, v.iter().count()); + /// ``` + fn extend, T)>>(&mut self, iter: I) { + let mut iter = iter.into_iter(); + + // The structure of this is a bit weird, but it is mostly to make the + // borrow checker happy. + let (mut key, mut val) = match iter.next() { + Some((Some(key), val)) => (key, val), + Some((None, _)) => panic!("expected a header name, but got None"), + None => return, + }; + + 'outer: + loop { + let mut entry = match self.entry2(key) { + Entry::Occupied(mut e) => { + // Replace all previous values while maintaining a handle to + // the entry. 
+ e.insert(val); + e + } + Entry::Vacant(e) => { + e.insert_entry(val) + } + }; + + // As long as `HeaderName` is none, keep inserting the value into + // the current entry + 'inner: + loop { + match iter.next() { + Some((Some(k), v)) => { + key = k; + val = v; + continue 'outer; + } + Some((None, v)) => { + entry.append(v); + } + None => { + return; + } + } + } + } + } +} + +impl Extend<(HeaderName, T)> for HeaderMap +{ + fn extend>(&mut self, iter: I) { + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. + let iter = iter.into_iter(); + + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + (iter.size_hint().0 + 1) / 2 + }; + + self.reserve(reserve); + + for (k, v) in iter { + self.append(k, v); + } + } +} + +impl PartialEq for HeaderMap { + fn eq(&self, other: &HeaderMap) -> bool { + if self.len() != other.len() { + return false; + } + + self.keys().all(|key| { + self.get_all(key) == other.get_all(key) + }) + } +} + +impl Eq for HeaderMap {} + +impl fmt::Debug for HeaderMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } +} + +impl Default for HeaderMap { + fn default() -> Self { + HeaderMap::with_capacity(0) + } +} + +impl<'a, K, T> ops::Index for HeaderMap + where K: AsHeaderName, +{ + type Output = T; + + /// # Panics + /// Using the index operator will cause a panic if the header you're querying isn't set. + #[inline] + fn index(&self, index: K) -> &T { + self.get(index).expect("no entry found for key") + } +} + +/// phase 2 is post-insert where we forward-shift `Pos` in the indices. 
+/// +/// returns the number of displaced elements +#[inline] +fn do_insert_phase_two(indices: &mut [Pos], + mut probe: usize, + mut old_pos: Pos) + -> usize +{ + let mut num_displaced = 0; + + probe_loop!(probe < indices.len(), { + let pos = &mut indices[probe]; + + if pos.is_none() { + *pos = old_pos; + break; + } else { + num_displaced += 1; + old_pos = mem::replace(pos, old_pos); + } + }); + + num_displaced +} + +#[inline] +fn append_value(entry_idx: usize, + entry: &mut Bucket, + extra: &mut Vec>, + value: T) +{ + match entry.links { + Some(links) => { + let idx = extra.len(); + extra.push(ExtraValue { + value: value, + prev: Link::Extra(links.tail), + next: Link::Entry(entry_idx), + }); + + extra[links.tail].next = Link::Extra(idx); + + entry.links = Some(Links { + tail: idx, + .. links + }); + } + None => { + let idx = extra.len(); + extra.push(ExtraValue { + value: value, + prev: Link::Entry(entry_idx), + next: Link::Entry(entry_idx), + }); + + entry.links = Some(Links { + next: idx, + tail: idx, + }); + } + } +} + +// ===== impl Iter ===== + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = (&'a HeaderName, &'a T); + + fn next(&mut self) -> Option { + self.inner.next_unsafe().map(|(key, ptr)| { + (key, unsafe { &*ptr }) + }) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {} +unsafe impl<'a, T: Sync> Send for Iter<'a, T> {} + +// ===== impl IterMut ===== + +impl<'a, T> IterMut<'a, T> { + fn next_unsafe(&mut self) -> Option<(&'a HeaderName, *mut T)> { + use self::Cursor::*; + + if self.cursor.is_none() { + if (self.entry + 1) >= unsafe { &*self.map }.entries.len() { + return None; + } + + self.entry += 1; + self.cursor = Some(Cursor::Head); + } + + let entry = unsafe { &(*self.map).entries[self.entry] }; + + match self.cursor.unwrap() { + Head => { + self.cursor = entry.links.map(|l| Values(l.next)); + Some((&entry.key, &entry.value as *const _ as *mut _)) + } + 
Values(idx) => { + let extra = unsafe { &(*self.map).extra_values[idx] }; + + match extra.next { + Link::Entry(_) => self.cursor = None, + Link::Extra(i) => self.cursor = Some(Values(i)), + } + + Some((&entry.key, &extra.value as *const _ as *mut _)) + } + } + } +} + +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = (&'a HeaderName, &'a mut T); + + fn next(&mut self) -> Option { + self.next_unsafe().map(|(key, ptr)| { + (key, unsafe { &mut *ptr }) + }) + } + + fn size_hint(&self) -> (usize, Option) { + let map = unsafe { &*self.map }; + debug_assert!(map.entries.len() >= self.entry); + + let lower = map.entries.len() - self.entry; + // We could pessimistically guess at the upper bound, saying + // that its lower + map.extra_values.len(). That could be + // way over though, such as if we're near the end, and have + // already gone through several extra values... + (lower, None) + } +} + +unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} +unsafe impl<'a, T: Send> Send for IterMut<'a, T> {} + +// ===== impl Keys ===== + +impl<'a, T> Iterator for Keys<'a, T> { + type Item = &'a HeaderName; + + fn next(&mut self) -> Option { + self.inner.next().map(|b| &b.key) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl<'a, T> ExactSizeIterator for Keys<'a, T> {} + +// ===== impl Values ==== + +impl<'a, T> Iterator for Values<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + self.inner.next().map(|(_, v)| v) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +// ===== impl ValuesMut ==== + +impl<'a, T> Iterator for ValuesMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + self.inner.next().map(|(_, v)| v) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +// ===== impl Drain ===== + +impl<'a, T> Iterator for Drain<'a, T> { + type Item = (HeaderName, ValueDrain<'a, T>); + + fn next(&mut self) -> Option { + let idx = self.idx; + + 
if idx == unsafe { (*self.map).entries.len() } { + return None; + } + + self.idx += 1; + + let key; + let value; + let next; + + unsafe { + let entry = &(*self.map).entries[idx]; + + // Read the header name + key = ptr::read(&entry.key as *const _); + value = ptr::read(&entry.value as *const _); + next = entry.links.map(|l| l.next); + }; + + let values = ValueDrain { + map: self.map, + first: Some(value), + next: next, + lt: PhantomData, + }; + + Some((key, values)) + } + + fn size_hint(&self) -> (usize, Option) { + let lower = unsafe { (*self.map).entries.len() } - self.idx; + (lower, Some(lower)) + } +} + +impl<'a, T> Drop for Drain<'a, T> { + fn drop(&mut self) { + unsafe { + let map = &mut *self.map; + debug_assert!(map.extra_values.is_empty()); + map.entries.set_len(0); + } + } +} + +unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} +unsafe impl<'a, T: Send> Send for Drain<'a, T> {} + +// ===== impl Entry ===== + +impl<'a, T> Entry<'a, T> { + /// Ensures a value is in the entry by inserting the default if empty. + /// + /// Returns a mutable reference to the **first** value in the entry. + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map: HeaderMap = HeaderMap::default(); + /// + /// let headers = &[ + /// "content-length", + /// "x-hello", + /// "Content-Length", + /// "x-world", + /// ]; + /// + /// for &header in headers { + /// let counter = map.entry(header) + /// .expect("valid header names") + /// .or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(map["content-length"], 2); + /// assert_eq!(map["x-hello"], 1); + /// ``` + pub fn or_insert(self, default: T) -> &'a mut T { + use self::Entry::*; + + match self { + Occupied(e) => e.into_mut(), + Vacant(e) => e.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default + /// function if empty. + /// + /// The default function is not called if the entry exists in the map. 
+ /// Returns a mutable reference to the **first** value in the entry. + /// + /// # Examples + /// + /// Basic usage. + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// let res = map.entry("x-hello").unwrap() + /// .or_insert_with(|| "world".parse().unwrap()); + /// + /// assert_eq!(res, "world"); + /// ``` + /// + /// The default function is not called if the entry exists in the map. + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// let res = map.entry("host") + /// .expect("host is a valid string") + /// .or_insert_with(|| unreachable!()); + /// + /// + /// assert_eq!(res, "world"); + /// ``` + pub fn or_insert_with T>(self, default: F) -> &'a mut T { + use self::Entry::*; + + match self { + Occupied(e) => e.into_mut(), + Vacant(e) => e.insert(default()), + } + } + + /// Returns a reference to the entry's key + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(map.entry("x-hello").unwrap().key(), "x-hello"); + /// ``` + pub fn key(&self) -> &HeaderName { + use self::Entry::*; + + match *self { + Vacant(ref e) => e.key(), + Occupied(ref e) => e.key(), + } + } +} + +// ===== impl VacantEntry ===== + +impl<'a, T> VacantEntry<'a, T> { + /// Returns a reference to the entry's key + /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// let mut map = HeaderMap::new(); + /// + /// assert_eq!(map.entry("x-hello").unwrap().key().as_str(), "x-hello"); + /// ``` + pub fn key(&self) -> &HeaderName { + &self.key + } + + /// Take ownership of the key + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry}; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("x-hello").unwrap() { + /// assert_eq!(v.into_key().as_str(), "x-hello"); + /// } + /// ``` + pub 
fn into_key(self) -> HeaderName { + self.key + } + + /// Insert the value into the entry. + /// + /// The value will be associated with this entry's key. A mutable reference + /// to the inserted value will be returned. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry}; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("x-hello").unwrap() { + /// v.insert("world".parse().unwrap()); + /// } + /// + /// assert_eq!(map["x-hello"], "world"); + /// ``` + pub fn insert(self, value: T) -> &'a mut T { + // Ensure that there is space in the map + let index = self.map.insert_phase_two( + self.key, + value.into(), + self.hash, + self.probe, + self.danger); + + &mut self.map.entries[index].value + } + + /// Insert the value into the entry. + /// + /// The value will be associated with this entry's key. The new + /// `OccupiedEntry` is returned, allowing for further manipulation. + /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// let mut map = HeaderMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("x-hello").unwrap() { + /// let mut e = v.insert_entry("world".parse().unwrap()); + /// e.insert("world2".parse().unwrap()); + /// } + /// + /// assert_eq!(map["x-hello"], "world2"); + /// ``` + pub fn insert_entry(self, value: T) -> OccupiedEntry<'a, T> { + // Ensure that there is space in the map + let index = self.map.insert_phase_two( + self.key, + value.into(), + self.hash, + self.probe, + self.danger); + + OccupiedEntry { + map: self.map, + index: index, + probe: self.probe, + } + } +} + + +// ===== impl GetAll ===== + +impl<'a, T: 'a> GetAll<'a, T> { + /// Returns an iterator visiting all values associated with the entry. + /// + /// Values are iterated in insertion order. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::HeaderMap; + /// # use http::header::HOST; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// map.append(HOST, "hello.earth".parse().unwrap()); + /// + /// let values = map.get_all("host"); + /// let mut iter = values.iter(); + /// assert_eq!(&"hello.world", iter.next().unwrap()); + /// assert_eq!(&"hello.earth", iter.next().unwrap()); + /// assert!(iter.next().is_none()); + /// ``` + pub fn iter(&self) -> ValueIter<'a, T> { + // This creates a new GetAll struct so that the lifetime + // isn't bound to &self. + GetAll { + map: self.map, + index: self.index, + }.into_iter() + } +} + +impl<'a, T: PartialEq> PartialEq for GetAll<'a, T> { + fn eq(&self, other: &Self) -> bool { + self.iter().eq(other.iter()) + } +} + +impl<'a, T> IntoIterator for GetAll<'a, T> { + type Item = &'a T; + type IntoIter = ValueIter<'a, T>; + + fn into_iter(self) -> ValueIter<'a, T> { + self.map.value_iter(self.index) + } +} + +impl<'a, 'b: 'a, T> IntoIterator for &'b GetAll<'a, T> { + type Item = &'a T; + type IntoIter = ValueIter<'a, T>; + + fn into_iter(self) -> ValueIter<'a, T> { + self.map.value_iter(self.index) + } +} + +// ===== impl ValueIter ===== + +impl<'a, T: 'a> Iterator for ValueIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + use self::Cursor::*; + + + match self.front { + Some(Head) => { + let entry = &self.map.entries[self.index]; + + if self.back == Some(Head) { + self.front = None; + self.back = None; + } else { + // Update the iterator state + match entry.links { + Some(links) => { + self.front = Some(Values(links.next)); + } + None => unreachable!(), + } + } + + Some(&entry.value) + } + Some(Values(idx)) => { + let extra = &self.map.extra_values[idx]; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.next { + Link::Entry(_) => self.front = None, + Link::Extra(i) => self.front = Some(Values(i)), + 
} + } + + Some(&extra.value) + } + None => None, + } + } + + fn size_hint(&self) -> (usize, Option) { + match (self.front, self.back) { + // Exactly 1 value... + (Some(Cursor::Head), Some(Cursor::Head)) => (1, Some(1)), + // At least 1... + (Some(_), _) => (1, None), + // No more values... + (None, _) => (0, Some(0)), + } + } +} + +impl<'a, T: 'a> DoubleEndedIterator for ValueIter<'a, T> { + fn next_back(&mut self) -> Option { + use self::Cursor::*; + + + match self.back { + Some(Head) => { + self.front = None; + self.back = None; + Some(&self.map.entries[self.index].value) + } + Some(Values(idx)) => { + let extra = &self.map.extra_values[idx]; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.prev { + Link::Entry(_) => self.back = Some(Head), + Link::Extra(idx) => self.back = Some(Values(idx)), + } + } + + Some(&extra.value) + } + None => None, + } + } +} + +// ===== impl ValueIterMut ===== + +impl<'a, T: 'a> Iterator for ValueIterMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + use self::Cursor::*; + + let entry = unsafe { &mut (*self.map).entries[self.index] }; + + match self.front { + Some(Head) => { + if self.back == Some(Head) { + self.front = None; + self.back = None; + } else { + // Update the iterator state + match entry.links { + Some(links) => { + self.front = Some(Values(links.next)); + } + None => unreachable!(), + } + } + + Some(&mut entry.value) + } + Some(Values(idx)) => { + let extra = unsafe { &mut (*self.map).extra_values[idx] }; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.next { + Link::Entry(_) => self.front = None, + Link::Extra(i) => self.front = Some(Values(i)), + } + } + + Some(&mut extra.value) + } + None => None, + } + } +} + +impl<'a, T: 'a> DoubleEndedIterator for ValueIterMut<'a, T> { + fn next_back(&mut self) -> Option { + use self::Cursor::*; + + let entry = unsafe { &mut (*self.map).entries[self.index] }; 
+ + match self.back { + Some(Head) => { + self.front = None; + self.back = None; + Some(&mut entry.value) + } + Some(Values(idx)) => { + let extra = unsafe { &mut (*self.map).extra_values[idx] }; + + if self.front == self.back { + self.front = None; + self.back = None; + } else { + match extra.prev { + Link::Entry(_) => self.back = Some(Head), + Link::Extra(idx) => self.back = Some(Values(idx)), + } + } + + Some(&mut extra.value) + } + None => None, + } + } +} + +unsafe impl<'a, T: Sync> Sync for ValueIterMut<'a, T> {} +unsafe impl<'a, T: Send> Send for ValueIterMut<'a, T> {} + +// ===== impl IntoIter ===== + +impl Iterator for IntoIter { + type Item = (Option, T); + + fn next(&mut self) -> Option { + if let Some(next) = self.next { + self.next = match self.extra_values[next].next { + Link::Entry(_) => None, + Link::Extra(v) => Some(v), + }; + + let value = unsafe { ptr::read(&self.extra_values[next].value) }; + + return Some((None, value)); + } + + if let Some(bucket) = self.entries.next() { + self.next = bucket.links.map(|l| l.next); + let name = Some(bucket.key); + let value = bucket.value; + + return Some((name, value)); + } + + None + } + + fn size_hint(&self) -> (usize, Option) { + let (lower, _) = self.entries.size_hint(); + // There could be more than just the entries upper, as there + // could be items in the `extra_values`. We could guess, saying + // `upper + extra_values.len()`, but that could overestimate by a lot. + (lower, None) + } +} + +impl Drop for IntoIter { + fn drop(&mut self) { + // Ensure the iterator is consumed + for _ in self.by_ref() { } + + // All the values have already been yielded out. + unsafe { self.extra_values.set_len(0); } + } +} + +// ===== impl OccupiedEntry ===== + +impl<'a, T> OccupiedEntry<'a, T> { + /// Returns a reference to the entry's key. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host").unwrap() { + /// assert_eq!("host", e.key()); + /// } + /// ``` + pub fn key(&self) -> &HeaderName { + &self.map.entries[self.index].key + } + + /// Get a reference to the first value in the entry. + /// + /// Values are stored in insertion order. + /// + /// # Panics + /// + /// `get` panics if there are no values associated with the entry. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host").unwrap() { + /// assert_eq!(e.get(), &"hello.world"); + /// + /// e.append("hello.earth".parse().unwrap()); + /// + /// assert_eq!(e.get(), &"hello.world"); + /// } + /// ``` + pub fn get(&self) -> &T { + &self.map.entries[self.index].value + } + + /// Get a mutable reference to the first value in the entry. + /// + /// Values are stored in insertion order. + /// + /// # Panics + /// + /// `get_mut` panics if there are no values associated with the entry. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "hello.world".to_string()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host").unwrap() { + /// e.get_mut().push_str("-2"); + /// assert_eq!(e.get(), &"hello.world-2"); + /// } + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.map.entries[self.index].value + } + + /// Converts the `OccupiedEntry` into a mutable reference to the **first** + /// value. + /// + /// The lifetime of the returned reference is bound to the original map. 
+ /// + /// # Panics + /// + /// `into_mut` panics if there are no values associated with the entry. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "hello.world".to_string()); + /// map.append(HOST, "hello.earth".to_string()); + /// + /// if let Entry::Occupied(e) = map.entry("host").unwrap() { + /// e.into_mut().push_str("-2"); + /// } + /// + /// assert_eq!("hello.world-2", map["host"]); + /// ``` + pub fn into_mut(self) -> &'a mut T { + &mut self.map.entries[self.index].value + } + + /// Sets the value of the entry. + /// + /// All previous values associated with the entry are removed and the first + /// one is returned. See `insert_mult` for an API that returns all values. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "hello.world".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host").unwrap() { + /// let mut prev = e.insert("earth".parse().unwrap()); + /// assert_eq!("hello.world", prev); + /// } + /// + /// assert_eq!("earth", map["host"]); + /// ``` + pub fn insert(&mut self, value: T) -> T { + self.map.insert_occupied(self.index, value.into()) + } + + /// Sets the value of the entry. + /// + /// This function does the same as `insert` except it returns an iterator + /// that yields all values previously associated with the key. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// map.append(HOST, "world2".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host").unwrap() { + /// let mut prev = e.insert_mult("earth".parse().unwrap()); + /// assert_eq!("world", prev.next().unwrap()); + /// assert_eq!("world2", prev.next().unwrap()); + /// assert!(prev.next().is_none()); + /// } + /// + /// assert_eq!("earth", map["host"]); + /// ``` + pub fn insert_mult(&mut self, value: T) -> ValueDrain { + self.map.insert_occupied_mult(self.index, value.into()) + } + + /// Insert the value into the entry. + /// + /// The new value is appended to the end of the entry's value list. All + /// previous values associated with the entry are retained. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host").unwrap() { + /// e.append("earth".parse().unwrap()); + /// } + /// + /// let values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!("world", *i.next().unwrap()); + /// assert_eq!("earth", *i.next().unwrap()); + /// ``` + pub fn append(&mut self, value: T) { + let idx = self.index; + let entry = &mut self.map.entries[idx]; + append_value(idx, entry, &mut self.map.extra_values, value.into()); + } + + /// Remove the entry from the map. + /// + /// All values associated with the entry are removed and the first one is + /// returned. See `remove_entry_mult` for an API that returns all values. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host").unwrap() { + /// let mut prev = e.remove(); + /// assert_eq!("world", prev); + /// } + /// + /// assert!(!map.contains_key("host")); + /// ``` + pub fn remove(self) -> T { + self.remove_entry().1 + } + + /// Remove the entry from the map. + /// + /// The key and all values associated with the entry are removed and the + /// first one is returned. See `remove_entry_mult` for an API that returns + /// all values. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host").unwrap() { + /// let (key, mut prev) = e.remove_entry(); + /// assert_eq!("host", key.as_str()); + /// assert_eq!("world", prev); + /// } + /// + /// assert!(!map.contains_key("host")); + /// ``` + pub fn remove_entry(self) -> (HeaderName, T) { + let entry = self.map.remove_found(self.probe, self.index); + + if let Some(links) = entry.links { + self.map.remove_all_extra_values(links.next); + } + + (entry.key, entry.value) + } + + /// Remove the entry from the map. + /// + /// The key and all values associated with the entry are removed and + /// returned. + pub fn remove_entry_mult(self) -> (HeaderName, ValueDrain<'a, T>) { + let entry = self.map.remove_found(self.probe, self.index); + let drain = ValueDrain { + map: self.map as *mut _, + first: Some(entry.value), + next: entry.links.map(|l| l.next), + lt: PhantomData, + }; + (entry.key, drain) + } + + /// Returns an iterator visiting all values associated with the entry. + /// + /// Values are iterated in insertion order. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::new(); + /// map.insert(HOST, "world".parse().unwrap()); + /// map.append(HOST, "earth".parse().unwrap()); + /// + /// if let Entry::Occupied(e) = map.entry("host").unwrap() { + /// let mut iter = e.iter(); + /// assert_eq!(&"world", iter.next().unwrap()); + /// assert_eq!(&"earth", iter.next().unwrap()); + /// assert!(iter.next().is_none()); + /// } + /// ``` + pub fn iter(&self) -> ValueIter { + self.map.value_iter(Some(self.index)) + } + + /// Returns an iterator mutably visiting all values associated with the + /// entry. + /// + /// Values are iterated in insertion order. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderMap, Entry, HOST}; + /// let mut map = HeaderMap::default(); + /// map.insert(HOST, "world".to_string()); + /// map.append(HOST, "earth".to_string()); + /// + /// if let Entry::Occupied(mut e) = map.entry("host").unwrap() { + /// for e in e.iter_mut() { + /// e.push_str("-boop"); + /// } + /// } + /// + /// let mut values = map.get_all("host"); + /// let mut i = values.iter(); + /// assert_eq!(&"world-boop", i.next().unwrap()); + /// assert_eq!(&"earth-boop", i.next().unwrap()); + /// ``` + pub fn iter_mut(&mut self) -> ValueIterMut { + self.map.value_iter_mut(self.index) + } +} + +impl<'a, T> IntoIterator for OccupiedEntry<'a, T> { + type Item = &'a mut T; + type IntoIter = ValueIterMut<'a, T>; + + fn into_iter(self) -> ValueIterMut<'a, T> { + self.map.value_iter_mut(self.index) + } +} + +impl<'a, 'b: 'a, T> IntoIterator for &'b OccupiedEntry<'a, T> { + type Item = &'a T; + type IntoIter = ValueIter<'a, T>; + + fn into_iter(self) -> ValueIter<'a, T> { + self.iter() + } +} + +impl<'a, 'b: 'a, T> IntoIterator for &'b mut OccupiedEntry<'a, T> { + type Item = &'a mut T; + type IntoIter = ValueIterMut<'a, T>; + + fn into_iter(self) -> ValueIterMut<'a, T> { + self.iter_mut() + } +} + +// ===== impl 
ValueDrain ===== + +impl<'a, T> Iterator for ValueDrain<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + if self.first.is_some() { + self.first.take() + } else if let Some(next) = self.next { + // Remove the extra value + let extra = unsafe { &mut (*self.map) }.remove_extra_value(next); + + match extra.next { + Link::Extra(idx) => self.next = Some(idx), + Link::Entry(_) => self.next = None, + } + + Some(extra.value) + } else { + None + } + } + + fn size_hint(&self) -> (usize, Option) { + match (&self.first, self.next) { + // Exactly 1 + (&Some(_), None) => (1, Some(1)), + // At least 1 + (&_, Some(_)) => (1, None), + // No more + (&None, None) => (0, Some(0)), + } + } +} + +impl<'a, T> Drop for ValueDrain<'a, T> { + fn drop(&mut self) { + while let Some(_) = self.next() { + } + } +} + +unsafe impl<'a, T: Sync> Sync for ValueDrain<'a, T> {} +unsafe impl<'a, T: Send> Send for ValueDrain<'a, T> {} + +// ===== impl Pos ===== + +impl Pos { + #[inline] + fn new(index: usize, hash: HashValue) -> Self { + Pos { + index: index as Size, + hash: hash, + } + } + + #[inline] + fn none() -> Self { + Pos { + index: !0, + hash: HashValue(0), + } + } + + #[inline] + fn is_some(&self) -> bool { + !self.is_none() + } + + #[inline] + fn is_none(&self) -> bool { + self.index == !0 + } + + #[inline] + fn resolve(&self) -> Option<(usize, HashValue)> { + if self.is_some() { + Some((self.index, self.hash)) + } else { + None + } + } +} + +impl Danger { + fn is_red(&self) -> bool { + match *self { + Danger::Red(_) => true, + _ => false, + } + } + + fn to_red(&mut self) { + debug_assert!(self.is_yellow()); + *self = Danger::Red(RandomState::new()); + } + + fn is_yellow(&self) -> bool { + match *self { + Danger::Yellow => true, + _ => false, + } + } + + fn to_yellow(&mut self) { + match *self { + Danger::Green => { + *self = Danger::Yellow; + } + _ => {} + } + } + + fn to_green(&mut self) { + debug_assert!(self.is_yellow()); + *self = Danger::Green; + } +} + +// ===== impl Utils 
===== + +#[inline] +fn usable_capacity(cap: usize) -> usize { + cap - cap / 4 +} + +#[inline] +fn to_raw_capacity(n: usize) -> usize { + n + n / 3 +} + +#[inline] +fn desired_pos(mask: Size, hash: HashValue) -> usize { + (hash.0 & mask) +} + +/// The number of steps that `current` is forward of the desired position for hash +#[inline] +fn probe_distance(mask: Size, hash: HashValue, current: usize) -> usize { + current.wrapping_sub(desired_pos(mask, hash)) & mask +} + +fn hash_elem_using(danger: &Danger, k: &K) -> HashValue + where K: Hash +{ + use fnv::FnvHasher; + + const MASK: u64 = (MAX_SIZE as u64) - 1; + + let hash = match *danger { + // Safe hash + Danger::Red(ref hasher) => { + let mut h = hasher.build_hasher(); + k.hash(&mut h); + h.finish() + } + // Fast hash + _ => { + let mut h = FnvHasher::default(); + k.hash(&mut h); + h.finish() + } + }; + + HashValue((hash & MASK) as usize) +} + +/* + * + * ===== impl IntoHeaderName / AsHeaderName ===== + * + */ + + +mod into_header_name { + use super::{HdrName, HeaderMap, HeaderName}; + + /// A marker trait used to identify values that can be used as insert keys + /// to a `HeaderMap`. + pub trait IntoHeaderName: Sealed {} + + // All methods are on this pub(super) trait, instead of `IntoHeaderName`, + // so that they aren't publicly exposed to the world. + // + // Being on the `IntoHeaderName` trait would mean users could call + // `"host".insert(&mut map, "localhost")`. + // + // Ultimately, this allows us to adjust the signatures of these methods + // without breaking any external crate. 
+ pub trait Sealed { + #[doc(hidden)] + fn insert(self, map: &mut HeaderMap, val: T) -> Option; + + #[doc(hidden)] + fn append(self, map: &mut HeaderMap, val: T) -> bool; + } + + // ==== impls ==== + + impl Sealed for HeaderName { + #[doc(hidden)] + #[inline] + fn insert(self, map: &mut HeaderMap, val: T) -> Option { + map.insert2(self, val) + } + + #[doc(hidden)] + #[inline] + fn append(self, map: &mut HeaderMap, val: T) -> bool { + map.append2(self, val) + } + } + + impl IntoHeaderName for HeaderName {} + + impl<'a> Sealed for &'a HeaderName { + #[doc(hidden)] + #[inline] + fn insert(self, map: &mut HeaderMap, val: T) -> Option { + map.insert2(self, val) + } + #[doc(hidden)] + #[inline] + fn append(self, map: &mut HeaderMap, val: T) -> bool { + map.append2(self, val) + } + } + + impl<'a> IntoHeaderName for &'a HeaderName {} + + impl Sealed for &'static str { + #[doc(hidden)] + #[inline] + fn insert(self, map: &mut HeaderMap, val: T) -> Option { + HdrName::from_static(self, move |hdr| map.insert2(hdr, val)) + } + #[doc(hidden)] + #[inline] + fn append(self, map: &mut HeaderMap, val: T) -> bool { + HdrName::from_static(self, move |hdr| map.append2(hdr, val)) + } + } + + impl IntoHeaderName for &'static str {} +} + +mod as_header_name { + use super::{Entry, HdrName, HeaderMap, HeaderName, InvalidHeaderName}; + + /// A marker trait used to identify values that can be used as search keys + /// to a `HeaderMap`. + pub trait AsHeaderName: Sealed {} + + // All methods are on this pub(super) trait, instead of `AsHeaderName`, + // so that they aren't publicly exposed to the world. + // + // Being on the `AsHeaderName` trait would mean users could call + // `"host".find(&map)`. + // + // Ultimately, this allows us to adjust the signatures of these methods + // without breaking any external crate. 
+ pub trait Sealed { + #[doc(hidden)] + fn entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName>; + + #[doc(hidden)] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)>; + } + + // ==== impls ==== + + impl Sealed for HeaderName { + #[doc(hidden)] + #[inline] + fn entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { + Ok(map.entry2(self)) + } + + #[doc(hidden)] + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + map.find(self) + } + } + + impl AsHeaderName for HeaderName {} + + impl<'a> Sealed for &'a HeaderName { + #[doc(hidden)] + #[inline] + fn entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { + Ok(map.entry2(self)) + } + + #[doc(hidden)] + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + map.find(*self) + } + } + + impl<'a> AsHeaderName for &'a HeaderName {} + + impl<'a> Sealed for &'a str { + #[doc(hidden)] + #[inline] + fn entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { + HdrName::from_bytes(self.as_bytes(), move |hdr| map.entry2(hdr)) + } + + #[doc(hidden)] + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + HdrName::from_bytes(self.as_bytes(), move |hdr| map.find(&hdr)).unwrap_or(None) + } + } + + impl<'a> AsHeaderName for &'a str {} + + impl Sealed for String { + #[doc(hidden)] + #[inline] + fn entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { + self.as_str().entry(map) + } + + #[doc(hidden)] + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + Sealed::find(&self.as_str(), map) + } + } + + impl AsHeaderName for String {} + + impl<'a> Sealed for &'a String { + #[doc(hidden)] + #[inline] + fn entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { + self.as_str().entry(map) + } + + #[doc(hidden)] + #[inline] + fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { + Sealed::find(*self, map) + } + } + + impl<'a> AsHeaderName for &'a String {} +} + + +#[test] +fn test_bounds() { 
+ fn check_bounds() {} + + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); +} + +#[test] +fn skip_duplicates_during_key_iteration() { + let mut map = HeaderMap::new(); + map.append("a", HeaderValue::from_static("a")); + map.append("a", HeaderValue::from_static("b")); + assert_eq!(map.keys().count(), map.keys_len()); +} diff --git a/third_party/rust/http/src/header/mod.rs b/third_party/rust/http/src/header/mod.rs new file mode 100644 index 000000000000..ee80fc7f64e1 --- /dev/null +++ b/third_party/rust/http/src/header/mod.rs @@ -0,0 +1,194 @@ +//! HTTP header types +//! +//! The module provides [`HeaderName`], [`HeaderMap`], and a number of types +//! used for interacting with `HeaderMap`. These types allow representing both +//! HTTP/1 and HTTP/2 headers. +//! +//! # `HeaderName` +//! +//! The `HeaderName` type represents both standard header names as well as +//! custom header names. The type handles the case insensitive nature of header +//! names and is used as the key portion of `HeaderMap`. Header names are +//! normalized to lower case. In other words, when creating a `HeaderName` with +//! a string, even if upper case characters are included, when getting a string +//! representation of the `HeaderName`, it will be all lower case. This allows +//! for faster `HeaderMap` comparison operations. +//! +//! The internal representation is optimized to efficiently handle the cases +//! most commonly encountered when working with HTTP. Standard header names are +//! special cased and are represented internally as an enum. Short custom +//! headers will be stored directly in the `HeaderName` struct and will not +//! incur any allocation overhead, however longer strings will require an +//! allocation for storage. +//! +//! 
## Limitations +//! +//! `HeaderName` has a max length of 32,768 for header names. Attempting to +//! parse longer names will result in a panic. +//! +//! # `HeaderMap` +//! +//! `HeaderMap` is a map structure of header names highly optimized for use +//! cases common with HTTP. It is a [multimap] structure, where each header name +//! may have multiple associated header values. Given this, some of the APIs +//! diverge from [`HashMap`]. +//! +//! ## Overview +//! +//! Just like `HashMap` in Rust's stdlib, `HeaderMap` is based on [Robin Hood +//! hashing]. This algorithm tends to reduce the worst case search times in the +//! table and enables high load factors without seriously affecting performance. +//! Internally, keys and values are stored in vectors. As such, each insertion +//! will not incur allocation overhead. However, once the underlying vector +//! storage is full, a larger vector must be allocated and all values copied. +//! +//! ## Deterministic ordering +//! +//! Unlike Rust's `HashMap`, values in `HeaderMap` are deterministically +//! ordered. Roughly, values are ordered by insertion. This means that a +//! function that deterministically operates on a header map can rely on the +//! iteration order to remain consistent across processes and platforms. +//! +//! ## Adaptive hashing +//! +//! `HeaderMap` uses an adaptive hashing strategy in order to efficiently handle +//! most common cases. All standard headers have statically computed hash values +//! which removes the need to perform any hashing of these headers at runtime. +//! The default hash function emphasizes performance over robustness. However, +//! `HeaderMap` detects high collision rates and switches to a secure hash +//! function in those events. The threshold is set such that only denial of +//! service attacks should trigger it. +//! +//! ## Limitations +//! +//! `HeaderMap` can store a maximum of 32,768 headers (header name / value +//! pairs). 
Attempting to insert more will result in a panic. +//! +//! [`HeaderName`]: struct.HeaderName.html +//! [`HeaderMap`]: struct.HeaderMap.html +//! [multimap]: https://en.wikipedia.org/wiki/Multimap +//! [`HashMap`]: https://doc.rust-lang.org/std/collections/struct.HashMap.html +//! [Robin Hood hashing]: https://en.wikipedia.org/wiki/Hash_table#Robin_Hood_hashing + +mod map; +mod name; +mod value; + +pub use self::map::{ + HeaderMap, + AsHeaderName, + IntoHeaderName, + Iter, + Keys, + Values, + Drain, + GetAll, + Entry, + VacantEntry, + OccupiedEntry, + ValueIter, + ValueIterMut, + ValueDrain, + IntoIter, +}; +pub use self::name::{ + HeaderName, + InvalidHeaderName, + InvalidHeaderNameBytes, +}; +pub use self::value::{ + HeaderValue, + InvalidHeaderValue, + InvalidHeaderValueBytes, + ToStrError, +}; + +// Use header name constants +pub use self::name::{ + ACCEPT, + ACCEPT_CHARSET, + ACCEPT_ENCODING, + ACCEPT_LANGUAGE, + ACCEPT_RANGES, + ACCESS_CONTROL_ALLOW_CREDENTIALS, + ACCESS_CONTROL_ALLOW_HEADERS, + ACCESS_CONTROL_ALLOW_METHODS, + ACCESS_CONTROL_ALLOW_ORIGIN, + ACCESS_CONTROL_EXPOSE_HEADERS, + ACCESS_CONTROL_MAX_AGE, + ACCESS_CONTROL_REQUEST_HEADERS, + ACCESS_CONTROL_REQUEST_METHOD, + AGE, + ALLOW, + ALT_SVC, + AUTHORIZATION, + CACHE_CONTROL, + CONNECTION, + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LANGUAGE, + CONTENT_LENGTH, + CONTENT_LOCATION, + CONTENT_RANGE, + CONTENT_SECURITY_POLICY, + CONTENT_SECURITY_POLICY_REPORT_ONLY, + CONTENT_TYPE, + COOKIE, + DNT, + DATE, + ETAG, + EXPECT, + EXPIRES, + FORWARDED, + FROM, + HOST, + IF_MATCH, + IF_MODIFIED_SINCE, + IF_NONE_MATCH, + IF_RANGE, + IF_UNMODIFIED_SINCE, + LAST_MODIFIED, + LINK, + LOCATION, + MAX_FORWARDS, + ORIGIN, + PRAGMA, + PROXY_AUTHENTICATE, + PROXY_AUTHORIZATION, + PUBLIC_KEY_PINS, + PUBLIC_KEY_PINS_REPORT_ONLY, + RANGE, + REFERER, + REFERRER_POLICY, + REFRESH, + RETRY_AFTER, + SEC_WEBSOCKET_ACCEPT, + SEC_WEBSOCKET_EXTENSIONS, + SEC_WEBSOCKET_KEY, + SEC_WEBSOCKET_PROTOCOL, + 
SEC_WEBSOCKET_VERSION, + SERVER, + SET_COOKIE, + STRICT_TRANSPORT_SECURITY, + TE, + TRAILER, + TRANSFER_ENCODING, + UPGRADE, + UPGRADE_INSECURE_REQUESTS, + USER_AGENT, + VARY, + VIA, + WARNING, + WWW_AUTHENTICATE, + X_CONTENT_TYPE_OPTIONS, + X_DNS_PREFETCH_CONTROL, + X_FRAME_OPTIONS, + X_XSS_PROTECTION, +}; + +/// Maximum length of a header name +/// +/// Generally, 64kb for a header name is WAY too much than would ever be needed +/// in practice. Restricting it to this size enables using `u16` values to +/// represent offsets when dealing with header names. +const MAX_HEADER_NAME_LEN: usize = 1 << 16; diff --git a/third_party/rust/http/src/header/name.rs b/third_party/rust/http/src/header/name.rs new file mode 100644 index 000000000000..cf4088c4f9f7 --- /dev/null +++ b/third_party/rust/http/src/header/name.rs @@ -0,0 +1,2188 @@ +use HttpTryFrom; +use byte_str::ByteStr; +use bytes::{Bytes, BytesMut}; + +use std::{fmt, mem}; +use std::borrow::Borrow; +use std::hash::{Hash, Hasher}; +use std::str::FromStr; +use std::error::Error; + +/// Represents an HTTP header field name +/// +/// Header field names identify the header. Header sets may include multiple +/// headers with the same name. The HTTP specification defines a number of +/// standard headers, but HTTP messages may include non-standard header names as +/// well as long as they adhere to the specification. +/// +/// `HeaderName` is used as the [`HeaderMap`] key. Constants are available for +/// all standard header names in the [`header`] module. +/// +/// # Representation +/// +/// `HeaderName` represents standard header names using an `enum`, as such they +/// will not require an allocation for storage. All custom header names are +/// lower cased upon conversion to a `HeaderName` value. This avoids the +/// overhead of dynamically doing lower case conversion during the hash code +/// computation and the comparison operation. 
+/// +/// [`HeaderMap`]: struct.HeaderMap.html +/// [`header`]: index.html +#[derive(Clone, Eq, PartialEq, Hash)] +pub struct HeaderName { + inner: Repr, +} + +// Almost a full `HeaderName` +#[derive(Debug, Hash)] +pub struct HdrName<'a> { + inner: Repr>, +} + +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +enum Repr { + Standard(StandardHeader), + Custom(T), +} + +// Used to hijack the Hash impl +#[derive(Debug, Clone, Eq, PartialEq)] +struct Custom(ByteStr); + +#[derive(Debug, Clone)] +struct MaybeLower<'a> { + buf: &'a [u8], + lower: bool, +} + +/// A possible error when converting a `HeaderName` from another type. +#[derive(Debug)] +pub struct InvalidHeaderName { + _priv: (), +} + +/// A possible error when converting a `HeaderName` from another type. +#[derive(Debug)] +pub struct InvalidHeaderNameBytes(InvalidHeaderName) ; + +macro_rules! standard_headers { + ( + $( + $(#[$docs:meta])* + ($konst:ident, $upcase:ident, $name:expr); + )+ + ) => { + #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] + enum StandardHeader { + $( + $konst, + )+ + } + + $( + $(#[$docs])* + pub const $upcase: HeaderName = HeaderName { + inner: Repr::Standard(StandardHeader::$konst), + }; + )+ + + impl StandardHeader { + #[inline] + fn as_str(&self) -> &'static str { + match *self { + $( + StandardHeader::$konst => $name, + )+ + } + } + } + + #[cfg(test)] + const TEST_HEADERS: &'static [(StandardHeader, &'static str)] = &[ + $( + (StandardHeader::$konst, $name), + )+ + ]; + + #[test] + fn test_parse_standard_headers() { + for &(std, name) in TEST_HEADERS { + // Test lower case + assert_eq!(HeaderName::from_bytes(name.as_bytes()).unwrap(), HeaderName::from(std)); + + // Test upper case + let upper = name.to_uppercase().to_string(); + assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), HeaderName::from(std)); + } + } + + #[test] + fn test_standard_headers_into_bytes() { + for &(std, name) in TEST_HEADERS { + let std = HeaderName::from(std); + // Test lower case + let name_bytes 
= name.as_bytes(); + let bytes: Bytes = + HeaderName::from_bytes(name_bytes).unwrap().into(); + assert_eq!(bytes, name_bytes); + assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), std); + + // Test upper case + let upper = name.to_uppercase().to_string(); + let bytes: Bytes = + HeaderName::from_bytes(upper.as_bytes()).unwrap().into(); + assert_eq!(bytes, name.as_bytes()); + assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), + std); + + + } + + } + } +} + +// Generate constants for all standard HTTP headers. This includes a static hash +// code for the "fast hash" path. The hash code for static headers *do not* have +// to match the text representation of those headers. This is because header +// strings are always converted to the static values (when they match) before +// being hashed. This means that it is impossible to compare the static hash +// code of CONTENT_LENGTH with "content-length". +standard_headers! { + /// Advertises which content types the client is able to understand. + /// + /// The Accept request HTTP header advertises which content types, expressed + /// as MIME types, the client is able to understand. Using content + /// negotiation, the server then selects one of the proposals, uses it and + /// informs the client of its choice with the Content-Type response header. + /// Browsers set adequate values for this header depending of the context + /// where the request is done: when fetching a CSS stylesheet a different + /// value is set for the request than when fetching an image, video or a + /// script. + (Accept, ACCEPT, "accept"); + + /// Advertises which character set the client is able to understand. + /// + /// The Accept-Charset request HTTP header advertises which character set + /// the client is able to understand. Using content negotiation, the server + /// then selects one of the proposals, uses it and informs the client of its + /// choice within the Content-Type response header. 
Browsers usually don't + /// set this header as the default value for each content type is usually + /// correct and transmitting it would allow easier fingerprinting. + /// + /// If the server cannot serve any matching character set, it can + /// theoretically send back a 406 (Not Acceptable) error code. But, for a + /// better user experience, this is rarely done and the more common way is + /// to ignore the Accept-Charset header in this case. + (AcceptCharset, ACCEPT_CHARSET, "accept-charset"); + + /// Advertises which content encoding the client is able to understand. + /// + /// The Accept-Encoding request HTTP header advertises which content + /// encoding, usually a compression algorithm, the client is able to + /// understand. Using content negotiation, the server selects one of the + /// proposals, uses it and informs the client of its choice with the + /// Content-Encoding response header. + /// + /// Even if both the client and the server supports the same compression + /// algorithms, the server may choose not to compress the body of a + /// response, if the identity value is also acceptable. Two common cases + /// lead to this: + /// + /// * The data to be sent is already compressed and a second compression + /// won't lead to smaller data to be transmitted. This may the case with + /// some image formats; + /// + /// * The server is overloaded and cannot afford the computational overhead + /// induced by the compression requirement. Typically, Microsoft recommends + /// not to compress if a server use more than 80 % of its computational + /// power. + /// + /// As long as the identity value, meaning no encryption, is not explicitly + /// forbidden, by an identity;q=0 or a *;q=0 without another explicitly set + /// value for identity, the server must never send back a 406 Not Acceptable + /// error. + (AcceptEncoding, ACCEPT_ENCODING, "accept-encoding"); + + /// Advertises which languages the client is able to understand. 
+ /// + /// The Accept-Language request HTTP header advertises which languages the + /// client is able to understand, and which locale variant is preferred. + /// Using content negotiation, the server then selects one of the proposals, + /// uses it and informs the client of its choice with the Content-Language + /// response header. Browsers set adequate values for this header according + /// their user interface language and even if a user can change it, this + /// happens rarely (and is frown upon as it leads to fingerprinting). + /// + /// This header is a hint to be used when the server has no way of + /// determining the language via another way, like a specific URL, that is + /// controlled by an explicit user decision. It is recommended that the + /// server never overrides an explicit decision. The content of the + /// Accept-Language is often out of the control of the user (like when + /// traveling and using an Internet Cafe in a different country); the user + /// may also want to visit a page in another language than the locale of + /// their user interface. + /// + /// If the server cannot serve any matching language, it can theoretically + /// send back a 406 (Not Acceptable) error code. But, for a better user + /// experience, this is rarely done and more common way is to ignore the + /// Accept-Language header in this case. + (AcceptLanguage, ACCEPT_LANGUAGE, "accept-language"); + + /// Marker used by the server to advertise partial request support. + /// + /// The Accept-Ranges response HTTP header is a marker used by the server to + /// advertise its support of partial requests. The value of this field + /// indicates the unit that can be used to define a range. + /// + /// In presence of an Accept-Ranges header, the browser may try to resume an + /// interrupted download, rather than to start it from the start again. 
+ (AcceptRanges, ACCEPT_RANGES, "accept-ranges"); + + /// Preflight response indicating if the response to the request can be + /// exposed to the page. + /// + /// The Access-Control-Allow-Credentials response header indicates whether + /// or not the response to the request can be exposed to the page. It can be + /// exposed when the true value is returned; it can't in other cases. + /// + /// Credentials are cookies, authorization headers or TLS client + /// certificates. + /// + /// When used as part of a response to a preflight request, this indicates + /// whether or not the actual request can be made using credentials. Note + /// that simple GET requests are not preflighted, and so if a request is + /// made for a resource with credentials, if this header is not returned + /// with the resource, the response is ignored by the browser and not + /// returned to web content. + /// + /// The Access-Control-Allow-Credentials header works in conjunction with + /// the XMLHttpRequest.withCredentials property or with the credentials + /// option in the Request() constructor of the Fetch API. Credentials must + /// be set on both sides (the Access-Control-Allow-Credentials header and in + /// the XHR or Fetch request) in order for the CORS request with credentials + /// to succeed. + (AccessControlAllowCredentials, ACCESS_CONTROL_ALLOW_CREDENTIALS, "access-control-allow-credentials"); + + /// Preflight response indicating permitted HTTP headers. + /// + /// The Access-Control-Allow-Headers response header is used in response to + /// a preflight request to indicate which HTTP headers will be available via + /// Access-Control-Expose-Headers when making the actual request. 
+ /// + /// The simple headers, Accept, Accept-Language, Content-Language, + /// Content-Type (but only with a MIME type of its parsed value (ignoring + /// parameters) of either application/x-www-form-urlencoded, + /// multipart/form-data, or text/plain), are always available and don't need + /// to be listed by this header. + /// + /// This header is required if the request has an + /// Access-Control-Request-Headers header. + (AccessControlAllowHeaders, ACCESS_CONTROL_ALLOW_HEADERS, "access-control-allow-headers"); + + /// Preflight header response indicating permitted access methods. + /// + /// The Access-Control-Allow-Methods response header specifies the method or + /// methods allowed when accessing the resource in response to a preflight + /// request. + (AccessControlAllowMethods, ACCESS_CONTROL_ALLOW_METHODS, "access-control-allow-methods"); + + /// Indicates whether the response can be shared with resources with the + /// given origin. + (AccessControlAllowOrigin, ACCESS_CONTROL_ALLOW_ORIGIN, "access-control-allow-origin"); + + /// Indicates which headers can be exposed as part of the response by + /// listing their names. + (AccessControlExposeHeaders, ACCESS_CONTROL_EXPOSE_HEADERS, "access-control-expose-headers"); + + /// Indicates how long the results of a preflight request can be cached. + (AccessControlMaxAge, ACCESS_CONTROL_MAX_AGE, "access-control-max-age"); + + /// Informs the server which HTTP headers will be used when an actual + /// request is made. + (AccessControlRequestHeaders, ACCESS_CONTROL_REQUEST_HEADERS, "access-control-request-headers"); + + /// Informs the server know which HTTP method will be used when the actual + /// request is made. + (AccessControlRequestMethod, ACCESS_CONTROL_REQUEST_METHOD, "access-control-request-method"); + + /// Indicates the time in seconds the object has been in a proxy cache. + /// + /// The Age header is usually close to zero. 
If it is Age: 0, it was + /// probably just fetched from the origin server; otherwise It is usually + /// calculated as a difference between the proxy's current date and the Date + /// general header included in the HTTP response. + (Age, AGE, "age"); + + /// Lists the set of methods support by a resource. + /// + /// This header must be sent if the server responds with a 405 Method Not + /// Allowed status code to indicate which request methods can be used. An + /// empty Allow header indicates that the resource allows no request + /// methods, which might occur temporarily for a given resource, for + /// example. + (Allow, ALLOW, "allow"); + + /// Advertises the availability of alternate services to clients. + (AltSvc, ALT_SVC, "alt-svc"); + + /// Contains the credentials to authenticate a user agent with a server. + /// + /// Usually this header is included after the server has responded with a + /// 401 Unauthorized status and the WWW-Authenticate header. + (Authorization, AUTHORIZATION, "authorization"); + + /// Specifies directives for caching mechanisms in both requests and + /// responses. + /// + /// Caching directives are unidirectional, meaning that a given directive in + /// a request is not implying that the same directive is to be given in the + /// response. + (CacheControl, CACHE_CONTROL, "cache-control"); + + /// Controls whether or not the network connection stays open after the + /// current transaction finishes. + /// + /// If the value sent is keep-alive, the connection is persistent and not + /// closed, allowing for subsequent requests to the same server to be done. + /// + /// Except for the standard hop-by-hop headers (Keep-Alive, + /// Transfer-Encoding, TE, Connection, Trailer, Upgrade, Proxy-Authorization + /// and Proxy-Authenticate), any hop-by-hop headers used by the message must + /// be listed in the Connection header, so that the first proxy knows he has + /// to consume them and not to forward them further. 
Standard hop-by-hop + /// headers can be listed too (it is often the case of Keep-Alive, but this + /// is not mandatory. + (Connection, CONNECTION, "connection"); + + /// Indicates if the content is expected to be displayed inline. + /// + /// In a regular HTTP response, the Content-Disposition response header is a + /// header indicating if the content is expected to be displayed inline in + /// the browser, that is, as a Web page or as part of a Web page, or as an + /// attachment, that is downloaded and saved locally. + /// + /// In a multipart/form-data body, the HTTP Content-Disposition general + /// header is a header that can be used on the subpart of a multipart body + /// to give information about the field it applies to. The subpart is + /// delimited by the boundary defined in the Content-Type header. Used on + /// the body itself, Content-Disposition has no effect. + /// + /// The Content-Disposition header is defined in the larger context of MIME + /// messages for e-mail, but only a subset of the possible parameters apply + /// to HTTP forms and POST requests. Only the value form-data, as well as + /// the optional directive name and filename, can be used in the HTTP + /// context. + (ContentDisposition, CONTENT_DISPOSITION, "content-disposition"); + + /// Used to compress the media-type. + /// + /// When present, its value indicates what additional content encoding has + /// been applied to the entity-body. It lets the client know, how to decode + /// in order to obtain the media-type referenced by the Content-Type header. + /// + /// It is recommended to compress data as much as possible and therefore to + /// use this field, but some types of resources, like jpeg images, are + /// already compressed. Sometimes using additional compression doesn't + /// reduce payload size and can even make the payload longer. + (ContentEncoding, CONTENT_ENCODING, "content-encoding"); + + /// Used to describe the languages intended for the audience. 
+ /// + /// This header allows a user to differentiate according to the users' own + /// preferred language. For example, if "Content-Language: de-DE" is set, it + /// says that the document is intended for German language speakers + /// (however, it doesn't indicate the document is written in German. For + /// example, it might be written in English as part of a language course for + /// German speakers). + /// + /// If no Content-Language is specified, the default is that the content is + /// intended for all language audiences. Multiple language tags are also + /// possible, as well as applying the Content-Language header to various + /// media types and not only to textual documents. + (ContentLanguage, CONTENT_LANGUAGE, "content-language"); + + /// Indicates the size fo the entity-body. + /// + /// The header value must be a decimal indicating the number of octets sent + /// to the recipient. + (ContentLength, CONTENT_LENGTH, "content-length"); + + /// Indicates an alternate location for the returned data. + /// + /// The principal use case is to indicate the URL of the resource + /// transmitted as the result of content negotiation. + /// + /// Location and Content-Location are different: Location indicates the + /// target of a redirection (or the URL of a newly created document), while + /// Content-Location indicates the direct URL to use to access the resource, + /// without the need of further content negotiation. Location is a header + /// associated with the response, while Content-Location is associated with + /// the entity returned. + (ContentLocation, CONTENT_LOCATION, "content-location"); + + /// Indicates where in a full body message a partial message belongs. + (ContentRange, CONTENT_RANGE, "content-range"); + + /// Allows controlling resources the user agent is allowed to load for a + /// given page. + /// + /// With a few exceptions, policies mostly involve specifying server origins + /// and script endpoints. 
This helps guard against cross-site scripting + /// attacks (XSS). + (ContentSecurityPolicy, CONTENT_SECURITY_POLICY, "content-security-policy"); + + /// Allows experimenting with policies by monitoring their effects. + /// + /// The HTTP Content-Security-Policy-Report-Only response header allows web + /// developers to experiment with policies by monitoring (but not enforcing) + /// their effects. These violation reports consist of JSON documents sent + /// via an HTTP POST request to the specified URI. + (ContentSecurityPolicyReportOnly, CONTENT_SECURITY_POLICY_REPORT_ONLY, "content-security-policy-report-only"); + + /// Used to indicate the media type of the resource. + /// + /// In responses, a Content-Type header tells the client what the content + /// type of the returned content actually is. Browsers will do MIME sniffing + /// in some cases and will not necessarily follow the value of this header; + /// to prevent this behavior, the header X-Content-Type-Options can be set + /// to nosniff. + /// + /// In requests, (such as POST or PUT), the client tells the server what + /// type of data is actually sent. + (ContentType, CONTENT_TYPE, "content-type"); + + /// Contains stored HTTP cookies previously sent by the server with the + /// Set-Cookie header. + /// + /// The Cookie header might be omitted entirely, if the privacy setting of + /// the browser are set to block them, for example. + (Cookie, COOKIE, "cookie"); + + /// Indicates the client's tracking preference. + /// + /// This header lets users indicate whether they would prefer privacy rather + /// than personalized content. + (Dnt, DNT, "dnt"); + + /// Contains the date and time at which the message was originated. + (Date, DATE, "date"); + + /// Identifier for a specific version of a resource. + /// + /// This header allows caches to be more efficient, and saves bandwidth, as + /// a web server does not need to send a full response if the content has + /// not changed. 
On the other side, if the content has changed, etags are + /// useful to help prevent simultaneous updates of a resource from + /// overwriting each other ("mid-air collisions"). + /// + /// If the resource at a given URL changes, a new Etag value must be + /// generated. Etags are therefore similar to fingerprints and might also be + /// used for tracking purposes by some servers. A comparison of them allows + /// to quickly determine whether two representations of a resource are the + /// same, but they might also be set to persist indefinitely by a tracking + /// server. + (Etag, ETAG, "etag"); + + /// Indicates expectations that need to be fulfilled by the server in order + /// to properly handle the request. + /// + /// The only expectation defined in the specification is Expect: + /// 100-continue, to which the server shall respond with: + /// + /// * 100 if the information contained in the header is sufficient to cause + /// an immediate success, + /// + /// * 417 (Expectation Failed) if it cannot meet the expectation; or any + /// other 4xx status otherwise. + /// + /// For example, the server may reject a request if its Content-Length is + /// too large. + /// + /// No common browsers send the Expect header, but some other clients such + /// as cURL do so by default. + (Expect, EXPECT, "expect"); + + /// Contains the date/time after which the response is considered stale. + /// + /// Invalid dates, like the value 0, represent a date in the past and mean + /// that the resource is already expired. + /// + /// If there is a Cache-Control header with the "max-age" or "s-max-age" + /// directive in the response, the Expires header is ignored. + (Expires, EXPIRES, "expires"); + + /// Contains information from the client-facing side of proxy servers that + /// is altered or lost when a proxy is involved in the path of the request. 
+ /// + /// The alternative and de-facto standard versions of this header are the + /// X-Forwarded-For, X-Forwarded-Host and X-Forwarded-Proto headers. + /// + /// This header is used for debugging, statistics, and generating + /// location-dependent content and by design it exposes privacy sensitive + /// information, such as the IP address of the client. Therefore the user's + /// privacy must be kept in mind when deploying this header. + (Forwarded, FORWARDED, "forwarded"); + + /// Contains an Internet email address for a human user who controls the + /// requesting user agent. + /// + /// If you are running a robotic user agent (e.g. a crawler), the From + /// header should be sent, so you can be contacted if problems occur on + /// servers, such as if the robot is sending excessive, unwanted, or invalid + /// requests. + (From, FROM, "from"); + + /// Specifies the domain name of the server and (optionally) the TCP port + /// number on which the server is listening. + /// + /// If no port is given, the default port for the service requested (e.g., + /// "80" for an HTTP URL) is implied. + /// + /// A Host header field must be sent in all HTTP/1.1 request messages. A 400 + /// (Bad Request) status code will be sent to any HTTP/1.1 request message + /// that lacks a Host header field or contains more than one. + (Host, HOST, "host"); + + /// Makes a request conditional based on the E-Tag. + /// + /// For GET and HEAD methods, the server will send back the requested + /// resource only if it matches one of the listed ETags. For PUT and other + /// non-safe methods, it will only upload the resource in this case. + /// + /// The comparison with the stored ETag uses the strong comparison + /// algorithm, meaning two files are considered identical byte to byte only. + /// This is weakened when the W/ prefix is used in front of the ETag. 
+ /// + /// There are two common use cases: + /// + /// * For GET and HEAD methods, used in combination with an Range header, it + /// can guarantee that the new ranges requested comes from the same resource + /// than the previous one. If it doesn't match, then a 416 (Range Not + /// Satisfiable) response is returned. + /// + /// * For other methods, and in particular for PUT, If-Match can be used to + /// prevent the lost update problem. It can check if the modification of a + /// resource that the user wants to upload will not override another change + /// that has been done since the original resource was fetched. If the + /// request cannot be fulfilled, the 412 (Precondition Failed) response is + /// returned. + (IfMatch, IF_MATCH, "if-match"); + + /// Makes a request conditional based on the modification date. + /// + /// The If-Modified-Since request HTTP header makes the request conditional: + /// the server will send back the requested resource, with a 200 status, + /// only if it has been last modified after the given date. If the request + /// has not been modified since, the response will be a 304 without any + /// body; the Last-Modified header will contain the date of last + /// modification. Unlike If-Unmodified-Since, If-Modified-Since can only be + /// used with a GET or HEAD. + /// + /// When used in combination with If-None-Match, it is ignored, unless the + /// server doesn't support If-None-Match. + /// + /// The most common use case is to update a cached entity that has no + /// associated ETag. + (IfModifiedSince, IF_MODIFIED_SINCE, "if-modified-since"); + + /// Makes a request conditional based on the E-Tag. + /// + /// The If-None-Match HTTP request header makes the request conditional. For + /// GET and HEAD methods, the server will send back the requested resource, + /// with a 200 status, only if it doesn't have an ETag matching the given + /// ones. 
For other methods, the request will be processed only if the + /// eventually existing resource's ETag doesn't match any of the values + /// listed. + /// + /// When the condition fails for GET and HEAD methods, then the server must + /// return HTTP status code 304 (Not Modified). For methods that apply + /// server-side changes, the status code 412 (Precondition Failed) is used. + /// Note that the server generating a 304 response MUST generate any of the + /// following header fields that would have been sent in a 200 (OK) response + /// to the same request: Cache-Control, Content-Location, Date, ETag, + /// Expires, and Vary. + /// + /// The comparison with the stored ETag uses the weak comparison algorithm, + /// meaning two files are considered identical not only if they are + /// identical byte to byte, but if the content is equivalent. For example, + /// two pages that would differ only by the date of generation in the footer + /// would be considered as identical. + /// + /// When used in combination with If-Modified-Since, it has precedence (if + /// the server supports it). + /// + /// There are two common use cases: + /// + /// * For `GET` and `HEAD` methods, to update a cached entity that has an associated ETag. + /// * For other methods, and in particular for `PUT`, `If-None-Match` used with + /// the `*` value can be used to save a file not known to exist, + /// guaranteeing that another upload didn't happen before, losing the data + /// of the previous put; this problems is the variation of the lost update + /// problem. + (IfNoneMatch, IF_NONE_MATCH, "if-none-match"); + + /// Makes a request conditional based on range. + /// + /// The If-Range HTTP request header makes a range request conditional: if + /// the condition is fulfilled, the range request will be issued and the + /// server sends back a 206 Partial Content answer with the appropriate + /// body. 
If the condition is not fulfilled, the full resource is sent back, + /// with a 200 OK status. + /// + /// This header can be used either with a Last-Modified validator, or with + /// an ETag, but not with both. + /// + /// The most common use case is to resume a download, to guarantee that the + /// stored resource has not been modified since the last fragment has been + /// received. + (IfRange, IF_RANGE, "if-range"); + + /// Makes the request conditional based on the last modification date. + /// + /// The If-Unmodified-Since request HTTP header makes the request + /// conditional: the server will send back the requested resource, or accept + /// it in the case of a POST or another non-safe method, only if it has not + /// been last modified after the given date. If the request has been + /// modified after the given date, the response will be a 412 (Precondition + /// Failed) error. + /// + /// There are two common use cases: + /// + /// * In conjunction non-safe methods, like POST, it can be used to + /// implement an optimistic concurrency control, like done by some wikis: + /// editions are rejected if the stored document has been modified since the + /// original has been retrieved. + /// + /// * In conjunction with a range request with a If-Range header, it can be + /// used to ensure that the new fragment requested comes from an unmodified + /// document. + (IfUnmodifiedSince, IF_UNMODIFIED_SINCE, "if-unmodified-since"); + + /// Content-Types that are acceptable for the response. + (LastModified, LAST_MODIFIED, "last-modified"); + + /// Allows the server to point an interested client to another resource + /// containing metadata about the requested resource. + (Link, LINK, "link"); + + /// Indicates the URL to redirect a page to. + /// + /// The Location response header indicates the URL to redirect a page to. It + /// only provides a meaning when served with a 3xx status response. 
+ /// + /// The HTTP method used to make the new request to fetch the page pointed + /// to by Location depends of the original method and of the kind of + /// redirection: + /// + /// * If 303 (See Also) responses always lead to the use of a GET method, + /// 307 (Temporary Redirect) and 308 (Permanent Redirect) don't change the + /// method used in the original request; + /// + /// * 301 (Permanent Redirect) and 302 (Found) doesn't change the method + /// most of the time, though older user-agents may (so you basically don't + /// know). + /// + /// All responses with one of these status codes send a Location header. + /// + /// Beside redirect response, messages with 201 (Created) status also + /// include the Location header. It indicates the URL to the newly created + /// resource. + /// + /// Location and Content-Location are different: Location indicates the + /// target of a redirection (or the URL of a newly created resource), while + /// Content-Location indicates the direct URL to use to access the resource + /// when content negotiation happened, without the need of further content + /// negotiation. Location is a header associated with the response, while + /// Content-Location is associated with the entity returned. + (Location, LOCATION, "location"); + + /// Indicates the max number of intermediaries the request should be sent + /// through. + (MaxForwards, MAX_FORWARDS, "max-forwards"); + + /// Indicates where a fetch originates from. + /// + /// It doesn't include any path information, but only the server name. It is + /// sent with CORS requests, as well as with POST requests. It is similar to + /// the Referer header, but, unlike this header, it doesn't disclose the + /// whole path. + (Origin, ORIGIN, "origin"); + + /// HTTP/1.0 header usually used for backwards compatibility. + /// + /// The Pragma HTTP/1.0 general header is an implementation-specific header + /// that may have various effects along the request-response chain. 
It is + /// used for backwards compatibility with HTTP/1.0 caches where the + /// Cache-Control HTTP/1.1 header is not yet present. + (Pragma, PRAGMA, "pragma"); + + /// Defines the authentication method that should be used to gain access to + /// a proxy. + /// + /// Unlike `www-authenticate`, the `proxy-authenticate` header field applies + /// only to the next outbound client on the response chain. This is because + /// only the client that chose a given proxy is likely to have the + /// credentials necessary for authentication. However, when multiple proxies + /// are used within the same administrative domain, such as office and + /// regional caching proxies within a large corporate network, it is common + /// for credentials to be generated by the user agent and passed through the + /// hierarchy until consumed. Hence, in such a configuration, it will appear + /// as if Proxy-Authenticate is being forwarded because each proxy will send + /// the same challenge set. + /// + /// The `proxy-authenticate` header is sent along with a `407 Proxy + /// Authentication Required`. + (ProxyAuthenticate, PROXY_AUTHENTICATE, "proxy-authenticate"); + + /// Contains the credentials to authenticate a user agent to a proxy server. + /// + /// This header is usually included after the server has responded with a + /// 407 Proxy Authentication Required status and the Proxy-Authenticate + /// header. + (ProxyAuthorization, PROXY_AUTHORIZATION, "proxy-authorization"); + + /// Associates a specific cryptographic public key with a certain server. + /// + /// This decreases the risk of MITM attacks with forged certificates. If one + /// or several keys are pinned and none of them are used by the server, the + /// browser will not accept the response as legitimate, and will not display + /// it. + (PublicKeyPins, PUBLIC_KEY_PINS, "public-key-pins"); + + /// Sends reports of pinning violation to the report-uri specified in the + /// header. 
+ /// + /// Unlike `Public-Key-Pins`, this header still allows browsers to connect + /// to the server if the pinning is violated. + (PublicKeyPinsReportOnly, PUBLIC_KEY_PINS_REPORT_ONLY, "public-key-pins-report-only"); + + /// Indicates the part of a document that the server should return. + /// + /// Several parts can be requested with one Range header at once, and the + /// server may send back these ranges in a multipart document. If the server + /// sends back ranges, it uses the 206 Partial Content for the response. If + /// the ranges are invalid, the server returns the 416 Range Not Satisfiable + /// error. The server can also ignore the Range header and return the whole + /// document with a 200 status code. + (Range, RANGE, "range"); + + /// Contains the address of the previous web page from which a link to the + /// currently requested page was followed. + /// + /// The Referer header allows servers to identify where people are visiting + /// them from and may use that data for analytics, logging, or optimized + /// caching, for example. + (Referer, REFERER, "referer"); + + /// Governs which referrer information should be included with requests + /// made. + (ReferrerPolicy, REFERRER_POLICY, "referrer-policy"); + + /// Informs the web browser that the current page or frame should be + /// refreshed. + (Refresh, REFRESH, "refresh"); + + /// The Retry-After response HTTP header indicates how long the user agent + /// should wait before making a follow-up request. There are two main cases + /// this header is used: + /// + /// * When sent with a 503 (Service Unavailable) response, it indicates how + /// long the service is expected to be unavailable. + /// + /// * When sent with a redirect response, such as 301 (Moved Permanently), + /// it indicates the minimum time that the user agent is asked to wait + /// before issuing the redirected request. 
+ (RetryAfter, RETRY_AFTER, "retry-after"); + + /// The |Sec-WebSocket-Accept| header field is used in the WebSocket + /// opening handshake. It is sent from the server to the client to + /// confirm that the server is willing to initiate the WebSocket + /// connection. + (SecWebSocketAccept, SEC_WEBSOCKET_ACCEPT, "sec-websocket-accept"); + + /// The |Sec-WebSocket-Extensions| header field is used in the WebSocket + /// opening handshake. It is initially sent from the client to the + /// server, and then subsequently sent from the server to the client, to + /// agree on a set of protocol-level extensions to use for the duration + /// of the connection. + (SecWebSocketExtensions, SEC_WEBSOCKET_EXTENSIONS, "sec-websocket-extensions"); + + /// The |Sec-WebSocket-Key| header field is used in the WebSocket opening + /// handshake. It is sent from the client to the server to provide part + /// of the information used by the server to prove that it received a + /// valid WebSocket opening handshake. This helps ensure that the server + /// does not accept connections from non-WebSocket clients (e.g., HTTP + /// clients) that are being abused to send data to unsuspecting WebSocket + /// servers. + (SecWebSocketKey, SEC_WEBSOCKET_KEY, "sec-websocket-key"); + + /// The |Sec-WebSocket-Protocol| header field is used in the WebSocket + /// opening handshake. It is sent from the client to the server and back + /// from the server to the client to confirm the subprotocol of the + /// connection. This enables scripts to both select a subprotocol and be + /// sure that the server agreed to serve that subprotocol. + (SecWebSocketProtocol, SEC_WEBSOCKET_PROTOCOL, "sec-websocket-protocol"); + + /// The |Sec-WebSocket-Version| header field is used in the WebSocket + /// opening handshake. It is sent from the client to the server to + /// indicate the protocol version of the connection. 
This enables + /// servers to correctly interpret the opening handshake and subsequent + /// data being sent from the data, and close the connection if the server + /// cannot interpret that data in a safe manner. + (SecWebSocketVersion, SEC_WEBSOCKET_VERSION, "sec-websocket-version"); + + /// Contains information about the software used by the origin server to + /// handle the request. + /// + /// Overly long and detailed Server values should be avoided as they + /// potentially reveal internal implementation details that might make it + /// (slightly) easier for attackers to find and exploit known security + /// holes. + (Server, SERVER, "server"); + + /// Used to send cookies from the server to the user agent. + (SetCookie, SET_COOKIE, "set-cookie"); + + /// Tells the client to communicate with HTTPS instead of using HTTP. + (StrictTransportSecurity, STRICT_TRANSPORT_SECURITY, "strict-transport-security"); + + /// Informs the server of transfer encodings willing to be accepted as part + /// of the response. + /// + /// See also the Transfer-Encoding response header for more details on + /// transfer encodings. Note that chunked is always acceptable for HTTP/1.1 + /// recipients and you that don't have to specify "chunked" using the TE + /// header. However, it is useful for setting if the client is accepting + /// trailer fields in a chunked transfer coding using the "trailers" value. + (Te, TE, "te"); + + /// Allows the sender to include additional fields at the end of chunked + /// messages. + (Trailer, TRAILER, "trailer"); + + /// Specifies the form of encoding used to safely transfer the entity to the + /// client. + /// + /// `transfer-encoding` is a hop-by-hop header, that is applying to a + /// message between two nodes, not to a resource itself. Each segment of a + /// multi-node connection can use different `transfer-encoding` values. 
If + /// you want to compress data over the whole connection, use the end-to-end + /// header `content-encoding` header instead. + /// + /// When present on a response to a `HEAD` request that has no body, it + /// indicates the value that would have applied to the corresponding `GET` + /// message. + (TransferEncoding, TRANSFER_ENCODING, "transfer-encoding"); + + /// Contains a string that allows identifying the requesting client's + /// software. + (UserAgent, USER_AGENT, "user-agent"); + + /// Used as part of the exchange to upgrade the protocol. + (Upgrade, UPGRADE, "upgrade"); + + /// Sends a signal to the server expressing the client’s preference for an + /// encrypted and authenticated response. + (UpgradeInsecureRequests, UPGRADE_INSECURE_REQUESTS, "upgrade-insecure-requests"); + + /// Determines how to match future requests with cached responses. + /// + /// The `vary` HTTP response header determines how to match future request + /// headers to decide whether a cached response can be used rather than + /// requesting a fresh one from the origin server. It is used by the server + /// to indicate which headers it used when selecting a representation of a + /// resource in a content negotiation algorithm. + /// + /// The `vary` header should be set on a 304 Not Modified response exactly + /// like it would have been set on an equivalent 200 OK response. + (Vary, VARY, "vary"); + + /// Added by proxies to track routing. + /// + /// The `via` general header is added by proxies, both forward and reverse + /// proxies, and can appear in the request headers and the response headers. + /// It is used for tracking message forwards, avoiding request loops, and + /// identifying the protocol capabilities of senders along the + /// request/response chain. + (Via, VIA, "via"); + + /// General HTTP header contains information about possible problems with + /// the status of the message. + /// + /// More than one `warning` header may appear in a response. 
Warning header + /// fields can in general be applied to any message, however some warn-codes + /// are specific to caches and can only be applied to response messages. + (Warning, WARNING, "warning"); + + /// Defines the authentication method that should be used to gain access to + /// a resource. + (WwwAuthenticate, WWW_AUTHENTICATE, "www-authenticate"); + + /// Marker used by the server to indicate that the MIME types advertised in + /// the `content-type` headers should not be changed and be followed. + /// + /// This allows to opt-out of MIME type sniffing, or, in other words, it is + /// a way to say that the webmasters knew what they were doing. + /// + /// This header was introduced by Microsoft in IE 8 as a way for webmasters + /// to block content sniffing that was happening and could transform + /// non-executable MIME types into executable MIME types. Since then, other + /// browsers have introduced it, even if their MIME sniffing algorithms were + /// less aggressive. + /// + /// Site security testers usually expect this header to be set. + (XContentTypeOptions, X_CONTENT_TYPE_OPTIONS, "x-content-type-options"); + + /// Controls DNS prefetching. + /// + /// The `x-dns-prefetch-control` HTTP response header controls DNS + /// prefetching, a feature by which browsers proactively perform domain name + /// resolution on both links that the user may choose to follow as well as + /// URLs for items referenced by the document, including images, CSS, + /// JavaScript, and so forth. + /// + /// This prefetching is performed in the background, so that the DNS is + /// likely to have been resolved by the time the referenced items are + /// needed. This reduces latency when the user clicks a link. + (XDnsPrefetchControl, X_DNS_PREFETCH_CONTROL, "x-dns-prefetch-control"); + + /// Indicates whether or not a browser should be allowed to render a page in + /// a frame. 
+ /// + /// Sites can use this to avoid clickjacking attacks, by ensuring that their + /// content is not embedded into other sites. + /// + /// The added security is only provided if the user accessing the document + /// is using a browser supporting `x-frame-options`. + (XFrameOptions, X_FRAME_OPTIONS, "x-frame-options"); + + /// Stop pages from loading when an XSS attack is detected. + /// + /// The HTTP X-XSS-Protection response header is a feature of Internet + /// Explorer, Chrome and Safari that stops pages from loading when they + /// detect reflected cross-site scripting (XSS) attacks. Although these + /// protections are largely unnecessary in modern browsers when sites + /// implement a strong Content-Security-Policy that disables the use of + /// inline JavaScript ('unsafe-inline'), they can still provide protections + /// for users of older web browsers that don't yet support CSP. + (XXssProtection, X_XSS_PROTECTION, "x-xss-protection"); +} + +/// Valid header name characters +/// +/// ```not_rust +/// field-name = token +/// token = 1* +/// separators = "(" | ")" | "<" | ">" | "@" +/// | "," | ";" | ":" | "\" | <"> +/// | "/" | "[" | "]" | "?" 
| "=" +/// | "{" | "}" | SP | HT +/// ``` +const HEADER_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, b'!', b'"', b'#', b'$', b'%', b'&', b'\'', // 3x + 0, 0, b'*', b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', 0, 0, // 5x + 0, 0, 0, 0, 0, b'a', b'b', b'c', b'd', b'e', // 6x + b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', // 7x + b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', // 8x + b'z', 0, 0, 0, 0, b'_', 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, b'|', 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +const HEADER_CHARS_H2: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, b'!', b'"', b'#', b'$', b'%', b'&', b'\'', // 3x + 0, 0, b'*', b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', 0, 0, // 5x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 6x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 7x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8x + 0, 0, 0, 0, 0, b'_', 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', 
b'z', 0, b'|', 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +macro_rules! eq { + ($v:ident[$n:expr] == $a:tt) => { + $v[$n] == $a + }; + ($v:ident[$n:expr] == $a:tt $($rest:tt)+) => { + $v[$n] == $a && eq!($v[($n+1)] == $($rest)+) + }; + ($v:ident == $a:tt $($rest:tt)*) => { + $v[0] == $a && eq!($v[1] == $($rest)*) + }; +} + +fn parse_hdr<'a>(data: &'a [u8], b: &'a mut [u8; 64], table: &[u8; 256]) + -> Result, InvalidHeaderName> +{ + use self::StandardHeader::*; + + let len = data.len(); + + let validate = |buf: &'a [u8], len: usize| { + let buf = &buf[..len]; + if buf.iter().any(|&b| b == 0) { + Err(InvalidHeaderName::new()) + } else { + Ok(HdrName::custom(buf, true)) + } + }; + + + macro_rules! 
to_lower { + ($d:ident, $src:ident, 1) => { $d[0] = table[$src[0] as usize]; }; + ($d:ident, $src:ident, 2) => { to_lower!($d, $src, 1); $d[1] = table[$src[1] as usize]; }; + ($d:ident, $src:ident, 3) => { to_lower!($d, $src, 2); $d[2] = table[$src[2] as usize]; }; + ($d:ident, $src:ident, 4) => { to_lower!($d, $src, 3); $d[3] = table[$src[3] as usize]; }; + ($d:ident, $src:ident, 5) => { to_lower!($d, $src, 4); $d[4] = table[$src[4] as usize]; }; + ($d:ident, $src:ident, 6) => { to_lower!($d, $src, 5); $d[5] = table[$src[5] as usize]; }; + ($d:ident, $src:ident, 7) => { to_lower!($d, $src, 6); $d[6] = table[$src[6] as usize]; }; + ($d:ident, $src:ident, 8) => { to_lower!($d, $src, 7); $d[7] = table[$src[7] as usize]; }; + ($d:ident, $src:ident, 9) => { to_lower!($d, $src, 8); $d[8] = table[$src[8] as usize]; }; + ($d:ident, $src:ident, 10) => { to_lower!($d, $src, 9); $d[9] = table[$src[9] as usize]; }; + ($d:ident, $src:ident, 11) => { to_lower!($d, $src, 10); $d[10] = table[$src[10] as usize]; }; + ($d:ident, $src:ident, 12) => { to_lower!($d, $src, 11); $d[11] = table[$src[11] as usize]; }; + ($d:ident, $src:ident, 13) => { to_lower!($d, $src, 12); $d[12] = table[$src[12] as usize]; }; + ($d:ident, $src:ident, 14) => { to_lower!($d, $src, 13); $d[13] = table[$src[13] as usize]; }; + ($d:ident, $src:ident, 15) => { to_lower!($d, $src, 14); $d[14] = table[$src[14] as usize]; }; + ($d:ident, $src:ident, 16) => { to_lower!($d, $src, 15); $d[15] = table[$src[15] as usize]; }; + ($d:ident, $src:ident, 17) => { to_lower!($d, $src, 16); $d[16] = table[$src[16] as usize]; }; + ($d:ident, $src:ident, 18) => { to_lower!($d, $src, 17); $d[17] = table[$src[17] as usize]; }; + ($d:ident, $src:ident, 19) => { to_lower!($d, $src, 18); $d[18] = table[$src[18] as usize]; }; + ($d:ident, $src:ident, 20) => { to_lower!($d, $src, 19); $d[19] = table[$src[19] as usize]; }; + ($d:ident, $src:ident, 21) => { to_lower!($d, $src, 20); $d[20] = table[$src[20] as usize]; }; + ($d:ident, 
$src:ident, 22) => { to_lower!($d, $src, 21); $d[21] = table[$src[21] as usize]; }; + ($d:ident, $src:ident, 23) => { to_lower!($d, $src, 22); $d[22] = table[$src[22] as usize]; }; + ($d:ident, $src:ident, 24) => { to_lower!($d, $src, 23); $d[23] = table[$src[23] as usize]; }; + ($d:ident, $src:ident, 25) => { to_lower!($d, $src, 24); $d[24] = table[$src[24] as usize]; }; + ($d:ident, $src:ident, 26) => { to_lower!($d, $src, 25); $d[25] = table[$src[25] as usize]; }; + ($d:ident, $src:ident, 27) => { to_lower!($d, $src, 26); $d[26] = table[$src[26] as usize]; }; + ($d:ident, $src:ident, 28) => { to_lower!($d, $src, 27); $d[27] = table[$src[27] as usize]; }; + ($d:ident, $src:ident, 29) => { to_lower!($d, $src, 28); $d[28] = table[$src[28] as usize]; }; + ($d:ident, $src:ident, 30) => { to_lower!($d, $src, 29); $d[29] = table[$src[29] as usize]; }; + ($d:ident, $src:ident, 31) => { to_lower!($d, $src, 30); $d[30] = table[$src[30] as usize]; }; + ($d:ident, $src:ident, 32) => { to_lower!($d, $src, 31); $d[31] = table[$src[31] as usize]; }; + ($d:ident, $src:ident, 33) => { to_lower!($d, $src, 32); $d[32] = table[$src[32] as usize]; }; + ($d:ident, $src:ident, 34) => { to_lower!($d, $src, 33); $d[33] = table[$src[33] as usize]; }; + ($d:ident, $src:ident, 35) => { to_lower!($d, $src, 34); $d[34] = table[$src[34] as usize]; }; + } + + assert!(len < super::MAX_HEADER_NAME_LEN, + "header name too long -- max length is {}", + super::MAX_HEADER_NAME_LEN); + + match len { + 0 => { + Err(InvalidHeaderName::new()) + } + 2 => { + to_lower!(b, data, 2); + + if eq!(b == b't' b'e') { + Ok(Te.into()) + } else { + validate(b, len) + } + } + 3 => { + to_lower!(b, data, 3); + + if eq!(b == b'a' b'g' b'e') { + Ok(Age.into()) + } else if eq!(b == b'v' b'i' b'a') { + Ok(Via.into()) + } else if eq!(b == b'd' b'n' b't') { + Ok(Dnt.into()) + } else { + validate(b, len) + } + } + 4 => { + to_lower!(b, data, 4); + + if eq!(b == b'd' b'a' b't' b'e') { + Ok(Date.into()) + } else if eq!(b == 
b'e' b't' b'a' b'g') { + Ok(Etag.into()) + } else if eq!(b == b'f' b'r' b'o' b'm') { + Ok(From.into()) + } else if eq!(b == b'h' b'o' b's' b't') { + Ok(Host.into()) + } else if eq!(b == b'l' b'i' b'n' b'k') { + Ok(Link.into()) + } else if eq!(b == b'v' b'a' b'r' b'y') { + Ok(Vary.into()) + } else { + validate(b, len) + } + } + 5 => { + to_lower!(b, data, 5); + + if eq!(b == b'a' b'l' b'l' b'o' b'w') { + Ok(Allow.into()) + } else if eq!(b == b'r' b'a' b'n' b'g' b'e') { + Ok(Range.into()) + } else { + validate(b, len) + } + } + 6 => { + to_lower!(b, data, 6); + + if eq!(b == b'a' b'c' b'c' b'e' b'p' b't') { + return Ok(Accept.into()) + } else if eq!(b == b'c' b'o' b'o' b'k' b'i' b'e') { + return Ok(Cookie.into()) + } else if eq!(b == b'e' b'x' b'p' b'e' b'c' b't') { + return Ok(Expect.into()) + } else if eq!(b == b'o' b'r' b'i' b'g' b'i' b'n') { + return Ok(Origin.into()) + } else if eq!(b == b'p' b'r' b'a' b'g' b'm' b'a') { + return Ok(Pragma.into()) + } else if b[0] == b's' { + if eq!(b[1] == b'e' b'r' b'v' b'e' b'r') { + return Ok(Server.into()) + } + } + + validate(b, len) + } + 7 => { + to_lower!(b, data, 7); + + if eq!(b == b'a' b'l' b't' b'-' b's' b'v' b'c') { + Ok(AltSvc.into()) + } else if eq!(b == b'e' b'x' b'p' b'i' b'r' b'e' b's') { + Ok(Expires.into()) + } else if eq!(b == b'r' b'e' b'f' b'e' b'r' b'e' b'r') { + Ok(Referer.into()) + } else if eq!(b == b'r' b'e' b'f' b'r' b'e' b's' b'h') { + Ok(Refresh.into()) + } else if eq!(b == b't' b'r' b'a' b'i' b'l' b'e' b'r') { + Ok(Trailer.into()) + } else if eq!(b == b'u' b'p' b'g' b'r' b'a' b'd' b'e') { + Ok(Upgrade.into()) + } else if eq!(b == b'w' b'a' b'r' b'n' b'i' b'n' b'g') { + Ok(Warning.into()) + } else { + validate(b, len) + } + } + 8 => { + to_lower!(b, data, 8); + + if eq!(b == b'i' b'f' b'-') { + if eq!(b[3] == b'm' b'a' b't' b'c' b'h') { + return Ok(IfMatch.into()) + } else if eq!(b[3] == b'r' b'a' b'n' b'g' b'e') { + return Ok(IfRange.into()) + } + } else if eq!(b == b'l' b'o' b'c' b'a' b't' b'i' 
b'o' b'n') { + return Ok(Location.into()) + } + + validate(b, len) + } + 9 => { + to_lower!(b, data, 9); + + if eq!(b == b'f' b'o' b'r' b'w' b'a' b'r' b'd' b'e' b'd') { + Ok(Forwarded.into()) + } else { + validate(b, len) + } + } + 10 => { + to_lower!(b, data, 10); + + if eq!(b == b'c' b'o' b'n' b'n' b'e' b'c' b't' b'i' b'o' b'n') { + Ok(Connection.into()) + } else if eq!(b == b's' b'e' b't' b'-' b'c' b'o' b'o' b'k' b'i' b'e') { + Ok(SetCookie.into()) + } else if eq!(b == b'u' b's' b'e' b'r' b'-' b'a' b'g' b'e' b'n' b't') { + Ok(UserAgent.into()) + } else { + validate(b, len) + } + } + 11 => { + to_lower!(b, data, 11); + + if eq!(b == b'r' b'e' b't' b'r' b'y' b'-' b'a' b'f' b't' b'e' b'r') { + Ok(RetryAfter.into()) + } else { + validate(b, len) + } + } + 12 => { + to_lower!(b, data, 12); + + if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b't' b'y' b'p' b'e') { + Ok(ContentType.into()) + } else if eq!(b == b'm' b'a' b'x' b'-' b'f' b'o' b'r' b'w' b'a' b'r' b'd' b's') { + Ok(MaxForwards.into()) + } else { + validate(b, len) + } + } + 13 => { + to_lower!(b, data, 13); + + if b[0] == b'a' { + if eq!(b[1] == b'c' b'c' b'e' b'p' b't' b'-' b'r' b'a' b'n' b'g' b'e' b's') { + return Ok(AcceptRanges.into()) + } else if eq!(b[1] == b'u' b't' b'h' b'o' b'r' b'i' b'z' b'a' b't' b'i' b'o' b'n') { + return Ok(Authorization.into()) + } + } else if b[0] == b'c' { + if eq!(b[1] == b'a' b'c' b'h' b'e' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l') { + return Ok(CacheControl.into()) + } else if eq!(b[1] == b'o' b'n' b't' b'e' b'n' b't' b'-' b'r' b'a' b'n' b'g' b'e' ) { + return Ok(ContentRange.into()) + } + } else if eq!(b == b'i' b'f' b'-' b'n' b'o' b'n' b'e' b'-' b'm' b'a' b't' b'c' b'h') { + return Ok(IfNoneMatch.into()) + } else if eq!(b == b'l' b'a' b's' b't' b'-' b'm' b'o' b'd' b'i' b'f' b'i' b'e' b'd') { + return Ok(LastModified.into()) + } + + validate(b, len) + } + 14 => { + to_lower!(b, data, 14); + + if eq!(b == b'a' b'c' b'c' b'e' b'p' b't' b'-' b'c' b'h' b'a' b'r' b's' b'e' 
b't') { + Ok(AcceptCharset.into()) + } else if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b'l' b'e' b'n' b'g' b't' b'h') { + Ok(ContentLength.into()) + } else { + validate(b, len) + } + } + 15 => { + to_lower!(b, data, 15); + + if eq!(b == b'a' b'c' b'c' b'e' b'p' b't' b'-') { // accept- + if eq!(b[7] == b'e' b'n' b'c' b'o' b'd' b'i' b'n' b'g') { + return Ok(AcceptEncoding.into()) + } else if eq!(b[7] == b'l' b'a' b'n' b'g' b'u' b'a' b'g' b'e') { + return Ok(AcceptLanguage.into()) + } + } else if eq!(b == b'p' b'u' b'b' b'l' b'i' b'c' b'-' b'k' b'e' b'y' b'-' b'p' b'i' b'n' b's') { + return Ok(PublicKeyPins.into()) + } else if eq!(b == b'x' b'-' b'f' b'r' b'a' b'm' b'e' b'-' b'o' b'p' b't' b'i' b'o' b'n' b's') { + return Ok(XFrameOptions.into()) + } + else if eq!(b == b'r' b'e' b'f' b'e' b'r' b'r' b'e' b'r' b'-' b'p' b'o' b'l' b'i' b'c' b'y') { + return Ok(ReferrerPolicy.into()) + } + + validate(b, len) + } + 16 => { + to_lower!(b, data, 16); + + if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-') { + if eq!(b[8] == b'l' b'a' b'n' b'g' b'u' b'a' b'g' b'e') { + return Ok(ContentLanguage.into()) + } else if eq!(b[8] == b'l' b'o' b'c' b'a' b't' b'i' b'o' b'n') { + return Ok(ContentLocation.into()) + } else if eq!(b[8] == b'e' b'n' b'c' b'o' b'd' b'i' b'n' b'g') { + return Ok(ContentEncoding.into()) + } + } else if eq!(b == b'w' b'w' b'w' b'-' b'a' b'u' b't' b'h' b'e' b'n' b't' b'i' b'c' b'a' b't' b'e') { + return Ok(WwwAuthenticate.into()) + } else if eq!(b == b'x' b'-' b'x' b's' b's' b'-' b'p' b'r' b'o' b't' b'e' b'c' b't' b'i' b'o' b'n') { + return Ok(XXssProtection.into()) + } + + validate(b, len) + } + 17 => { + to_lower!(b, data, 17); + + if eq!(b == b't' b'r' b'a' b'n' b's' b'f' b'e' b'r' b'-' b'e' b'n' b'c' b'o' b'd' b'i' b'n' b'g') { + Ok(TransferEncoding.into()) + } else if eq!(b == b'i' b'f' b'-' b'm' b'o' b'd' b'i' b'f' b'i' b'e' b'd' b'-' b's' b'i' b'n' b'c' b'e') { + Ok(IfModifiedSince.into()) + } else if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' 
b's' b'o' b'c' b'k' b'e' b't' b'-' b'k' b'e' b'y') { + Ok(SecWebSocketKey.into()) + } else { + validate(b, len) + } + } + 18 => { + to_lower!(b, data, 18); + + if eq!(b == b'p' b'r' b'o' b'x' b'y' b'-' b'a' b'u' b't' b'h' b'e' b'n' b't' b'i' b'c' b'a' b't' b'e') { + Ok(ProxyAuthenticate.into()) + } else { + validate(b, len) + } + } + 19 => { + to_lower!(b, data, 19); + + if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b'd' b'i' b's' b'p' b'o' b's' b'i' b't' b'i' b'o' b'n') { + Ok(ContentDisposition.into()) + } else if eq!(b == b'i' b'f' b'-' b'u' b'n' b'm' b'o' b'd' b'i' b'f' b'i' b'e' b'd' b'-' b's' b'i' b'n' b'c' b'e') { + Ok(IfUnmodifiedSince.into()) + } else if eq!(b == b'p' b'r' b'o' b'x' b'y' b'-' b'a' b'u' b't' b'h' b'o' b'r' b'i' b'z' b'a' b't' b'i' b'o' b'n') { + Ok(ProxyAuthorization.into()) + } else { + validate(b, len) + } + } + 20 => { + to_lower!(b, data, 20); + + if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'a' b'c' b'c' b'e' b'p' b't') { + Ok(SecWebSocketAccept.into()) + } else { + validate(b, len) + } + } + 21 => { + to_lower!(b, data, 21); + + if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'v' b'e' b'r' b's' b'i' b'o' b'n') { + Ok(SecWebSocketVersion.into()) + } else { + validate(b, len) + } + } + 22 => { + to_lower!(b, data, 22); + + if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'm' b'a' b'x' b'-' b'a' b'g' b'e') { + Ok(AccessControlMaxAge.into()) + } else if eq!(b == b'x' b'-' b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b't' b'y' b'p' b'e' b'-' b'o' b'p' b't' b'i' b'o' b'n' b's') { + Ok(XContentTypeOptions.into()) + } else if eq!(b == b'x' b'-' b'd' b'n' b's' b'-' b'p' b'r' b'e' b'f' b'e' b't' b'c' b'h' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l') { + Ok(XDnsPrefetchControl.into()) + } else if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'p' b'r' b'o' b't' b'o' b'c' b'o' b'l') { + 
Ok(SecWebSocketProtocol.into()) + } else { + validate(b, len) + } + } + 23 => { + to_lower!(b, data, 23); + + if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b's' b'e' b'c' b'u' b'r' b'i' b't' b'y' b'-' b'p' b'o' b'l' b'i' b'c' b'y') { + Ok(ContentSecurityPolicy.into()) + } else { + validate(b, len) + } + } + 24 => { + to_lower!(b, data, 24); + + if eq!(b == b's' b'e' b'c' b'-' b'w' b'e' b'b' b's' b'o' b'c' b'k' b'e' b't' b'-' b'e' b'x' b't' b'e' b'n' b's' b'i' b'o' b'n' b's') { + Ok(SecWebSocketExtensions.into()) + } else { + validate(b, len) + } + } + 25 => { + to_lower!(b, data, 25); + + if eq!(b == b's' b't' b'r' b'i' b'c' b't' b'-' b't' b'r' b'a' b'n' b's' b'p' b'o' b'r' b't' b'-' b's' b'e' b'c' b'u' b'r' b'i' b't' b'y') { + Ok(StrictTransportSecurity.into()) + } else if eq!(b == b'u' b'p' b'g' b'r' b'a' b'd' b'e' b'-' b'i' b'n' b's' b'e' b'c' b'u' b'r' b'e' b'-' b'r' b'e' b'q' b'u' b'e' b's' b't' b's') { + Ok(UpgradeInsecureRequests.into()) + } else { + validate(b, len) + } + } + 27 => { + to_lower!(b, data, 27); + + if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'a' b'l' b'l' b'o' b'w' b'-' b'o' b'r' b'i' b'g' b'i' b'n') { + Ok(AccessControlAllowOrigin.into()) + } else if eq!(b == b'p' b'u' b'b' b'l' b'i' b'c' b'-' b'k' b'e' b'y' b'-' b'p' b'i' b'n' b's' b'-' b'r' b'e' b'p' b'o' b'r' b't' b'-' b'o' b'n' b'l' b'y') { + Ok(PublicKeyPinsReportOnly.into()) + } else { + validate(b, len) + } + } + 28 => { + to_lower!(b, data, 28); + + if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'a' b'l' b'l' b'o' b'w' b'-') { + if eq!(b[21] == b'h' b'e' b'a' b'd' b'e' b'r' b's') { + return Ok(AccessControlAllowHeaders.into()) + } else if eq!(b[21] == b'm' b'e' b't' b'h' b'o' b'd' b's') { + return Ok(AccessControlAllowMethods.into()) + } + } + + validate(b, len) + } + 29 => { + to_lower!(b, data, 29); + + if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-') { 
+ if eq!(b[15] == b'e' b'x' b'p' b'o' b's' b'e' b'-' b'h' b'e' b'a' b'd' b'e' b'r' b's') { + return Ok(AccessControlExposeHeaders.into()) + } else if eq!(b[15] == b'r' b'e' b'q' b'u' b'e' b's' b't' b'-' b'm' b'e' b't' b'h' b'o' b'd') { + return Ok(AccessControlRequestMethod.into()) + } + } + + validate(b, len) + } + 30 => { + to_lower!(b, data, 30); + + if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'r' b'e' b'q' b'u' b'e' b's' b't' b'-' b'h' b'e' b'a' b'd' b'e' b'r' b's') { + Ok(AccessControlRequestHeaders.into()) + } else { + validate(b, len) + } + } + 32 => { + to_lower!(b, data, 32); + + if eq!(b == b'a' b'c' b'c' b'e' b's' b's' b'-' b'c' b'o' b'n' b't' b'r' b'o' b'l' b'-' b'a' b'l' b'l' b'o' b'w' b'-' b'c' b'r' b'e' b'd' b'e' b'n' b't' b'i' b'a' b'l' b's') { + Ok(AccessControlAllowCredentials.into()) + } else { + validate(b, len) + } + } + 35 => { + to_lower!(b, data, 35); + + if eq!(b == b'c' b'o' b'n' b't' b'e' b'n' b't' b'-' b's' b'e' b'c' b'u' b'r' b'i' b't' b'y' b'-' b'p' b'o' b'l' b'i' b'c' b'y' b'-' b'r' b'e' b'p' b'o' b'r' b't' b'-' b'o' b'n' b'l' b'y') { + Ok(ContentSecurityPolicyReportOnly.into()) + } else { + validate(b, len) + } + } + _ => { + if len < 64 { + for i in 0..len { + b[i] = table[data[i] as usize]; + } + + validate(b, len) + } else { + Ok(HdrName::custom(data, false)) + } + } + } +} + +impl<'a> From for HdrName<'a> { + fn from(hdr: StandardHeader) -> HdrName<'a> { + HdrName { inner: Repr::Standard(hdr) } + } +} + +impl HeaderName { + /// Converts a slice of bytes to an HTTP header name. + /// + /// This function normalizes the input. 
+ pub fn from_bytes(src: &[u8]) -> Result { + let mut buf = unsafe { mem::uninitialized() }; + match parse_hdr(src, &mut buf, &HEADER_CHARS)?.inner { + Repr::Standard(std) => Ok(std.into()), + Repr::Custom(MaybeLower { buf, lower: true }) => { + let buf = Bytes::from(buf); + let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; + Ok(Custom(val).into()) + } + Repr::Custom(MaybeLower { buf, lower: false }) => { + use bytes::{BufMut}; + let mut dst = BytesMut::with_capacity(buf.len()); + + for b in buf.iter() { + let b = HEADER_CHARS[*b as usize]; + + if b == 0 { + return Err(InvalidHeaderName::new()); + } + + dst.put(b); + } + + let val = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) }; + + Ok(Custom(val).into()) + } + } + } + + /// Converts a slice of bytes to an HTTP header name. + /// + /// This function expects the input to only contain lowercase characters. + /// This is useful when decoding HTTP/2.0 headers. The HTTP/2.0 + /// specification requires that all headers be represented in lower case. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// + /// // Parsing a lower case header + /// let hdr = HeaderName::from_lowercase(b"content-length").unwrap(); + /// assert_eq!(CONTENT_LENGTH, hdr); + /// + /// // Parsing a header that contains uppercase characters + /// assert!(HeaderName::from_lowercase(b"Content-Length").is_err()); + /// ``` + pub fn from_lowercase(src: &[u8]) -> Result { + let mut buf = unsafe { mem::uninitialized() }; + match parse_hdr(src, &mut buf, &HEADER_CHARS_H2)?.inner { + Repr::Standard(std) => Ok(std.into()), + Repr::Custom(MaybeLower { buf, lower: true }) => { + let buf = Bytes::from(buf); + let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; + Ok(Custom(val).into()) + } + Repr::Custom(MaybeLower { buf, lower: false }) => { + for &b in buf.iter() { + if b != HEADER_CHARS[b as usize] { + return Err(InvalidHeaderName::new()); + } + } + + let buf = Bytes::from(buf); + let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; + Ok(Custom(val).into()) + } + } + } + + /// Converts a static string to a HTTP header name. + /// + /// This function panics when the static string is a invalid header. + /// + /// This function requires the static string to only contain lowercase + /// characters, numerals and symbols, as per the HTTP/2.0 specification + /// and header names internal representation within this library. 
+ /// + /// + /// # Examples + /// + /// ``` + /// # use http::header::*; + /// // Parsing a standard header + /// let hdr = HeaderName::from_static("content-length"); + /// assert_eq!(CONTENT_LENGTH, hdr); + /// + /// // Parsing a custom header + /// let CUSTOM_HEADER: &'static str = "custom-header"; + /// + /// let a = HeaderName::from_lowercase(b"custom-header").unwrap(); + /// let b = HeaderName::from_static(CUSTOM_HEADER); + /// assert_eq!(a, b); + /// ``` + /// + /// ```should_panic + /// # use http::header::*; + /// # + /// // Parsing a header that contains invalid symbols(s): + /// HeaderName::from_static("content{}{}length"); // This line panics! + /// + /// // Parsing a header that contains invalid uppercase characters. + /// let a = HeaderName::from_static("foobar"); + /// let b = HeaderName::from_static("FOOBAR"); // This line panics! + /// ``` + pub fn from_static(src: &'static str) -> HeaderName { + let bytes = src.as_bytes(); + let mut buf = unsafe { mem::uninitialized() }; + match parse_hdr(bytes, &mut buf, &HEADER_CHARS_H2) { + Ok(hdr_name) => match hdr_name.inner { + Repr::Standard(std) => std.into(), + Repr::Custom(MaybeLower { buf: _, lower: true }) => { + let val = ByteStr::from_static(src); + Custom(val).into() + }, + Repr::Custom(MaybeLower { buf: _, lower: false }) => { + // With lower false, the string is left unchecked by + // parse_hdr and must be validated manually. + for &b in bytes.iter() { + if HEADER_CHARS_H2[b as usize] == 0 { + panic!("invalid header name") + } + } + + let val = ByteStr::from_static(src); + Custom(val).into() + } + }, + + Err(_) => panic!("invalid header name") + } + } + + /// Returns a `str` representation of the header. + /// + /// The returned string will always be lower case. 
+ #[inline] + pub fn as_str(&self) -> &str { + match self.inner { + Repr::Standard(v) => v.as_str(), + Repr::Custom(ref v) => &*v.0, + } + } +} + +impl FromStr for HeaderName { + type Err = InvalidHeaderName; + + fn from_str(s: &str) -> Result { + HeaderName::from_bytes(s.as_bytes()) + .map_err(|_| InvalidHeaderName { + _priv: (), + }) + } +} + +impl AsRef for HeaderName { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl AsRef<[u8]> for HeaderName { + fn as_ref(&self) -> &[u8] { + self.as_str().as_bytes() + } +} + +impl Borrow for HeaderName { + fn borrow(&self) -> &str { + self.as_str() + } +} + +impl fmt::Debug for HeaderName { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self.as_str(), fmt) + } +} + +impl InvalidHeaderName { + fn new() -> InvalidHeaderName { + InvalidHeaderName { _priv: () } + } +} + +impl<'a> From<&'a HeaderName> for HeaderName { + fn from(src: &'a HeaderName) -> HeaderName { + src.clone() + } +} + +#[doc(hidden)] +impl From> for Bytes +where T: Into { + fn from(repr: Repr) -> Bytes { + match repr { + Repr::Standard(header) => + Bytes::from_static(header.as_str().as_bytes()), + Repr::Custom(header) => header.into() + } + } +} + +impl From for Bytes { + #[inline] + fn from(Custom(inner): Custom) -> Bytes { + Bytes::from(inner) + } +} + +impl From for Bytes { + #[inline] + fn from(name: HeaderName) -> Bytes { + name.inner.into() + } +} + +impl<'a> HttpTryFrom<&'a HeaderName> for HeaderName { + type Error = ::error::Never; + + #[inline] + fn try_from(t: &'a HeaderName) -> Result { + Ok(t.clone()) + } +} + +impl<'a> HttpTryFrom<&'a str> for HeaderName { + type Error = InvalidHeaderName; + #[inline] + fn try_from(s: &'a str) -> Result { + Self::from_bytes(s.as_bytes()) + } +} + +impl<'a> HttpTryFrom<&'a [u8]> for HeaderName { + type Error = InvalidHeaderName; + #[inline] + fn try_from(s: &'a [u8]) -> Result { + Self::from_bytes(s) + } +} + +impl HttpTryFrom for HeaderName { + type Error = 
InvalidHeaderNameBytes; + #[inline] + fn try_from(bytes: Bytes) -> Result { + Self::from_bytes(bytes.as_ref()).map_err(InvalidHeaderNameBytes) + } +} + +#[doc(hidden)] +impl From for HeaderName { + fn from(src: StandardHeader) -> HeaderName { + HeaderName { + inner: Repr::Standard(src), + } + } +} + +#[doc(hidden)] +impl From for HeaderName { + fn from(src: Custom) -> HeaderName { + HeaderName { inner: Repr::Custom(src) } + } +} + +impl<'a> PartialEq<&'a HeaderName> for HeaderName { + #[inline] + fn eq(&self, other: &&'a HeaderName) -> bool { + *self == **other + } +} + + +impl<'a> PartialEq for &'a HeaderName { + #[inline] + fn eq(&self, other: &HeaderName) -> bool { + *other == *self + } +} + +impl PartialEq for HeaderName { + /// Performs a case-insensitive comparison of the string against the header + /// name + /// + /// # Examples + /// + /// ``` + /// use http::header::CONTENT_LENGTH; + /// + /// assert_eq!(CONTENT_LENGTH, "content-length"); + /// assert_eq!(CONTENT_LENGTH, "Content-Length"); + /// assert_ne!(CONTENT_LENGTH, "content length"); + /// ``` + #[inline] + fn eq(&self, other: &str) -> bool { + eq_ignore_ascii_case(self.as_ref(), other.as_bytes()) + } +} + + +impl PartialEq for str { + /// Performs a case-insensitive comparison of the string against the header + /// name + /// + /// # Examples + /// + /// ``` + /// use http::header::CONTENT_LENGTH; + /// + /// assert_eq!(CONTENT_LENGTH, "content-length"); + /// assert_eq!(CONTENT_LENGTH, "Content-Length"); + /// assert_ne!(CONTENT_LENGTH, "content length"); + /// ``` + #[inline] + fn eq(&self, other: &HeaderName) -> bool { + *other == *self + } +} + +impl<'a> PartialEq<&'a str> for HeaderName { + /// Performs a case-insensitive comparison of the string against the header + /// name + #[inline] + fn eq(&self, other: &&'a str) -> bool { + *self == **other + } +} + + +impl<'a> PartialEq for &'a str { + /// Performs a case-insensitive comparison of the string against the header + /// name + #[inline] + 
fn eq(&self, other: &HeaderName) -> bool { + *other == *self + } +} + +impl fmt::Display for InvalidHeaderName { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +impl Error for InvalidHeaderName { + fn description(&self) -> &str { + "invalid HTTP header name" + } +} + +impl fmt::Display for InvalidHeaderNameBytes { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl Error for InvalidHeaderNameBytes { + fn description(&self) -> &str { + self.0.description() + } +} + +// ===== HdrName ===== + +impl<'a> HdrName<'a> { + fn custom(buf: &'a [u8], lower: bool) -> HdrName<'a> { + HdrName { + inner: Repr::Custom(MaybeLower { + buf: buf, + lower: lower, + }), + } + } + + pub fn from_bytes(hdr: &[u8], f: F) -> Result + where F: FnOnce(HdrName) -> U, + { + let mut buf = unsafe { mem::uninitialized() }; + let hdr = parse_hdr(hdr, &mut buf, &HEADER_CHARS)?; + Ok(f(hdr)) + } + + pub fn from_static(hdr: &'static str, f: F) -> U + where F: FnOnce(HdrName) -> U, + { + let mut buf = unsafe { mem::uninitialized() }; + let hdr = parse_hdr(hdr.as_bytes(), &mut buf, &HEADER_CHARS) + .expect("static str is invalid name"); + f(hdr) + } +} + +#[doc(hidden)] +impl<'a> From> for HeaderName { + fn from(src: HdrName<'a>) -> HeaderName { + match src.inner { + Repr::Standard(s) => { + HeaderName { + inner: Repr::Standard(s), + } + } + Repr::Custom(maybe_lower) => { + if maybe_lower.lower { + let buf = Bytes::from(&maybe_lower.buf[..]); + let byte_str = unsafe { ByteStr::from_utf8_unchecked(buf) }; + + HeaderName { + inner: Repr::Custom(Custom(byte_str)), + } + } else { + use bytes::{BufMut}; + let mut dst = BytesMut::with_capacity(maybe_lower.buf.len()); + + for b in maybe_lower.buf.iter() { + dst.put(HEADER_CHARS[*b as usize]); + } + + let buf = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) }; + + HeaderName { + inner: Repr::Custom(Custom(buf)), + } + } + } + } + } +} + +#[doc(hidden)] +impl<'a> PartialEq> 
for HeaderName { + #[inline] + fn eq(&self, other: &HdrName<'a>) -> bool { + match self.inner { + Repr::Standard(a) => { + match other.inner { + Repr::Standard(b) => a == b, + _ => false, + } + } + Repr::Custom(Custom(ref a)) => { + match other.inner { + Repr::Custom(ref b) => { + if b.lower { + a.as_bytes() == b.buf + } else { + eq_ignore_ascii_case(a.as_bytes(), b.buf) + } + } + _ => false, + } + } + } + } +} + +// ===== Custom ===== + +impl Hash for Custom { + #[inline] + fn hash(&self, hasher: &mut H) { + hasher.write(self.0.as_bytes()) + } +} + +// ===== MaybeLower ===== + +impl<'a> Hash for MaybeLower<'a> { + #[inline] + fn hash(&self, hasher: &mut H) { + if self.lower { + hasher.write(self.buf); + } else { + for &b in self.buf { + hasher.write(&[HEADER_CHARS[b as usize]]); + } + } + } +} + +// Assumes that the left hand side is already lower case +#[inline] +fn eq_ignore_ascii_case(lower: &[u8], s: &[u8]) -> bool { + if lower.len() != s.len() { + return false; + } + + lower.iter().zip(s).all(|(a, b)| { + *a == HEADER_CHARS[*b as usize] + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use self::StandardHeader::Vary; + + #[test] + fn test_bounds() { + fn check_bounds() {} + check_bounds::(); + } + + #[test] + fn test_parse_invalid_headers() { + for i in 0..128 { + let hdr = vec![1u8; i]; + assert!(HeaderName::from_bytes(&hdr).is_err(), "{} invalid header chars did not fail", i); + } + } + + #[test] + fn test_from_hdr_name() { + use self::StandardHeader::Vary; + + let name = HeaderName::from(HdrName { + inner: Repr::Standard(Vary), + }); + + assert_eq!(name.inner, Repr::Standard(Vary)); + + let name = HeaderName::from(HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"hello-world", + lower: true, + }), + }); + + assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); + + let name = HeaderName::from(HdrName { + inner: Repr::Custom(MaybeLower { + buf: b"Hello-World", + lower: false, + }), + }); + + assert_eq!(name.inner, 
Repr::Custom(Custom(ByteStr::from_static("hello-world")))); + } + + #[test] + fn test_eq_hdr_name() { + use self::StandardHeader::Vary; + + let a = HeaderName { inner: Repr::Standard(Vary) }; + let b = HdrName { inner: Repr::Standard(Vary) }; + + assert_eq!(a, b); + + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("vaary"))) }; + assert_ne!(a, b); + + let b = HdrName { inner: Repr::Custom(MaybeLower { + buf: b"vaary", + lower: true, + })}; + + assert_eq!(a, b); + + let b = HdrName { inner: Repr::Custom(MaybeLower { + buf: b"vaary", + lower: false, + })}; + + assert_eq!(a, b); + + let b = HdrName { inner: Repr::Custom(MaybeLower { + buf: b"VAARY", + lower: false, + })}; + + assert_eq!(a, b); + + let a = HeaderName { inner: Repr::Standard(Vary) }; + assert_ne!(a, b); + } + + #[test] + fn test_from_static_std() { + let a = HeaderName { inner: Repr::Standard(Vary) }; + + let b = HeaderName::from_static("vary"); + assert_eq!(a, b); + + let b = HeaderName::from_static("vaary"); + assert_ne!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_std_uppercase() { + HeaderName::from_static("Vary"); + } + + #[test] + #[should_panic] + fn test_from_static_std_symbol() { + HeaderName::from_static("vary{}"); + } + + // MaybeLower { lower: true } + #[test] + fn test_from_static_custom_short() { + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("customheader"))) }; + let b = HeaderName::from_static("customheader"); + assert_eq!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_custom_short_uppercase() { + HeaderName::from_static("custom header"); + } + + #[test] + #[should_panic] + fn test_from_static_custom_short_symbol() { + HeaderName::from_static("CustomHeader"); + } + + // MaybeLower { lower: false } + #[test] + fn test_from_static_custom_long() { + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static( + "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" + ))) }; 
+ let b = HeaderName::from_static( + "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" + ); + assert_eq!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_custom_long_uppercase() { + HeaderName::from_static( + "Longer-Than-63--ThisHeaderIsLongerThanSixtyThreeCharactersAndThusHandledDifferent" + ); + } + + #[test] + #[should_panic] + fn test_from_static_custom_long_symbol() { + HeaderName::from_static( + "longer-than-63--thisheader{}{}{}{}islongerthansixtythreecharactersandthushandleddifferent" + ); + } + + #[test] + fn test_from_static_custom_single_char() { + let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("a"))) }; + let b = HeaderName::from_static("a"); + assert_eq!(a, b); + } + + #[test] + #[should_panic] + fn test_from_static_empty() { + HeaderName::from_static(""); + } +} diff --git a/third_party/rust/http/src/header/value.rs b/third_party/rust/http/src/header/value.rs new file mode 100644 index 000000000000..da57dccee5a0 --- /dev/null +++ b/third_party/rust/http/src/header/value.rs @@ -0,0 +1,779 @@ +use bytes::{Bytes, BytesMut}; + +use std::{cmp, fmt, mem, str}; +use std::error::Error; +use std::str::FromStr; + +use ::convert::HttpTryFrom; +use ::error::Never; +use header::name::HeaderName; + +/// Represents an HTTP header field value. +/// +/// In practice, HTTP header field values are usually valid ASCII. However, the +/// HTTP spec allows for a header value to contain opaque bytes as well. In this +/// case, the header field value is not able to be represented as a string. +/// +/// To handle this, the `HeaderValue` is useable as a type and can be compared +/// with strings and implements `Debug`. A `to_str` fn is provided that returns +/// an `Err` if the header value contains non visible ascii characters. +#[derive(Clone, Hash)] +pub struct HeaderValue { + inner: Bytes, + is_sensitive: bool, +} + +/// A possible error when converting a `HeaderValue` from a string or byte +/// slice. 
+#[derive(Debug)] +pub struct InvalidHeaderValue { + _priv: (), +} + +/// A possible error when converting a `HeaderValue` from a string or byte +/// slice. +#[derive(Debug)] +pub struct InvalidHeaderValueBytes(InvalidHeaderValue); + +/// A possible error when converting a `HeaderValue` to a string representation. +/// +/// Header field values may contain opaque bytes, in which case it is not +/// possible to represent the value as a string. +#[derive(Debug)] +pub struct ToStrError { + _priv: (), +} + +impl HeaderValue { + /// Convert a static string to a `HeaderValue`. + /// + /// This function will not perform any copying, however the string is + /// checked to ensure that no invalid characters are present. Only visible + /// ASCII characters (32-127) are permitted. + /// + /// # Panics + /// + /// This function panics if the argument contains invalid header value + /// characters. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val, "hello"); + /// ``` + #[inline] + pub fn from_static(src: &'static str) -> HeaderValue { + let bytes = src.as_bytes(); + for &b in bytes { + if !is_visible_ascii(b) { + panic!("invalid header value"); + } + } + + HeaderValue { + inner: Bytes::from_static(bytes), + is_sensitive: false, + } + } + + /// Attempt to convert a string to a `HeaderValue`. + /// + /// If the argument contains invalid header value characters, an error is + /// returned. Only visible ASCII characters (32-127) are permitted. Use + /// `from_bytes` to create a `HeaderValue` that includes opaque octets + /// (128-255). + /// + /// This function is intended to be replaced in the future by a `TryFrom` + /// implementation once the trait is stabilized in std. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_str("hello").unwrap(); + /// assert_eq!(val, "hello"); + /// ``` + /// + /// An invalid value + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_str("\n"); + /// assert!(val.is_err()); + /// ``` + #[inline] + pub fn from_str(src: &str) -> Result { + HeaderValue::try_from(src) + } + + /// Converts a HeaderName into a HeaderValue + /// + /// Since every valid HeaderName is a valid HeaderValue this is done infallibly. + /// + /// # Examples + /// + /// ``` + /// # use http::header::{HeaderValue, HeaderName}; + /// # use http::header::ACCEPT; + /// let val = HeaderValue::from_name(ACCEPT); + /// assert_eq!(val, HeaderValue::from_bytes(b"accept").unwrap()); + /// ``` + #[inline] + pub fn from_name(name: HeaderName) -> HeaderValue { + name.into() + } + + /// Attempt to convert a byte slice to a `HeaderValue`. + /// + /// If the argument contains invalid header value bytes, an error is + /// returned. Only byte values between 32 and 255 (inclusive) are permitted, + /// excluding byte 127 (DEL). + /// + /// This function is intended to be replaced in the future by a `TryFrom` + /// implementation once the trait is stabilized in std. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_bytes(b"hello\xfa").unwrap(); + /// assert_eq!(val, &b"hello\xfa"[..]); + /// ``` + /// + /// An invalid value + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_bytes(b"\n"); + /// assert!(val.is_err()); + /// ``` + #[inline] + pub fn from_bytes(src: &[u8]) -> Result { + HeaderValue::try_from(src) + } + + /// Attempt to convert a `Bytes` buffer to a `HeaderValue`. + /// + /// If the argument contains invalid header value bytes, an error is + /// returned. Only byte values between 32 and 255 (inclusive) are permitted, + /// excluding byte 127 (DEL). 
+ /// + /// This function is intended to be replaced in the future by a `TryFrom` + /// implementation once the trait is stabilized in std. + #[inline] + pub fn from_shared(src: Bytes) -> Result { + HeaderValue::try_from(src).map_err(InvalidHeaderValueBytes) + } + + /// Convert a `Bytes` directly into a `HeaderValue` without validating. + /// + /// This function does NOT validate that illegal bytes are not contained + /// within the buffer. + #[inline] + pub unsafe fn from_shared_unchecked(src: Bytes) -> HeaderValue { + HeaderValue { + inner: src, + is_sensitive: false, + } + } + + fn try_from + Into>(src: T) -> Result { + for &b in src.as_ref() { + if !is_valid(b) { + return Err(InvalidHeaderValue { + _priv: (), + }); + } + } + Ok(HeaderValue { + inner: src.into(), + is_sensitive: false, + }) + } + + /// Yields a `&str` slice if the `HeaderValue` only contains visible ASCII + /// chars. + /// + /// This function will perform a scan of the header value, checking all the + /// characters. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val.to_str().unwrap(), "hello"); + /// ``` + pub fn to_str(&self) -> Result<&str, ToStrError> { + let bytes = self.as_ref(); + + for &b in bytes { + if !is_visible_ascii(b) { + return Err(ToStrError { _priv: () }); + } + } + + unsafe { Ok(str::from_utf8_unchecked(bytes)) } + } + + /// Returns the length of `self`. + /// + /// This length is in bytes. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val.len(), 5); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.as_ref().len() + } + + /// Returns true if the `HeaderValue` has a length of zero bytes. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static(""); + /// assert!(val.is_empty()); + /// + /// let val = HeaderValue::from_static("hello"); + /// assert!(!val.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Converts a `HeaderValue` to a byte slice. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let val = HeaderValue::from_static("hello"); + /// assert_eq!(val.as_bytes(), b"hello"); + /// ``` + #[inline] + pub fn as_bytes(&self) -> &[u8] { + self.as_ref() + } + + /// Mark that the header value represents sensitive information. + /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let mut val = HeaderValue::from_static("my secret"); + /// + /// val.set_sensitive(true); + /// assert!(val.is_sensitive()); + /// + /// val.set_sensitive(false); + /// assert!(!val.is_sensitive()); + /// ``` + #[inline] + pub fn set_sensitive(&mut self, val: bool) { + self.is_sensitive = val; + } + + /// Returns `true` if the value represents sensitive data. + /// + /// Sensitive data could represent passwords or other data that should not + /// be stored on disk or in memory. This setting can be used by components + /// like caches to avoid storing the value. HPACK encoders must set the + /// header field to never index when `is_sensitive` returns true. + /// + /// Note that sensitivity is not factored into equality or ordering. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::header::HeaderValue; + /// let mut val = HeaderValue::from_static("my secret"); + /// + /// val.set_sensitive(true); + /// assert!(val.is_sensitive()); + /// + /// val.set_sensitive(false); + /// assert!(!val.is_sensitive()); + /// ``` + #[inline] + pub fn is_sensitive(&self) -> bool { + self.is_sensitive + } +} + +impl AsRef<[u8]> for HeaderValue { + #[inline] + fn as_ref(&self) -> &[u8] { + self.inner.as_ref() + } +} + +impl fmt::Debug for HeaderValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.is_sensitive { + f.write_str("Sensitive") + } else { + f.write_str("\"")?; + let mut from = 0; + let bytes = self.as_bytes(); + for (i, &b) in bytes.iter().enumerate() { + if !is_visible_ascii(b) || b == b'"' { + if from != i { + f.write_str(unsafe { + str::from_utf8_unchecked(&bytes[from..i]) + })?; + } + if b == b'"' { + f.write_str("\\\"")?; + } else { + write!(f, "\\x{:x}", b)?; + } + from = i + 1; + } + } + + f.write_str(unsafe { + str::from_utf8_unchecked(&bytes[from..]) + })?; + f.write_str("\"") + } + } +} + +impl From for HeaderValue { + #[inline] + fn from(h: HeaderName) -> HeaderValue { + HeaderValue { + inner: h.into(), + is_sensitive: false, + } + } +} + +macro_rules! from_integers { + ($($name:ident: $t:ident => $max_len:expr),*) => {$( + impl From<$t> for HeaderValue { + fn from(num: $t) -> HeaderValue { + let mut buf = if mem::size_of::() - 1 < $max_len { + // On 32bit platforms, BytesMut max inline size + // is 15 bytes, but the $max_len could be bigger. + // + // The likelihood of the number *actually* being + // that big is very small, so only allocate + // if the number needs that space. + // + // The largest decimal number in 15 digits: + // It wold be 10.pow(15) - 1, but this is a constant + // version. + if num as u64 > 999_999_999_999_999_999 { + BytesMut::with_capacity($max_len) + } else { + // fits inline... 
+ BytesMut::new() + } + } else { + // full value fits inline, so don't allocate! + BytesMut::new() + }; + let _ = ::itoa::fmt(&mut buf, num); + HeaderValue { + inner: buf.freeze(), + is_sensitive: false, + } + } + } + + impl HttpTryFrom<$t> for HeaderValue { + type Error = Never; + + #[inline] + fn try_from(num: $t) -> Result { + Ok(num.into()) + } + } + + #[test] + fn $name() { + let n: $t = 55; + let val = HeaderValue::from(n); + assert_eq!(val, &n.to_string()); + + let n = ::std::$t::MAX; + let val = HeaderValue::from(n); + assert_eq!(val, &n.to_string()); + } + )*}; +} + +from_integers! { + // integer type => maximum decimal length + + // u8 purposely left off... HeaderValue::from(b'3') could be confusing + from_u16: u16 => 5, + from_i16: i16 => 6, + from_u32: u32 => 10, + from_i32: i32 => 11, + from_u64: u64 => 20, + from_i64: i64 => 20 +} + +#[cfg(target_pointer_width = "16")] +from_integers! { + from_usize: usize => 5, + from_isize: isize => 6 +} + +#[cfg(target_pointer_width = "32")] +from_integers! { + from_usize: usize => 10, + from_isize: isize => 11 +} + +#[cfg(target_pointer_width = "64")] +from_integers! 
{ + from_usize: usize => 20, + from_isize: isize => 20 +} + +#[cfg(test)] +mod from_header_name_tests { + use super::*; + use header::map::HeaderMap; + use header::name; + + #[test] + fn it_can_insert_header_name_as_header_value() { + let mut map = HeaderMap::new(); + map.insert(name::UPGRADE, name::SEC_WEBSOCKET_PROTOCOL.into()); + map.insert(name::ACCEPT, name::HeaderName::from_bytes(b"hello-world").unwrap().into()); + + assert_eq!( + map.get(name::UPGRADE).unwrap(), + HeaderValue::from_bytes(b"sec-websocket-protocol").unwrap() + ); + + assert_eq!( + map.get(name::ACCEPT).unwrap(), + HeaderValue::from_bytes(b"hello-world").unwrap() + ); + } +} + +impl FromStr for HeaderValue { + type Err = InvalidHeaderValue; + + #[inline] + fn from_str(s: &str) -> Result { + HeaderValue::from_str(s) + } +} + +impl From for Bytes { + #[inline] + fn from(value: HeaderValue) -> Bytes { + value.inner + } +} + +impl<'a> HttpTryFrom<&'a HeaderValue> for HeaderValue { + type Error = ::error::Never; + + #[inline] + fn try_from(t: &'a HeaderValue) -> Result { + Ok(t.clone()) + } +} + +impl<'a> HttpTryFrom<&'a str> for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(t: &'a str) -> Result { + t.parse() + } +} + +impl<'a> HttpTryFrom<&'a [u8]> for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + HeaderValue::from_bytes(t) + } +} + +impl HttpTryFrom for HeaderValue { + type Error = InvalidHeaderValueBytes; + + #[inline] + fn try_from(t: String) -> Result { + HeaderValue::from_shared(t.into()) + } +} + +impl HttpTryFrom for HeaderValue { + type Error = InvalidHeaderValueBytes; + + #[inline] + fn try_from(bytes: Bytes) -> Result { + HeaderValue::from_shared(bytes) + } +} + +impl HttpTryFrom for HeaderValue { + type Error = InvalidHeaderValue; + + #[inline] + fn try_from(name: HeaderName) -> Result { + // Infallable as header names have the same validations + Ok(name.into()) + } +} + +#[cfg(test)] +mod 
try_from_header_name_tests { + use super::*; + use header::name; + + #[test] + fn it_converts_using_try_from() { + assert_eq!( + HeaderValue::try_from(name::UPGRADE).unwrap(), + HeaderValue::from_bytes(b"upgrade").unwrap() + ); + } +} + +fn is_visible_ascii(b: u8) -> bool { + b >= 32 && b < 127 +} + +#[inline] +fn is_valid(b: u8) -> bool { + b >= 32 && b != 127 +} + +impl fmt::Display for InvalidHeaderValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +impl Error for InvalidHeaderValue { + fn description(&self) -> &str { + "failed to parse header value" + } +} + +impl fmt::Display for InvalidHeaderValueBytes { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl Error for InvalidHeaderValueBytes { + fn description(&self) -> &str { + self.0.description() + } +} + +impl fmt::Display for ToStrError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +impl Error for ToStrError { + fn description(&self) -> &str { + "failed to convert header to a str" + } +} + +// ===== PartialEq / PartialOrd ===== + +impl PartialEq for HeaderValue { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + self.inner == other.inner + } +} + +impl Eq for HeaderValue {} + +impl PartialOrd for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.inner.partial_cmp(&other.inner) + } +} + +impl Ord for HeaderValue { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.inner.cmp(&other.inner) + } +} + +impl PartialEq for HeaderValue { + #[inline] + fn eq(&self, other: &str) -> bool { + self.inner == other.as_bytes() + } +} + +impl PartialEq<[u8]> for HeaderValue { + #[inline] + fn eq(&self, other: &[u8]) -> bool { + self.inner == other + } +} + +impl PartialOrd for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &str) -> Option { + (*self.inner).partial_cmp(other.as_bytes()) + } +} + +impl 
PartialOrd<[u8]> for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &[u8]) -> Option { + (*self.inner).partial_cmp(other) + } +} + +impl PartialEq for str { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl PartialEq for [u8] { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl PartialOrd for str { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +impl PartialOrd for [u8] { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.partial_cmp(other.as_bytes()) + } +} + +impl PartialEq for HeaderValue { + #[inline] + fn eq(&self, other: &String) -> bool { + *self == &other[..] + } +} + +impl PartialOrd for HeaderValue { + #[inline] + fn partial_cmp(&self, other: &String) -> Option { + self.inner.partial_cmp(other.as_bytes()) + } +} + +impl PartialEq for String { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl PartialOrd for String { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +impl<'a> PartialEq for &'a HeaderValue { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + **self == *other + } +} + +impl<'a> PartialOrd for &'a HeaderValue { + #[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + (**self).partial_cmp(other) + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for HeaderValue + where HeaderValue: PartialEq +{ + #[inline] + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} + +impl<'a, T: ?Sized> PartialOrd<&'a T> for HeaderValue + where HeaderValue: PartialOrd +{ + #[inline] + fn partial_cmp(&self, other: &&'a T) -> Option { + self.partial_cmp(*other) + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &HeaderValue) -> bool { + *other == *self + } +} + +impl<'a> PartialOrd for &'a str { + 
#[inline] + fn partial_cmp(&self, other: &HeaderValue) -> Option { + self.as_bytes().partial_cmp(other.as_bytes()) + } +} + +#[test] +fn test_try_from() { + HeaderValue::try_from(vec![127]).unwrap_err(); +} + +#[test] +fn test_debug() { + let cases = &[ + ("hello", "\"hello\""), + ("hello \"world\"", "\"hello \\\"world\\\"\""), + ("\u{7FFF}hello", "\"\\xe7\\xbf\\xbfhello\""), + ]; + + for &(value, expected) in cases { + let val = HeaderValue::from_bytes(value.as_bytes()).unwrap(); + let actual = format!("{:?}", val); + assert_eq!(expected, actual); + } + + let mut sensitive = HeaderValue::from_static("password"); + sensitive.set_sensitive(true); + assert_eq!("Sensitive", format!("{:?}", sensitive)); +} diff --git a/third_party/rust/http/src/lib.rs b/third_party/rust/http/src/lib.rs new file mode 100644 index 000000000000..8f5032c8b0f7 --- /dev/null +++ b/third_party/rust/http/src/lib.rs @@ -0,0 +1,205 @@ +#![doc(html_root_url = "https://docs.rs/http/0.1.10")] + +//! A general purpose library of common HTTP types +//! +//! This crate is a general purpose library for common types found when working +//! with the HTTP protocol. You'll find `Request` and `Response` types for +//! working as either a client or a server as well as all of their components. +//! Notably you'll find `Uri` for what a `Request` is requesting, a `Method` +//! for how it's being requested, a `StatusCode` for what sort of response came +//! back, a `Version` for how this was communicated, and +//! `HeaderName`/`HeaderValue` definitions to get grouped in a `HeaderMap` to +//! work with request/response headers. +//! +//! You will notably *not* find an implementation of sending requests or +//! spinning up a server in this crate. It's intended that this crate is the +//! "standard library" for HTTP clients and servers without dictating any +//! particular implementation. Note that this crate is still early on in its +//! 
lifecycle so the support libraries that integrate with the `http` crate are +//! a work in progress! Stay tuned and we'll be sure to highlight crates here +//! in the future. +//! +//! ## Requests and Responses +//! +//! Perhaps the main two types in this crate are the `Request` and `Response` +//! types. A `Request` could either be constructed to get sent off as a client +//! or it can also be received to generate a `Response` for a server. Similarly +//! as a client a `Response` is what you get after sending a `Request`, whereas +//! on a server you'll be manufacturing a `Response` to send back to the client. +//! +//! Each type has a number of accessors for the component fields. For as a +//! server you might want to inspect a requests URI to dispatch it: +//! +//! ``` +//! use http::{Request, Response}; +//! +//! fn response(req: Request<()>) -> http::Result> { +//! match req.uri().path() { +//! "/" => index(req), +//! "/foo" => foo(req), +//! "/bar" => bar(req), +//! _ => not_found(req), +//! } +//! } +//! # fn index(_req: Request<()>) -> http::Result> { panic!() } +//! # fn foo(_req: Request<()>) -> http::Result> { panic!() } +//! # fn bar(_req: Request<()>) -> http::Result> { panic!() } +//! # fn not_found(_req: Request<()>) -> http::Result> { panic!() } +//! ``` +//! +//! On a `Request` you'll also find accessors like `method` to return a +//! `Method` and `headers` to inspect the various headers. A `Response` +//! has similar methods for headers, the status code, etc. +//! +//! In addition to getters, request/response types also have mutable accessors +//! to edit the request/response: +//! +//! ``` +//! use http::{Response, StatusCode}; +//! use http::header::{CONTENT_TYPE, HeaderValue}; +//! +//! fn add_server_headers(response: &mut Response) { +//! response.headers_mut() +//! .insert(CONTENT_TYPE, HeaderValue::from_static("text/html")); +//! *response.status_mut() = StatusCode::OK; +//! } +//! ``` +//! +//! 
And finally, one of the most important aspects of requests/responses, the +//! body! The `Request` and `Response` types in this crate are *generic* in +//! what their body is. This allows downstream libraries to use different +//! representations such as `Request>`, `Response`, +//! `Request, Error = _>>`, or even +//! `Response` where the custom type was deserialized from JSON. +//! +//! The body representation is intentionally flexible to give downstream +//! libraries maximal flexibility in implementing the body as appropriate. +//! +//! ## HTTP Headers +//! +//! Another major piece of functionality in this library is HTTP header +//! interpretation and generation. The `HeaderName` type serves as a way to +//! define header *names*, or what's to the left of the colon. A `HeaderValue` +//! conversely is the header *value*, or what's to the right of a colon. +//! +//! For example, if you have an HTTP request that looks like: +//! +//! ```http +//! GET /foo HTTP/1.1 +//! Accept: text/html +//! ``` +//! +//! Then `"Accept"` is a `HeaderName` while `"text/html"` is a `HeaderValue`. +//! Each of these is a dedicated type to allow for a number of interesting +//! optimizations and to also encode the static guarantees of each type. For +//! example a `HeaderName` is always a valid `&str`, but a `HeaderValue` may +//! not be valid UTF-8. +//! +//! The most common header names are already defined for you as constant values +//! in the `header` module of this crate. For example: +//! +//! ``` +//! use http::header::{self, HeaderName}; +//! +//! let name: HeaderName = header::ACCEPT; +//! assert_eq!(name.as_str(), "accept"); +//! ``` +//! +//! You can, however, also parse header names from strings: +//! +//! ``` +//! use http::header::{self, HeaderName}; +//! +//! let name = "Accept".parse::().unwrap(); +//! assert_eq!(name, header::ACCEPT); +//! ``` +//! +//! Header values can be created from string literals through the `from_static` +//! function: +//! +//! ``` +//! 
use http::header::HeaderValue; +//! +//! let value = HeaderValue::from_static("text/html"); +//! assert_eq!(value.as_bytes(), b"text/html"); +//! ``` +//! +//! And header values can also be parsed like names: +//! +//! ``` +//! use http::header::HeaderValue; +//! +//! let value = "text/html"; +//! let value = value.parse::().unwrap(); +//! ``` +//! +//! Most HTTP requests and responses tend to come with more than one header, so +//! it's not too useful to just work with names and values only! This crate also +//! provides a `HeaderMap` type which is a specialized hash map for keys as +//! `HeaderName` and generic values. This type, like header names, is optimized +//! for common usage but should continue to scale with your needs over time. +//! +//! # URIs +//! +//! Each HTTP `Request` has an associated URI with it. This may just be a path +//! like `/index.html` but it could also be an absolute URL such as +//! `https://www.rust-lang.org/index.html`. A `URI` has a number of accessors to +//! interpret it: +//! +//! ``` +//! use http::Uri; +//! +//! let uri = "https://www.rust-lang.org/index.html".parse::().unwrap(); +//! +//! assert_eq!(uri.scheme(), Some("https")); +//! assert_eq!(uri.host(), Some("www.rust-lang.org")); +//! assert_eq!(uri.path(), "/index.html"); +//! assert_eq!(uri.query(), None); +//! 
``` + +#![deny(warnings, missing_docs, missing_debug_implementations)] + +extern crate bytes; +extern crate fnv; +extern crate itoa; + +pub mod header; +pub mod method; +pub mod request; +pub mod response; +pub mod status; +pub mod version; +pub mod uri; + +mod byte_str; +mod convert; +mod error; +mod extensions; + +pub use convert::HttpTryFrom; +pub use error::{Error, Result}; +pub use extensions::Extensions; +pub use header::HeaderMap; +pub use method::Method; +pub use request::Request; +pub use response::Response; +pub use status::StatusCode; +pub use uri::Uri; +pub use version::Version; + +fn _assert_types() { + fn assert_send() {} + fn assert_sync() {} + + assert_send::>(); + assert_send::>(); + + assert_sync::>(); + assert_sync::>(); +} + +mod sealed { + /// Private trait to this crate to prevent traits from being implemented in + /// downstream crates. + pub trait Sealed {} +} diff --git a/third_party/rust/http/src/method.rs b/third_party/rust/http/src/method.rs new file mode 100644 index 000000000000..953dd61a22b8 --- /dev/null +++ b/third_party/rust/http/src/method.rs @@ -0,0 +1,406 @@ +//! The HTTP request method +//! +//! This module contains HTTP-method related structs and errors and such. The +//! main type of this module, `Method`, is also reexported at the root of the +//! crate as `http::Method` and is intended for import through that location +//! primarily. +//! +//! # Examples +//! +//! ``` +//! use http::Method; +//! +//! assert_eq!(Method::GET, Method::from_bytes(b"GET").unwrap()); +//! assert!(Method::GET.is_idempotent()); +//! assert_eq!(Method::POST.as_str(), "POST"); +//! ``` + +use HttpTryFrom; +use self::Inner::*; + +use std::{fmt, str}; +use std::convert::AsRef; +use std::error::Error; +use std::str::FromStr; + +/// The Request Method (VERB) +/// +/// This type also contains constants for a number of common HTTP methods such +/// as GET, POST, etc. 
+/// +/// Currently includes 8 variants representing the 8 methods defined in +/// [RFC 7230](https://tools.ietf.org/html/rfc7231#section-4.1), plus PATCH, +/// and an Extension variant for all extensions. +/// +/// # Examples +/// +/// ``` +/// use http::Method; +/// +/// assert_eq!(Method::GET, Method::from_bytes(b"GET").unwrap()); +/// assert!(Method::GET.is_idempotent()); +/// assert_eq!(Method::POST.as_str(), "POST"); +/// ``` +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct Method(Inner); + +/// A possible error value when converting `Method` from bytes. +#[derive(Debug)] +pub struct InvalidMethod { + _priv: (), +} + +#[derive(Clone, PartialEq, Eq, Hash)] +enum Inner { + Options, + Get, + Post, + Put, + Delete, + Head, + Trace, + Connect, + Patch, + // If the extension is short enough, store it inline + ExtensionInline([u8; MAX_INLINE], u8), + // Otherwise, allocate it + ExtensionAllocated(Box<[u8]>), +} + +const MAX_INLINE: usize = 15; + +// From the HTTP spec section 5.1.1, the HTTP method is case-sensitive and can +// contain the following characters: +// +// ``` +// method = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// ``` +// +// https://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01#Method +// +const METHOD_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 1x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 2x + b'\0', b'\0', b'\0', b'!', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 3x + b'\0', b'\0', b'*', b'+', b'\0', b'-', b'.', b'\0', b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'\0', b'\0', // 5x + b'\0', b'\0', b'\0', b'\0', b'\0', b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', b'\0', b'\0', b'\0', b'^', b'_', b'`', b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', b'\0', b'|', b'\0', b'~', b'\0', b'\0', b'\0', // 12x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 13x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 14x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 15x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 16x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 17x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 18x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 19x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 20x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 21x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 22x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', 
b'\0', b'\0', // 23x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 24x + b'\0', b'\0', b'\0', b'\0', b'\0', b'\0' // 25x +]; + + +impl Method { + /// GET + pub const GET: Method = Method(Get); + + /// POST + pub const POST: Method = Method(Post); + + /// PUT + pub const PUT: Method = Method(Put); + + /// DELETE + pub const DELETE: Method = Method(Delete); + + /// HEAD + pub const HEAD: Method = Method(Head); + + /// OPTIONS + pub const OPTIONS: Method = Method(Options); + + /// CONNECT + pub const CONNECT: Method = Method(Connect); + + /// PATCH + pub const PATCH: Method = Method(Patch); + + /// TRACE + pub const TRACE: Method = Method(Trace); + + /// Converts a slice of bytes to an HTTP method. + pub fn from_bytes(src: &[u8]) -> Result { + match src.len() { + 3 => { + match src { + b"GET" => Ok(Method(Get)), + b"PUT" => Ok(Method(Put)), + _ => Method::extension_inline(src), + } + } + 4 => { + match src { + b"POST" => Ok(Method(Post)), + b"HEAD" => Ok(Method(Head)), + _ => Method::extension_inline(src), + } + } + 5 => { + match src { + b"PATCH" => Ok(Method(Patch)), + b"TRACE" => Ok(Method(Trace)), + _ => Method::extension_inline(src), + } + } + 6 => { + match src { + b"DELETE" => Ok(Method(Delete)), + _ => Method::extension_inline(src), + } + } + 7 => { + match src { + b"OPTIONS" => Ok(Method(Options)), + b"CONNECT" => Ok(Method(Connect)), + _ => Method::extension_inline(src), + } + } + _ => { + if src.len() < MAX_INLINE { + Method::extension_inline(src) + } else { + let mut data: Vec = vec![0; src.len()]; + + write_checked(src, &mut data)?; + + Ok(Method(ExtensionAllocated(data.into_boxed_slice()))) + } + } + } + } + + fn extension_inline(src: &[u8]) -> Result { + let mut data: [u8; MAX_INLINE] = Default::default(); + + write_checked(src, &mut data)?; + + Ok(Method(ExtensionInline(data, src.len() as u8))) + } + + /// Whether a method is considered "safe", meaning the request is + /// essentially read-only. 
+ /// + /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.1) + /// for more words. + pub fn is_safe(&self) -> bool { + match self.0 { + Get | Head | Options | Trace => true, + _ => false + } + } + + /// Whether a method is considered "idempotent", meaning the request has + /// the same result if executed multiple times. + /// + /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.2) for + /// more words. + pub fn is_idempotent(&self) -> bool { + if self.is_safe() { + true + } else { + match self.0 { + Put | Delete => true, + _ => false + } + } + } + + /// Return a &str representation of the HTTP method + #[inline] + pub fn as_str(&self) -> &str { + match self.0 { + Options => "OPTIONS", + Get => "GET", + Post => "POST", + Put => "PUT", + Delete => "DELETE", + Head => "HEAD", + Trace => "TRACE", + Connect => "CONNECT", + Patch => "PATCH", + ExtensionInline(ref data, len) => { + unsafe { + str::from_utf8_unchecked(&data[..len as usize]) + } + } + ExtensionAllocated(ref data) => { + unsafe { + str::from_utf8_unchecked(data) + } + } + } + } +} + +fn write_checked(src: &[u8], dst: &mut [u8]) -> Result<(), InvalidMethod> { + for (i, &b) in src.iter().enumerate() { + let b = METHOD_CHARS[b as usize]; + + if b == 0 { + return Err(InvalidMethod::new()); + } + + dst[i] = b; + } + + Ok(()) +} + +impl AsRef for Method { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl<'a> PartialEq<&'a Method> for Method { + #[inline] + fn eq(&self, other: & &'a Method) -> bool { + self == *other + } +} + +impl<'a> PartialEq for &'a Method { + #[inline] + fn eq(&self, other: &Method) -> bool { + *self == other + } +} + +impl PartialEq for Method { + #[inline] + fn eq(&self, other: &str) -> bool { + self.as_ref() == other + } +} + +impl PartialEq for str { + #[inline] + fn eq(&self, other: &Method) -> bool { + self == other.as_ref() + } +} + +impl<'a> PartialEq<&'a str> for Method { + #[inline] + fn eq(&self, other: &&'a str) -> bool { + 
self.as_ref() == *other + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &Method) -> bool { + *self == other.as_ref() + } +} + +impl fmt::Debug for Method { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.as_ref()) + } +} + +impl fmt::Display for Method { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(self.as_ref()) + } +} + +impl Default for Method { + #[inline] + fn default() -> Method { + Method::GET + } +} + +impl<'a> HttpTryFrom<&'a Method> for Method { + type Error = ::error::Never; + + #[inline] + fn try_from(t: &'a Method) -> Result { + Ok(t.clone()) + } +} + +impl<'a> HttpTryFrom<&'a [u8]> for Method { + type Error = InvalidMethod; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + Method::from_bytes(t) + } +} + +impl<'a> HttpTryFrom<&'a str> for Method { + type Error = InvalidMethod; + + #[inline] + fn try_from(t: &'a str) -> Result { + HttpTryFrom::try_from(t.as_bytes()) + } +} + +impl FromStr for Method { + type Err = InvalidMethod; + + #[inline] + fn from_str(t: &str) -> Result { + HttpTryFrom::try_from(t) + } +} + +impl InvalidMethod { + fn new() -> InvalidMethod { + InvalidMethod { + _priv: (), + } + } +} + +impl fmt::Display for InvalidMethod { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} + +impl Error for InvalidMethod { + fn description(&self) -> &str { + "invalid HTTP method" + } +} + +#[test] +fn test_method_eq() { + assert_eq!(Method::GET, Method::GET); + assert_eq!(Method::GET, "GET"); + assert_eq!(&Method::GET, "GET"); + + assert_eq!("GET", Method::GET); + assert_eq!("GET", &Method::GET); + + assert_eq!(&Method::GET, Method::GET); + assert_eq!(Method::GET, &Method::GET); +} diff --git a/third_party/rust/http/src/request.rs b/third_party/rust/http/src/request.rs new file mode 100644 index 000000000000..4d40c4d46b86 --- /dev/null +++ b/third_party/rust/http/src/request.rs @@ -0,0 +1,961 @@ +//! 
HTTP request types. +//! +//! This module contains structs related to HTTP requests, notably the +//! `Request` type itself as well as a builder to create requests. Typically +//! you'll import the `http::Request` type rather than reaching into this +//! module itself. +//! +//! # Examples +//! +//! Creating a `Request` to send +//! +//! ```no_run +//! use http::{Request, Response}; +//! +//! let mut request = Request::builder(); +//! request.uri("https://www.rust-lang.org/") +//! .header("User-Agent", "my-awesome-agent/1.0"); +//! +//! if needs_awesome_header() { +//! request.header("Awesome", "yes"); +//! } +//! +//! let response = send(request.body(()).unwrap()); +//! +//! # fn needs_awesome_header() -> bool { +//! # true +//! # } +//! # +//! fn send(req: Request<()>) -> Response<()> { +//! // ... +//! # panic!() +//! } +//! ``` +//! +//! Inspecting a request to see what was sent. +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! +//! fn respond_to(req: Request<()>) -> http::Result> { +//! if req.uri() != "/awesome-url" { +//! return Response::builder() +//! .status(StatusCode::NOT_FOUND) +//! .body(()) +//! } +//! +//! let has_awesome_header = req.headers().contains_key("Awesome"); +//! let body = req.body(); +//! +//! // ... +//! # panic!() +//! } +//! ``` + +use std::any::Any; +use std::fmt; + +use {Uri, Error, Result, HttpTryFrom, Extensions}; +use header::{HeaderMap, HeaderName, HeaderValue}; +use method::Method; +use version::Version; + +/// Represents an HTTP request. +/// +/// An HTTP request consists of a head and a potentially optional body. The body +/// component is generic, enabling arbitrary types to represent the HTTP body. +/// For example, the body could be `Vec`, a `Stream` of byte chunks, or a +/// value that has been deserialized. 
+/// +/// # Examples +/// +/// Creating a `Request` to send +/// +/// ```no_run +/// use http::{Request, Response}; +/// +/// let mut request = Request::builder(); +/// request.uri("https://www.rust-lang.org/") +/// .header("User-Agent", "my-awesome-agent/1.0"); +/// +/// if needs_awesome_header() { +/// request.header("Awesome", "yes"); +/// } +/// +/// let response = send(request.body(()).unwrap()); +/// +/// # fn needs_awesome_header() -> bool { +/// # true +/// # } +/// # +/// fn send(req: Request<()>) -> Response<()> { +/// // ... +/// # panic!() +/// } +/// ``` +/// +/// Inspecting a request to see what was sent. +/// +/// ``` +/// use http::{Request, Response, StatusCode}; +/// +/// fn respond_to(req: Request<()>) -> http::Result> { +/// if req.uri() != "/awesome-url" { +/// return Response::builder() +/// .status(StatusCode::NOT_FOUND) +/// .body(()) +/// } +/// +/// let has_awesome_header = req.headers().contains_key("Awesome"); +/// let body = req.body(); +/// +/// // ... +/// # panic!() +/// } +/// ``` +/// +/// Deserialize a request of bytes via json: +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Request; +/// use serde::de; +/// +/// fn deserialize(req: Request>) -> serde_json::Result> +/// where for<'de> T: de::Deserialize<'de>, +/// { +/// let (parts, body) = req.into_parts(); +/// let body = serde_json::from_slice(&body)?; +/// Ok(Request::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` +/// +/// Or alternatively, serialize the body of a request to json +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Request; +/// use serde::ser; +/// +/// fn serialize(req: Request) -> serde_json::Result>> +/// where T: ser::Serialize, +/// { +/// let (parts, body) = req.into_parts(); +/// let body = serde_json::to_vec(&body)?; +/// Ok(Request::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` 
+pub struct Request { + head: Parts, + body: T, +} + +/// Component parts of an HTTP `Request` +/// +/// The HTTP request head consists of a method, uri, version, and a set of +/// header fields. +pub struct Parts { + /// The request's method + pub method: Method, + + /// The request's URI + pub uri: Uri, + + /// The request's version + pub version: Version, + + /// The request's headers + pub headers: HeaderMap, + + /// The request's extensions + pub extensions: Extensions, + + _priv: (), +} + +/// An HTTP request builder +/// +/// This type can be used to construct an instance or `Request` +/// through a builder-like pattern. +#[derive(Debug)] +pub struct Builder { + head: Option, + err: Option, +} + +impl Request<()> { + /// Creates a new builder-style object to manufacture a `Request` + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::builder() + /// .method("GET") + /// .uri("https://www.rust-lang.org/") + /// .header("X-Custom-Foo", "Bar") + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::new() + } + + + /// Creates a new `Builder` initialized with a GET method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::get("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn get(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::GET).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a PUT method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::put("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn put(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::PUT).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a POST method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::post("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn post(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::POST).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a DELETE method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::delete("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn delete(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::DELETE).uri(uri); + b + } + + /// Creates a new `Builder` initialized with an OPTIONS method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::options("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// # assert_eq!(*request.method(), Method::OPTIONS); + /// ``` + pub fn options(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::OPTIONS).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a HEAD method and the given URI. 
+ /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::head("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn head(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::HEAD).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a CONNECT method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::connect("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn connect(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::CONNECT).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a PATCH method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. + /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::patch("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn patch(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::PATCH).uri(uri); + b + } + + /// Creates a new `Builder` initialized with a TRACE method and the given URI. + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Request`. 
+ /// + /// # Example + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::trace("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn trace(uri: T) -> Builder + where Uri: HttpTryFrom { + let mut b = Builder::new(); + b.method(Method::TRACE).uri(uri); + b + } +} + +impl Request { + /// Creates a new blank `Request` with the body + /// + /// The component parts of this request will be set to their default, e.g. + /// the GET method, no headers, etc. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::new("hello world"); + /// + /// assert_eq!(*request.method(), Method::GET); + /// assert_eq!(*request.body(), "hello world"); + /// ``` + #[inline] + pub fn new(body: T) -> Request { + Request { + head: Parts::new(), + body: body, + } + } + + /// Creates a new `Request` with the given components parts and body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::new("hello world"); + /// let (mut parts, body) = request.into_parts(); + /// parts.method = Method::POST; + /// + /// let request = Request::from_parts(parts, body); + /// ``` + #[inline] + pub fn from_parts(parts: Parts, body: T) -> Request { + Request { + head: parts, + body: body, + } + } + + /// Returns a reference to the associated HTTP method. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert_eq!(*request.method(), Method::GET); + /// ``` + #[inline] + pub fn method(&self) -> &Method { + &self.head.method + } + + /// Returns a mutable reference to the associated HTTP method. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request<()> = Request::default(); + /// *request.method_mut() = Method::PUT; + /// assert_eq!(*request.method(), Method::PUT); + /// ``` + #[inline] + pub fn method_mut(&mut self) -> &mut Method { + &mut self.head.method + } + + /// Returns a reference to the associated URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert_eq!(*request.uri(), *"/"); + /// ``` + #[inline] + pub fn uri(&self) -> &Uri { + &self.head.uri + } + + /// Returns a mutable reference to the associated URI. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request<()> = Request::default(); + /// *request.uri_mut() = "/hello".parse().unwrap(); + /// assert_eq!(*request.uri(), *"/hello"); + /// ``` + #[inline] + pub fn uri_mut(&mut self) -> &mut Uri { + &mut self.head.uri + } + + /// Returns the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert_eq!(request.version(), Version::HTTP_11); + /// ``` + #[inline] + pub fn version(&self) -> Version { + self.head.version + } + + /// Returns a mutable reference to the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request<()> = Request::default(); + /// *request.version_mut() = Version::HTTP_2; + /// assert_eq!(request.version(), Version::HTTP_2); + /// ``` + #[inline] + pub fn version_mut(&mut self) -> &mut Version { + &mut self.head.version + } + + /// Returns a reference to the associated header field map. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert!(request.headers().is_empty()); + /// ``` + #[inline] + pub fn headers(&self) -> &HeaderMap { + &self.head.headers + } + + /// Returns a mutable reference to the associated header field map. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut request: Request<()> = Request::default(); + /// request.headers_mut().insert(HOST, HeaderValue::from_static("world")); + /// assert!(!request.headers().is_empty()); + /// ``` + #[inline] + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.head.headers + } + + + /// Returns a reference to the associated extensions. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request<()> = Request::default(); + /// assert!(request.extensions().get::().is_none()); + /// ``` + #[inline] + pub fn extensions(&self) -> &Extensions { + &self.head.extensions + } + + /// Returns a mutable reference to the associated extensions. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut request: Request<()> = Request::default(); + /// request.extensions_mut().insert("hello"); + /// assert_eq!(request.extensions().get(), Some(&"hello")); + /// ``` + #[inline] + pub fn extensions_mut(&mut self) -> &mut Extensions { + &mut self.head.extensions + } + + /// Returns a reference to the associated HTTP body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request: Request = Request::default(); + /// assert!(request.body().is_empty()); + /// ``` + #[inline] + pub fn body(&self) -> &T { + &self.body + } + + /// Returns a mutable reference to the associated HTTP body. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut request: Request = Request::default(); + /// request.body_mut().push_str("hello world"); + /// assert!(!request.body().is_empty()); + /// ``` + #[inline] + pub fn body_mut(&mut self) -> &mut T { + &mut self.body + } + + + /// Consumes the request, returning just the body. + /// + /// # Examples + /// + /// ``` + /// # use http::Request; + /// let request = Request::new(10); + /// let body = request.into_body(); + /// assert_eq!(body, 10); + /// ``` + #[inline] + pub fn into_body(self) -> T { + self.body + } + + /// Consumes the request returning the head and body parts. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::new(()); + /// let (parts, body) = request.into_parts(); + /// assert_eq!(parts.method, Method::GET); + /// ``` + #[inline] + pub fn into_parts(self) -> (Parts, T) { + (self.head, self.body) + } + + /// Consumes the request returning a new request with body mapped to the + /// return type of the passed in function. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let request = Request::builder().body("some string").unwrap(); + /// let mapped_request: Request<&[u8]> = request.map(|b| { + /// assert_eq!(b, "some string"); + /// b.as_bytes() + /// }); + /// assert_eq!(mapped_request.body(), &"some string".as_bytes()); + /// ``` + #[inline] + pub fn map(self, f: F) -> Request + where F: FnOnce(T) -> U + { + Request { body: f(self.body), head: self.head } + } +} + +impl Default for Request { + fn default() -> Request { + Request::new(T::default()) + } +} + +impl fmt::Debug for Request { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Request") + .field("method", self.method()) + .field("uri", self.uri()) + .field("version", &self.version()) + .field("headers", self.headers()) + // omits Extensions because not useful + .field("body", self.body()) + .finish() + } +} + +impl Parts { + /// Creates a new default instance of `Parts` + fn new() -> Parts { + Parts{ + method: Method::default(), + uri: Uri::default(), + version: Version::default(), + headers: HeaderMap::default(), + extensions: Extensions::default(), + _priv: (), + } + } +} + +impl fmt::Debug for Parts { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Parts") + .field("method", &self.method) + .field("uri", &self.uri) + .field("version", &self.version) + .field("headers", &self.headers) + // omits Extensions because not useful + // omits _priv because not useful + .finish() + } +} + +impl Builder { + /// Creates a new default instance of `Builder` to construct either a + /// `Head` or a `Request`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = request::Builder::new() + /// .method("POST") + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn new() -> Builder { + Builder::default() + } + + /// Set the HTTP method for this request. 
+ /// + /// This function will configure the HTTP method of the `Request` that will + /// be returned from `Builder::build`. + /// + /// By default this is `GET`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .method("POST") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn method(&mut self, method: T) -> &mut Builder + where Method: HttpTryFrom, + { + if let Some(head) = head(&mut self.head, &self.err) { + match HttpTryFrom::try_from(method) { + Ok(s) => head.method = s, + Err(e) => self.err = Some(e.into()), + } + } + self + } + + /// Set the URI for this request. + /// + /// This function will configure the URI of the `Request` that will + /// be returned from `Builder::build`. + /// + /// By default this is `/`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .uri("https://www.rust-lang.org/") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn uri(&mut self, uri: T) -> &mut Builder + where Uri: HttpTryFrom, + { + if let Some(head) = head(&mut self.head, &self.err) { + match HttpTryFrom::try_from(uri) { + Ok(s) => head.uri = s, + Err(e) => self.err = Some(e.into()), + } + } + self + } + + /// Set the HTTP version for this request. + /// + /// This function will configure the HTTP version of the `Request` that + /// will be returned from `Builder::build`. + /// + /// By default this is HTTP/1.1 + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .version(Version::HTTP_2) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn version(&mut self, version: Version) -> &mut Builder { + if let Some(head) = head(&mut self.head, &self.err) { + head.version = version; + } + self + } + + /// Appends a header to this request builder. + /// + /// This function will append the provided key/value as a header to the + /// internal `HeaderMap` being constructed. 
Essentially this is equivalent + /// to calling `HeaderMap::append`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::HeaderValue; + /// + /// let req = Request::builder() + /// .header("Accept", "text/html") + /// .header("X-Custom-Foo", "bar") + /// .body(()) + /// .unwrap(); + /// ``` + pub fn header(&mut self, key: K, value: V) -> &mut Builder + where HeaderName: HttpTryFrom, + HeaderValue: HttpTryFrom + { + if let Some(head) = head(&mut self.head, &self.err) { + match >::try_from(key) { + Ok(key) => { + match >::try_from(value) { + Ok(value) => { head.headers.append(key, value); } + Err(e) => self.err = Some(e.into()), + } + }, + Err(e) => self.err = Some(e.into()), + }; + } + self + } + + /// Adds an extension to this builder + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let req = Request::builder() + /// .extension("My Extension") + /// .body(()) + /// .unwrap(); + /// + /// assert_eq!(req.extensions().get::<&'static str>(), + /// Some(&"My Extension")); + /// ``` + pub fn extension(&mut self, extension: T) -> &mut Builder + where T: Any + Send + Sync + 'static, + { + if let Some(head) = head(&mut self.head, &self.err) { + head.extensions.insert(extension); + } + self + } + + fn take_parts(&mut self) -> Result { + let ret = self.head.take().expect("cannot reuse request builder"); + if let Some(e) = self.err.take() { + return Err(e) + } + Ok(ret) + } + + /// "Consumes" this builder, using the provided `body` to return a + /// constructed `Request`. + /// + /// # Errors + /// + /// This function may return an error if any previously configured argument + /// failed to parse or get converted to the internal representation. For + /// example if an invalid `head` was specified via `header("Foo", + /// "Bar\r\n")` the error will be returned when this function is called + /// rather than when `header` was called. + /// + /// # Panics + /// + /// This method will panic if the builder is reused. 
The `body` function can + /// only be called once. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let request = Request::builder() + /// .body(()) + /// .unwrap(); + /// ``` + pub fn body(&mut self, body: T) -> Result> { + Ok(Request { + head: self.take_parts()?, + body: body, + }) + } +} + +fn head<'a>(head: &'a mut Option, err: &Option) + -> Option<&'a mut Parts> +{ + if err.is_some() { + return None + } + head.as_mut() +} + +impl Default for Builder { + #[inline] + fn default() -> Builder { + Builder { + head: Some(Parts::new()), + err: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_can_map_a_body_from_one_type_to_another() { + let request= Request::builder().body("some string").unwrap(); + let mapped_request = request.map(|s| { + assert_eq!(s, "some string"); + 123u32 + }); + assert_eq!(mapped_request.body(), &123u32); + } +} diff --git a/third_party/rust/http/src/response.rs b/third_party/rust/http/src/response.rs new file mode 100644 index 000000000000..e628cf669f9a --- /dev/null +++ b/third_party/rust/http/src/response.rs @@ -0,0 +1,730 @@ +//! HTTP response types. +//! +//! This module contains structs related to HTTP responses, notably the +//! `Response` type itself as well as a builder to create responses. Typically +//! you'll import the `http::Response` type rather than reaching into this +//! module itself. +//! +//! # Examples +//! +//! Creating a `Response` to return +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! +//! fn respond_to(req: Request<()>) -> http::Result> { +//! let mut response = Response::builder(); +//! response.header("Foo", "Bar") +//! .status(StatusCode::OK); +//! +//! if req.headers().contains_key("Another-Header") { +//! response.header("Another-Header", "Ack"); +//! } +//! +//! response.body(()) +//! } +//! ``` +//! +//! A simple 404 handler +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! +//! 
fn not_found(_req: Request<()>) -> http::Result> { +//! Response::builder() +//! .status(StatusCode::NOT_FOUND) +//! .body(()) +//! } +//! ``` +//! +//! Or otherwise inspecting the result of a request: +//! +//! ```no_run +//! use http::{Request, Response}; +//! +//! fn get(url: &str) -> http::Result> { +//! // ... +//! # panic!() +//! } +//! +//! let response = get("https://www.rust-lang.org/").unwrap(); +//! +//! if !response.status().is_success() { +//! panic!("failed to get a successful response status!"); +//! } +//! +//! if let Some(date) = response.headers().get("Date") { +//! // we've got a `Date` header! +//! } +//! +//! let body = response.body(); +//! // ... +//! ``` + +use std::any::Any; +use std::fmt; + +use {Error, Result, HttpTryFrom, Extensions}; +use header::{HeaderMap, HeaderName, HeaderValue}; +use status::StatusCode; +use version::Version; + +/// Represents an HTTP response +/// +/// An HTTP response consists of a head and a potentially optional body. The body +/// component is generic, enabling arbitrary types to represent the HTTP body. +/// For example, the body could be `Vec`, a `Stream` of byte chunks, or a +/// value that has been deserialized. +/// +/// Typically you'll work with responses on the client side as the result of +/// sending a `Request` and on the server you'll be generating a `Request` to +/// send back to the client. 
+/// +/// # Examples +/// +/// Creating a `Response` to return +/// +/// ``` +/// use http::{Request, Response, StatusCode}; +/// +/// fn respond_to(req: Request<()>) -> http::Result> { +/// let mut response = Response::builder(); +/// response.header("Foo", "Bar") +/// .status(StatusCode::OK); +/// +/// if req.headers().contains_key("Another-Header") { +/// response.header("Another-Header", "Ack"); +/// } +/// +/// response.body(()) +/// } +/// ``` +/// +/// A simple 404 handler +/// +/// ``` +/// use http::{Request, Response, StatusCode}; +/// +/// fn not_found(_req: Request<()>) -> http::Result> { +/// Response::builder() +/// .status(StatusCode::NOT_FOUND) +/// .body(()) +/// } +/// ``` +/// +/// Or otherwise inspecting the result of a request: +/// +/// ```no_run +/// use http::{Request, Response}; +/// +/// fn get(url: &str) -> http::Result> { +/// // ... +/// # panic!() +/// } +/// +/// let response = get("https://www.rust-lang.org/").unwrap(); +/// +/// if !response.status().is_success() { +/// panic!("failed to get a successful response status!"); +/// } +/// +/// if let Some(date) = response.headers().get("Date") { +/// // we've got a `Date` header! +/// } +/// +/// let body = response.body(); +/// // ... 
+/// ``` +/// +/// Deserialize a response of bytes via json: +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Response; +/// use serde::de; +/// +/// fn deserialize(req: Response>) -> serde_json::Result> +/// where for<'de> T: de::Deserialize<'de>, +/// { +/// let (parts, body) = req.into_parts(); +/// let body = serde_json::from_slice(&body)?; +/// Ok(Response::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` +/// +/// Or alternatively, serialize the body of a response to json +/// +/// ``` +/// # extern crate serde; +/// # extern crate serde_json; +/// # extern crate http; +/// use http::Response; +/// use serde::ser; +/// +/// fn serialize(req: Response) -> serde_json::Result>> +/// where T: ser::Serialize, +/// { +/// let (parts, body) = req.into_parts(); +/// let body = serde_json::to_vec(&body)?; +/// Ok(Response::from_parts(parts, body)) +/// } +/// # +/// # fn main() {} +/// ``` +pub struct Response { + head: Parts, + body: T, +} + +/// Component parts of an HTTP `Response` +/// +/// The HTTP response head consists of a status, version, and a set of +/// header fields. +pub struct Parts { + /// The response's status + pub status: StatusCode, + + /// The response's version + pub version: Version, + + /// The response's headers + pub headers: HeaderMap, + + /// The response's extensions + pub extensions: Extensions, + + _priv: (), +} + +/// An HTTP response builder +/// +/// This type can be used to construct an instance of `Response` through a +/// builder-like pattern. +#[derive(Debug)] +pub struct Builder { + head: Option, + err: Option, +} + +impl Response<()> { + /// Creates a new builder-style object to manufacture a `Response` + /// + /// This method returns an instance of `Builder` which can be used to + /// create a `Response`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::builder() + /// .status(200) + /// .header("X-Custom-Foo", "Bar") + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::new() + } +} + +impl Response { + /// Creates a new blank `Response` with the body + /// + /// The component ports of this response will be set to their default, e.g. + /// the ok status, no headers, etc. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::new("hello world"); + /// + /// assert_eq!(response.status(), StatusCode::OK); + /// assert_eq!(*response.body(), "hello world"); + /// ``` + #[inline] + pub fn new(body: T) -> Response { + Response { + head: Parts::new(), + body: body, + } + } + + /// Creates a new `Response` with the given head and body + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::new("hello world"); + /// let (mut parts, body) = response.into_parts(); + /// + /// parts.status = StatusCode::BAD_REQUEST; + /// let response = Response::from_parts(parts, body); + /// + /// assert_eq!(response.status(), StatusCode::BAD_REQUEST); + /// assert_eq!(*response.body(), "hello world"); + /// ``` + #[inline] + pub fn from_parts(parts: Parts, body: T) -> Response { + Response { + head: parts, + body: body, + } + } + + /// Returns the `StatusCode`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert_eq!(response.status(), StatusCode::OK); + /// ``` + #[inline] + pub fn status(&self) -> StatusCode { + self.head.status + } + + /// Returns a mutable reference to the associated `StatusCode`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut response: Response<()> = Response::default(); + /// *response.status_mut() = StatusCode::CREATED; + /// assert_eq!(response.status(), StatusCode::CREATED); + /// ``` + #[inline] + pub fn status_mut(&mut self) -> &mut StatusCode { + &mut self.head.status + } + + /// Returns a reference to the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert_eq!(response.version(), Version::HTTP_11); + /// ``` + #[inline] + pub fn version(&self) -> Version { + self.head.version + } + + /// Returns a mutable reference to the associated version. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut response: Response<()> = Response::default(); + /// *response.version_mut() = Version::HTTP_2; + /// assert_eq!(response.version(), Version::HTTP_2); + /// ``` + #[inline] + pub fn version_mut(&mut self) -> &mut Version { + &mut self.head.version + } + + /// Returns a reference to the associated header field map. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert!(response.headers().is_empty()); + /// ``` + #[inline] + pub fn headers(&self) -> &HeaderMap { + &self.head.headers + } + + /// Returns a mutable reference to the associated header field map. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut response: Response<()> = Response::default(); + /// response.headers_mut().insert(HOST, HeaderValue::from_static("world")); + /// assert!(!response.headers().is_empty()); + /// ``` + #[inline] + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.head.headers + } + + /// Returns a reference to the associated extensions. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// assert!(response.extensions().get::().is_none()); + /// ``` + #[inline] + pub fn extensions(&self) -> &Extensions { + &self.head.extensions + } + + /// Returns a mutable reference to the associated extensions. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::*; + /// let mut response: Response<()> = Response::default(); + /// response.extensions_mut().insert("hello"); + /// assert_eq!(response.extensions().get(), Some(&"hello")); + /// ``` + #[inline] + pub fn extensions_mut(&mut self) -> &mut Extensions { + &mut self.head.extensions + } + + /// Returns a reference to the associated HTTP body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response = Response::default(); + /// assert!(response.body().is_empty()); + /// ``` + #[inline] + pub fn body(&self) -> &T { + &self.body + } + + /// Returns a mutable reference to the associated HTTP body. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let mut response: Response = Response::default(); + /// response.body_mut().push_str("hello world"); + /// assert!(!response.body().is_empty()); + /// ``` + #[inline] + pub fn body_mut(&mut self) -> &mut T { + &mut self.body + } + + /// Consumes the response, returning just the body. + /// + /// # Examples + /// + /// ``` + /// # use http::Response; + /// let response = Response::new(10); + /// let body = response.into_body(); + /// assert_eq!(body, 10); + /// ``` + #[inline] + pub fn into_body(self) -> T { + self.body + } + + /// Consumes the response returning the head and body parts. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response: Response<()> = Response::default(); + /// let (parts, body) = response.into_parts(); + /// assert_eq!(parts.status, StatusCode::OK); + /// ``` + #[inline] + pub fn into_parts(self) -> (Parts, T) { + (self.head, self.body) + } + + /// Consumes the response returning a new response with body mapped to the + /// return type of the passed in function. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// let response = Response::builder().body("some string").unwrap(); + /// let mapped_response: Response<&[u8]> = response.map(|b| { + /// assert_eq!(b, "some string"); + /// b.as_bytes() + /// }); + /// assert_eq!(mapped_response.body(), &"some string".as_bytes()); + /// ``` + #[inline] + pub fn map(self, f: F) -> Response + where F: FnOnce(T) -> U + { + Response { body: f(self.body), head: self.head } + } +} + +impl Default for Response { + #[inline] + fn default() -> Response { + Response::new(T::default()) + } +} + +impl fmt::Debug for Response { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Response") + .field("status", &self.status()) + .field("version", &self.version()) + .field("headers", self.headers()) + // omits Extensions because not useful + .field("body", self.body()) + .finish() + } +} + +impl Parts { + /// Creates a new default instance of `Parts` + fn new() -> Parts { + Parts{ + status: StatusCode::default(), + version: Version::default(), + headers: HeaderMap::default(), + extensions: Extensions::default(), + _priv: (), + } + } +} + +impl fmt::Debug for Parts { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Parts") + .field("status", &self.status) + .field("version", &self.version) + .field("headers", &self.headers) + // omits Extensions because not useful + // omits _priv because not useful + .finish() + } +} + +impl Builder { + /// Creates a new default instance of `Builder` to construct either a + /// 
`Head` or a `Response`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = response::Builder::new() + /// .status(200) + /// .body(()) + /// .unwrap(); + /// ``` + #[inline] + pub fn new() -> Builder { + Builder::default() + } + + /// Set the HTTP status for this response. + /// + /// This function will configure the HTTP status code of the `Response` that + /// will be returned from `Builder::build`. + /// + /// By default this is `200`. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .status(200) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn status(&mut self, status: T) -> &mut Builder + where StatusCode: HttpTryFrom, + { + if let Some(head) = head(&mut self.head, &self.err) { + match HttpTryFrom::try_from(status) { + Ok(s) => head.status = s, + Err(e) => self.err = Some(e.into()), + } + } + self + } + + /// Set the HTTP version for this response. + /// + /// This function will configure the HTTP version of the `Response` that + /// will be returned from `Builder::build`. + /// + /// By default this is HTTP/1.1 + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .version(Version::HTTP_2) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn version(&mut self, version: Version) -> &mut Builder { + if let Some(head) = head(&mut self.head, &self.err) { + head.version = version; + } + self + } + + /// Appends a header to this response builder. + /// + /// This function will append the provided key/value as a header to the + /// internal `HeaderMap` being constructed. Essentially this is equivalent + /// to calling `HeaderMap::append`. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// # use http::header::HeaderValue; + /// + /// let response = Response::builder() + /// .header("Content-Type", "text/html") + /// .header("X-Custom-Foo", "bar") + /// .header("content-length", 0) + /// .body(()) + /// .unwrap(); + /// ``` + pub fn header(&mut self, key: K, value: V) -> &mut Builder + where HeaderName: HttpTryFrom, + HeaderValue: HttpTryFrom + { + if let Some(head) = head(&mut self.head, &self.err) { + match >::try_from(key) { + Ok(key) => { + match >::try_from(value) { + Ok(value) => { head.headers.append(key, value); } + Err(e) => self.err = Some(e.into()), + } + }, + Err(e) => self.err = Some(e.into()), + }; + } + self + } + + /// Adds an extension to this builder + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .extension("My Extension") + /// .body(()) + /// .unwrap(); + /// + /// assert_eq!(response.extensions().get::<&'static str>(), + /// Some(&"My Extension")); + /// ``` + pub fn extension(&mut self, extension: T) -> &mut Builder + where T: Any + Send + Sync + 'static, + { + if let Some(head) = head(&mut self.head, &self.err) { + head.extensions.insert(extension); + } + self + } + + fn take_parts(&mut self) -> Result { + let ret = self.head.take().expect("cannot reuse response builder"); + if let Some(e) = self.err.take() { + return Err(e) + } + Ok(ret) + } + + /// "Consumes" this builder, using the provided `body` to return a + /// constructed `Response`. + /// + /// # Errors + /// + /// This function may return an error if any previously configured argument + /// failed to parse or get converted to the internal representation. For + /// example if an invalid `head` was specified via `header("Foo", + /// "Bar\r\n")` the error will be returned when this function is called + /// rather than when `header` was called. + /// + /// # Panics + /// + /// This method will panic if the builder is reused. 
The `body` function can + /// only be called once. + /// + /// # Examples + /// + /// ``` + /// # use http::*; + /// + /// let response = Response::builder() + /// .body(()) + /// .unwrap(); + /// ``` + pub fn body(&mut self, body: T) -> Result> { + Ok(Response { + head: self.take_parts()?, + body: body, + }) + } +} + +fn head<'a>(head: &'a mut Option, err: &Option) + -> Option<&'a mut Parts> +{ + if err.is_some() { + return None + } + head.as_mut() +} + +impl Default for Builder { + #[inline] + fn default() -> Builder { + Builder { + head: Some(Parts::new()), + err: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_can_map_a_body_from_one_type_to_another() { + let response = Response::builder().body("some string").unwrap(); + let mapped_response = response.map(|s| { + assert_eq!(s, "some string"); + 123u32 + }); + assert_eq!(mapped_response.body(), &123u32); + } +} diff --git a/third_party/rust/http/src/status.rs b/third_party/rust/http/src/status.rs new file mode 100644 index 000000000000..45f93225fc1b --- /dev/null +++ b/third_party/rust/http/src/status.rs @@ -0,0 +1,556 @@ +//! HTTP status codes +//! +//! This module contains HTTP-status code related structs an errors. The main +//! type in this module is `StatusCode` which is not intended to be used through +//! this module but rather the `http::StatusCode` type. +//! +//! # Examples +//! +//! ``` +//! use http::StatusCode; +//! +//! assert_eq!(StatusCode::from_u16(200).unwrap(), StatusCode::OK); +//! assert_eq!(StatusCode::NOT_FOUND, 404); +//! assert!(StatusCode::OK.is_success()); +//! ``` + +use std::fmt; +use std::error::Error; +use std::str::FromStr; + +use HttpTryFrom; + +/// An HTTP status code (`status-code` in RFC 7230 et al.). +/// +/// This type contains constants for all common status codes. +/// It allows status codes in the range [100, 599]. 
+/// +/// IANA maintain the [Hypertext Transfer Protocol (HTTP) Status Code +/// Registry](http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) which is +/// the source for this enum (with one exception, 418 I'm a teapot, which is +/// inexplicably not in the register). +/// +/// # Examples +/// +/// ``` +/// use http::StatusCode; +/// +/// assert_eq!(StatusCode::from_u16(200).unwrap(), StatusCode::OK); +/// assert_eq!(StatusCode::NOT_FOUND.as_u16(), 404); +/// assert!(StatusCode::OK.is_success()); +/// ``` +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct StatusCode(u16); + +/// A possible error value when converting a `StatusCode` from a `u16` or `&str` +/// +/// This error indicates that the supplied input was not a valid number, was less +/// than 100, or was greater than 599. +#[derive(Debug)] +pub struct InvalidStatusCode { + _priv: (), +} + +impl StatusCode { + /// Converts a u16 to a status code. + /// + /// The function validates the correctness of the supplied u16. It must be + /// greater or equal to 100 but less than 600. 
+ /// + /// # Example + /// + /// ``` + /// use http::StatusCode; + /// + /// let ok = StatusCode::from_u16(200).unwrap(); + /// assert_eq!(ok, StatusCode::OK); + /// + /// let err = StatusCode::from_u16(99); + /// assert!(err.is_err()); + /// ``` + #[inline] + pub fn from_u16(src: u16) -> Result { + if src < 100 || src >= 600 { + return Err(InvalidStatusCode::new()); + } + + Ok(StatusCode(src)) + } + + /// Converts a &[u8] to a status code + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() != 3 { + return Err(InvalidStatusCode::new()); + } + + let a = src[0].wrapping_sub(b'0') as u16; + let b = src[1].wrapping_sub(b'0') as u16; + let c = src[2].wrapping_sub(b'0') as u16; + + if a == 0 || a > 5 || b > 9 || c > 9 { + return Err(InvalidStatusCode::new()); + } + + let status = (a * 100) + (b * 10) + c; + Ok(StatusCode(status)) + } + + /// Returns the `u16` corresponding to this `StatusCode`. + /// + /// # Note + /// + /// This is the same as the `From` implementation, but + /// included as an inherent method because that implementation doesn't + /// appear in rustdocs, as well as a way to force the type instead of + /// relying on inference. + /// + /// # Example + /// + /// ``` + /// let status = http::StatusCode::OK; + /// assert_eq!(status.as_u16(), 200); + /// ``` + #[inline] + pub fn as_u16(&self) -> u16 { + (*self).into() + } + + /// Returns a &str representation of the `StatusCode` + /// + /// The return value only includes a numerical representation of the + /// status code. The canonical reason is not included. + /// + /// # Example + /// + /// ``` + /// let status = http::StatusCode::OK; + /// assert_eq!(status.as_str(), "200"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + CODES_AS_STR[(self.0 - 100) as usize] + } + + /// Get the standardised `reason-phrase` for this status code. + /// + /// This is mostly here for servers writing responses, but could potentially have application + /// at other times. 
+ /// + /// The reason phrase is defined as being exclusively for human readers. You should avoid + /// deriving any meaning from it at all costs. + /// + /// Bear in mind also that in HTTP/2.0 the reason phrase is abolished from transmission, and so + /// this canonical reason phrase really is the only reason phrase you’ll find. + /// + /// # Example + /// + /// ``` + /// let status = http::StatusCode::OK; + /// assert_eq!(status.canonical_reason(), Some("OK")); + /// ``` + pub fn canonical_reason(&self) -> Option<&'static str> { + canonical_reason(self.0) + } + + + /// Check if status is within 100-199. + #[inline] + pub fn is_informational(&self) -> bool { + 200 > self.0 && self.0 >= 100 + } + + /// Check if status is within 200-299. + #[inline] + pub fn is_success(&self) -> bool { + 300 > self.0 && self.0 >= 200 + } + + /// Check if status is within 300-399. + #[inline] + pub fn is_redirection(&self) -> bool { + 400 > self.0 && self.0 >= 300 + } + + /// Check if status is within 400-499. + #[inline] + pub fn is_client_error(&self) -> bool { + 500 > self.0 && self.0 >= 400 + } + + /// Check if status is within 500-599. + #[inline] + pub fn is_server_error(&self) -> bool { + 600 > self.0 && self.0 >= 500 + } +} + +impl fmt::Debug for StatusCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) + } +} + +/// Formats the status code, *including* the canonical reason. 
+/// +/// # Example +/// +/// ``` +/// # use http::StatusCode; +/// assert_eq!(format!("{}", StatusCode::OK), "200 OK"); +/// ``` +impl fmt::Display for StatusCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", u16::from(*self), + self.canonical_reason().unwrap_or("")) + } +} + +impl Default for StatusCode { + #[inline] + fn default() -> StatusCode { + StatusCode::OK + } +} + +impl PartialEq for StatusCode { + #[inline] + fn eq(&self, other: &u16) -> bool { + self.as_u16() == *other + } +} + +impl PartialEq for u16 { + #[inline] + fn eq(&self, other: &StatusCode) -> bool { + *self == other.as_u16() + } +} + +impl From for u16 { + #[inline] + fn from(status: StatusCode) -> u16 { + status.0 + } +} + +impl FromStr for StatusCode { + type Err = InvalidStatusCode; + + fn from_str(s: &str) -> Result { + StatusCode::from_bytes(s.as_ref()) + } +} + +impl<'a> HttpTryFrom<&'a StatusCode> for StatusCode { + type Error = ::error::Never; + + #[inline] + fn try_from(t: &'a StatusCode) -> Result { + Ok(t.clone()) + } +} + +impl<'a> HttpTryFrom<&'a [u8]> for StatusCode { + type Error = InvalidStatusCode; + + #[inline] + fn try_from(t: &'a [u8]) -> Result { + StatusCode::from_bytes(t) + } +} + +impl<'a> HttpTryFrom<&'a str> for StatusCode { + type Error = InvalidStatusCode; + + #[inline] + fn try_from(t: &'a str) -> Result { + t.parse() + } +} + +impl HttpTryFrom for StatusCode { + type Error = InvalidStatusCode; + + #[inline] + fn try_from(t: u16) -> Result { + StatusCode::from_u16(t) + } +} + +impl InvalidStatusCode { + fn new() -> InvalidStatusCode { + InvalidStatusCode { + _priv: (), + } + } +} + +macro_rules! 
status_codes { + ( + $( + $(#[$docs:meta])* + ($num:expr, $konst:ident, $phrase:expr); + )+ + ) => { + impl StatusCode { + $( + $(#[$docs])* + pub const $konst: StatusCode = StatusCode($num); + )+ + + } + + fn canonical_reason(num: u16) -> Option<&'static str> { + match num { + $( + $num => Some($phrase), + )+ + _ => None + } + } + } +} + +status_codes! { + /// 100 Continue + /// [[RFC7231, Section 6.2.1](https://tools.ietf.org/html/rfc7231#section-6.2.1)] + (100, CONTINUE, "Continue"); + /// 101 Switching Protocols + /// [[RFC7231, Section 6.2.2](https://tools.ietf.org/html/rfc7231#section-6.2.2)] + (101, SWITCHING_PROTOCOLS, "Switching Protocols"); + /// 102 Processing + /// [[RFC2518](https://tools.ietf.org/html/rfc2518)] + (102, PROCESSING, "Processing"); + + /// 200 OK + /// [[RFC7231, Section 6.3.1](https://tools.ietf.org/html/rfc7231#section-6.3.1)] + (200, OK, "OK"); + /// 201 Created + /// [[RFC7231, Section 6.3.2](https://tools.ietf.org/html/rfc7231#section-6.3.2)] + (201, CREATED, "Created"); + /// 202 Accepted + /// [[RFC7231, Section 6.3.3](https://tools.ietf.org/html/rfc7231#section-6.3.3)] + (202, ACCEPTED, "Accepted"); + /// 203 Non-Authoritative Information + /// [[RFC7231, Section 6.3.4](https://tools.ietf.org/html/rfc7231#section-6.3.4)] + (203, NON_AUTHORITATIVE_INFORMATION, "Non Authoritative Information"); + /// 204 No Content + /// [[RFC7231, Section 6.3.5](https://tools.ietf.org/html/rfc7231#section-6.3.5)] + (204, NO_CONTENT, "No Content"); + /// 205 Reset Content + /// [[RFC7231, Section 6.3.6](https://tools.ietf.org/html/rfc7231#section-6.3.6)] + (205, RESET_CONTENT, "Reset Content"); + /// 206 Partial Content + /// [[RFC7233, Section 4.1](https://tools.ietf.org/html/rfc7233#section-4.1)] + (206, PARTIAL_CONTENT, "Partial Content"); + /// 207 Multi-Status + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (207, MULTI_STATUS, "Multi-Status"); + /// 208 Already Reported + /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] + (208, 
ALREADY_REPORTED, "Already Reported"); + + /// 226 IM Used + /// [[RFC3229](https://tools.ietf.org/html/rfc3229)] + (226, IM_USED, "IM Used"); + + /// 300 Multiple Choices + /// [[RFC7231, Section 6.4.1](https://tools.ietf.org/html/rfc7231#section-6.4.1)] + (300, MULTIPLE_CHOICES, "Multiple Choices"); + /// 301 Moved Permanently + /// [[RFC7231, Section 6.4.2](https://tools.ietf.org/html/rfc7231#section-6.4.2)] + (301, MOVED_PERMANENTLY, "Moved Permanently"); + /// 302 Found + /// [[RFC7231, Section 6.4.3](https://tools.ietf.org/html/rfc7231#section-6.4.3)] + (302, FOUND, "Found"); + /// 303 See Other + /// [[RFC7231, Section 6.4.4](https://tools.ietf.org/html/rfc7231#section-6.4.4)] + (303, SEE_OTHER, "See Other"); + /// 304 Not Modified + /// [[RFC7232, Section 4.1](https://tools.ietf.org/html/rfc7232#section-4.1)] + (304, NOT_MODIFIED, "Not Modified"); + /// 305 Use Proxy + /// [[RFC7231, Section 6.4.5](https://tools.ietf.org/html/rfc7231#section-6.4.5)] + (305, USE_PROXY, "Use Proxy"); + /// 307 Temporary Redirect + /// [[RFC7231, Section 6.4.7](https://tools.ietf.org/html/rfc7231#section-6.4.7)] + (307, TEMPORARY_REDIRECT, "Temporary Redirect"); + /// 308 Permanent Redirect + /// [[RFC7238](https://tools.ietf.org/html/rfc7238)] + (308, PERMANENT_REDIRECT, "Permanent Redirect"); + + /// 400 Bad Request + /// [[RFC7231, Section 6.5.1](https://tools.ietf.org/html/rfc7231#section-6.5.1)] + (400, BAD_REQUEST, "Bad Request"); + /// 401 Unauthorized + /// [[RFC7235, Section 3.1](https://tools.ietf.org/html/rfc7235#section-3.1)] + (401, UNAUTHORIZED, "Unauthorized"); + /// 402 Payment Required + /// [[RFC7231, Section 6.5.2](https://tools.ietf.org/html/rfc7231#section-6.5.2)] + (402, PAYMENT_REQUIRED, "Payment Required"); + /// 403 Forbidden + /// [[RFC7231, Section 6.5.3](https://tools.ietf.org/html/rfc7231#section-6.5.3)] + (403, FORBIDDEN, "Forbidden"); + /// 404 Not Found + /// [[RFC7231, Section 6.5.4](https://tools.ietf.org/html/rfc7231#section-6.5.4)] + (404, 
NOT_FOUND, "Not Found"); + /// 405 Method Not Allowed + /// [[RFC7231, Section 6.5.5](https://tools.ietf.org/html/rfc7231#section-6.5.5)] + (405, METHOD_NOT_ALLOWED, "Method Not Allowed"); + /// 406 Not Acceptable + /// [[RFC7231, Section 6.5.6](https://tools.ietf.org/html/rfc7231#section-6.5.6)] + (406, NOT_ACCEPTABLE, "Not Acceptable"); + /// 407 Proxy Authentication Required + /// [[RFC7235, Section 3.2](https://tools.ietf.org/html/rfc7235#section-3.2)] + (407, PROXY_AUTHENTICATION_REQUIRED, "Proxy Authentication Required"); + /// 408 Request Timeout + /// [[RFC7231, Section 6.5.7](https://tools.ietf.org/html/rfc7231#section-6.5.7)] + (408, REQUEST_TIMEOUT, "Request Timeout"); + /// 409 Conflict + /// [[RFC7231, Section 6.5.8](https://tools.ietf.org/html/rfc7231#section-6.5.8)] + (409, CONFLICT, "Conflict"); + /// 410 Gone + /// [[RFC7231, Section 6.5.9](https://tools.ietf.org/html/rfc7231#section-6.5.9)] + (410, GONE, "Gone"); + /// 411 Length Required + /// [[RFC7231, Section 6.5.10](https://tools.ietf.org/html/rfc7231#section-6.5.10)] + (411, LENGTH_REQUIRED, "Length Required"); + /// 412 Precondition Failed + /// [[RFC7232, Section 4.2](https://tools.ietf.org/html/rfc7232#section-4.2)] + (412, PRECONDITION_FAILED, "Precondition Failed"); + /// 413 Payload Too Large + /// [[RFC7231, Section 6.5.11](https://tools.ietf.org/html/rfc7231#section-6.5.11)] + (413, PAYLOAD_TOO_LARGE, "Payload Too Large"); + /// 414 URI Too Long + /// [[RFC7231, Section 6.5.12](https://tools.ietf.org/html/rfc7231#section-6.5.12)] + (414, URI_TOO_LONG, "URI Too Long"); + /// 415 Unsupported Media Type + /// [[RFC7231, Section 6.5.13](https://tools.ietf.org/html/rfc7231#section-6.5.13)] + (415, UNSUPPORTED_MEDIA_TYPE, "Unsupported Media Type"); + /// 416 Range Not Satisfiable + /// [[RFC7233, Section 4.4](https://tools.ietf.org/html/rfc7233#section-4.4)] + (416, RANGE_NOT_SATISFIABLE, "Range Not Satisfiable"); + /// 417 Expectation Failed + /// [[RFC7231, Section 
6.5.14](https://tools.ietf.org/html/rfc7231#section-6.5.14)] + (417, EXPECTATION_FAILED, "Expectation Failed"); + /// 418 I'm a teapot + /// [curiously not registered by IANA but [RFC2324](https://tools.ietf.org/html/rfc2324)] + (418, IM_A_TEAPOT, "I'm a teapot"); + + /// 421 Misdirected Request + /// [RFC7540, Section 9.1.2](http://tools.ietf.org/html/rfc7540#section-9.1.2) + (421, MISDIRECTED_REQUEST, "Misdirected Request"); + /// 422 Unprocessable Entity + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (422, UNPROCESSABLE_ENTITY, "Unprocessable Entity"); + /// 423 Locked + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (423, LOCKED, "Locked"); + /// 424 Failed Dependency + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (424, FAILED_DEPENDENCY, "Failed Dependency"); + + /// 426 Upgrade Required + /// [[RFC7231, Section 6.5.15](https://tools.ietf.org/html/rfc7231#section-6.5.15)] + (426, UPGRADE_REQUIRED, "Upgrade Required"); + + /// 428 Precondition Required + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (428, PRECONDITION_REQUIRED, "Precondition Required"); + /// 429 Too Many Requests + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (429, TOO_MANY_REQUESTS, "Too Many Requests"); + + /// 431 Request Header Fields Too Large + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (431, REQUEST_HEADER_FIELDS_TOO_LARGE, "Request Header Fields Too Large"); + + /// 451 Unavailable For Legal Reasons + /// [[RFC7725](http://tools.ietf.org/html/rfc7725)] + (451, UNAVAILABLE_FOR_LEGAL_REASONS, "Unavailable For Legal Reasons"); + + /// 500 Internal Server Error + /// [[RFC7231, Section 6.6.1](https://tools.ietf.org/html/rfc7231#section-6.6.1)] + (500, INTERNAL_SERVER_ERROR, "Internal Server Error"); + /// 501 Not Implemented + /// [[RFC7231, Section 6.6.2](https://tools.ietf.org/html/rfc7231#section-6.6.2)] + (501, NOT_IMPLEMENTED, "Not Implemented"); + /// 502 Bad Gateway + /// [[RFC7231, Section 
6.6.3](https://tools.ietf.org/html/rfc7231#section-6.6.3)] + (502, BAD_GATEWAY, "Bad Gateway"); + /// 503 Service Unavailable + /// [[RFC7231, Section 6.6.4](https://tools.ietf.org/html/rfc7231#section-6.6.4)] + (503, SERVICE_UNAVAILABLE, "Service Unavailable"); + /// 504 Gateway Timeout + /// [[RFC7231, Section 6.6.5](https://tools.ietf.org/html/rfc7231#section-6.6.5)] + (504, GATEWAY_TIMEOUT, "Gateway Timeout"); + /// 505 HTTP Version Not Supported + /// [[RFC7231, Section 6.6.6](https://tools.ietf.org/html/rfc7231#section-6.6.6)] + (505, HTTP_VERSION_NOT_SUPPORTED, "HTTP Version Not Supported"); + /// 506 Variant Also Negotiates + /// [[RFC2295](https://tools.ietf.org/html/rfc2295)] + (506, VARIANT_ALSO_NEGOTIATES, "Variant Also Negotiates"); + /// 507 Insufficient Storage + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + (507, INSUFFICIENT_STORAGE, "Insufficient Storage"); + /// 508 Loop Detected + /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] + (508, LOOP_DETECTED, "Loop Detected"); + + /// 510 Not Extended + /// [[RFC2774](https://tools.ietf.org/html/rfc2774)] + (510, NOT_EXTENDED, "Not Extended"); + /// 511 Network Authentication Required + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + (511, NETWORK_AUTHENTICATION_REQUIRED, "Network Authentication Required"); +} + +impl fmt::Display for InvalidStatusCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.description()) + } +} + +impl Error for InvalidStatusCode { + fn description(&self) -> &str { + "invalid status code" + } +} + +macro_rules! 
status_code_strs { + ($($num:expr,)+) => { + const CODES_AS_STR: [&'static str; 500] = [ $( stringify!($num), )+ ]; + } +} + +status_code_strs!( + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, + + 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, + 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, + 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, + + 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, + 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, + 460, 461, 
462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, + + 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, + 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, + 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, + 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, + ); diff --git a/third_party/rust/http/src/uri/authority.rs b/third_party/rust/http/src/uri/authority.rs new file mode 100644 index 000000000000..6232947dcf83 --- /dev/null +++ b/third_party/rust/http/src/uri/authority.rs @@ -0,0 +1,522 @@ +// Deprecated in 1.26, needed until our minimum version is >=1.23. +#[allow(unused, deprecated)] +use std::ascii::AsciiExt; +use std::{cmp, fmt, str}; +use std::hash::{Hash, Hasher}; +use std::str::FromStr; + +use bytes::Bytes; + +use byte_str::ByteStr; +use super::{ErrorKind, InvalidUri, InvalidUriBytes, URI_CHARS}; + +/// Represents the authority component of a URI. +#[derive(Clone)] +pub struct Authority { + pub(super) data: ByteStr, +} + +impl Authority { + pub(super) fn empty() -> Self { + Authority { data: ByteStr::new() } + } + + /// Attempt to convert an `Authority` from `Bytes`. + /// + /// This function will be replaced by a `TryFrom` implementation once the + /// trait lands in stable. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate http; + /// # use http::uri::*; + /// extern crate bytes; + /// + /// use bytes::Bytes; + /// + /// # pub fn main() { + /// let bytes = Bytes::from("example.com"); + /// let authority = Authority::from_shared(bytes).unwrap(); + /// + /// assert_eq!(authority.host(), "example.com"); + /// # } + /// ``` + pub fn from_shared(s: Bytes) -> Result { + let authority_end = Authority::parse_non_empty(&s[..]).map_err(InvalidUriBytes)?; + + if authority_end != s.len() { + return Err(ErrorKind::InvalidUriChar.into()); + } + + Ok(Authority { + data: unsafe { ByteStr::from_utf8_unchecked(s) }, + }) + } + + /// Attempt to convert an `Authority` from a static string. + /// + /// This function will not perform any copying, and the string will be + /// checked if it is empty or contains an invalid character. + /// + /// # Panics + /// + /// This function panics if the argument contains invalid characters or + /// is empty. + /// + /// # Examples + /// + /// ``` + /// # use http::uri::Authority; + /// let authority = Authority::from_static("example.com"); + /// assert_eq!(authority.host(), "example.com"); + /// ``` + pub fn from_static(src: &'static str) -> Self { + let s = src.as_bytes(); + let b = Bytes::from_static(s); + let authority_end = Authority::parse_non_empty(&b[..]).expect("static str is not valid authority"); + + if authority_end != b.len() { + panic!("static str is not valid authority"); + } + + Authority { + data: unsafe { ByteStr::from_utf8_unchecked(b) }, + } + } + + // Note: this may return an *empty* Authority. You might want `parse_non_empty`. + pub(super) fn parse(s: &[u8]) -> Result { + let mut colon_cnt = 0; + let mut start_bracket = false; + let mut end_bracket = false; + let mut end = s.len(); + + for (i, &b) in s.iter().enumerate() { + match URI_CHARS[b as usize] { + b'/' | b'?' 
| b'#' => { + end = i; + break; + } + b':' => { + colon_cnt += 1; + }, + b'[' => { + start_bracket = true; + } + b']' => { + end_bracket = true; + + // Those were part of an IPv6 hostname, so forget them... + colon_cnt = 0; + } + b'@' => { + // Those weren't a port colon, but part of the + // userinfo, so it needs to be forgotten. + colon_cnt = 0; + } + 0 => { + return Err(ErrorKind::InvalidUriChar.into()); + } + _ => {} + } + } + + if start_bracket ^ end_bracket { + return Err(ErrorKind::InvalidAuthority.into()); + } + + if colon_cnt > 1 { + // Things like 'localhost:8080:3030' are rejected. + return Err(ErrorKind::InvalidAuthority.into()); + } + + Ok(end) + } + + // Parse bytes as an Authority, not allowing an empty string. + // + // This should be used by functions that allow a user to parse + // an `Authority` by itself. + fn parse_non_empty(s: &[u8]) -> Result { + if s.is_empty() { + return Err(ErrorKind::Empty.into()); + } + Authority::parse(s) + } + + /// Get the host of this `Authority`. + /// + /// The host subcomponent of authority is identified by an IP literal + /// encapsulated within square brackets, an IPv4 address in dotted- decimal + /// form, or a registered name. The host subcomponent is **case-insensitive**. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |---------| + /// | + /// host + /// ``` + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// assert_eq!(authority.host(), "example.org"); + /// ``` + #[inline] + pub fn host(&self) -> &str { + host(self.as_str()) + } + + /// Get the port of this `Authority`. + /// + /// The port subcomponent of authority is designated by an optional port + /// number in decimal following the host and delimited from it by a single + /// colon (":") character. 
A value is only returned if one is specified in + /// the URI string, i.e., default port values are **not** returned. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-| + /// | + /// port + /// ``` + /// + /// # Examples + /// + /// Authority with port + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org:80".parse().unwrap(); + /// + /// assert_eq!(authority.port(), Some(80)); + /// ``` + /// + /// Authority without port + /// + /// ``` + /// # use http::uri::Authority; + /// let authority: Authority = "example.org".parse().unwrap(); + /// + /// assert!(authority.port().is_none()); + /// ``` + pub fn port(&self) -> Option { + let s = self.as_str(); + s.rfind(":").and_then(|i| { + u16::from_str(&s[i+1..]).ok() + }) + } + + /// Return a str representation of the authority + #[inline] + pub fn as_str(&self) -> &str { + &self.data[..] + } + + /// Converts this `Authority` back to a sequence of bytes + #[inline] + pub fn into_bytes(self) -> Bytes { + self.into() + } +} + +impl AsRef for Authority { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl PartialEq for Authority { + fn eq(&self, other: &Authority) -> bool { + self.data.eq_ignore_ascii_case(&other.data) + } +} + +impl Eq for Authority {} + +/// Case-insensitive equality +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Authority; +/// let authority: Authority = "HELLO.com".parse().unwrap(); +/// assert_eq!(authority, "hello.coM"); +/// assert_eq!("hello.com", authority); +/// ``` +impl PartialEq for Authority { + fn eq(&self, other: &str) -> bool { + self.data.eq_ignore_ascii_case(other) + } +} + +impl PartialEq for str { + fn eq(&self, other: &Authority) -> bool { + self.eq_ignore_ascii_case(other.as_str()) + } +} + +impl<'a> PartialEq for &'a str { + fn eq(&self, other: &Authority) -> bool { + self.eq_ignore_ascii_case(other.as_str()) + } +} + +impl<'a> PartialEq<&'a str> for 
Authority { + fn eq(&self, other: &&'a str) -> bool { + self.data.eq_ignore_ascii_case(other) + } +} + +impl PartialEq for Authority { + fn eq(&self, other: &String) -> bool { + self.data.eq_ignore_ascii_case(other.as_str()) + } +} + +impl PartialEq for String { + fn eq(&self, other: &Authority) -> bool { + self.as_str().eq_ignore_ascii_case(other.as_str()) + } +} + +/// Case-insensitive ordering +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Authority; +/// let authority: Authority = "DEF.com".parse().unwrap(); +/// assert!(authority < "ghi.com"); +/// assert!(authority > "abc.com"); +/// ``` +impl PartialOrd for Authority { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for Authority { + fn partial_cmp(&self, other: &str) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for str { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl<'a> PartialOrd for &'a str { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl<'a> PartialOrd<&'a str> for Authority { + fn partial_cmp(&self, other: &&'a str) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for Authority { + fn 
partial_cmp(&self, other: &String) -> Option { + let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +impl PartialOrd for String { + fn partial_cmp(&self, other: &Authority) -> Option { + let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); + left.partial_cmp(right) + } +} + +/// Case-insensitive hashing +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Authority; +/// # use std::hash::{Hash, Hasher}; +/// # use std::collections::hash_map::DefaultHasher; +/// +/// let a: Authority = "HELLO.com".parse().unwrap(); +/// let b: Authority = "hello.coM".parse().unwrap(); +/// +/// let mut s = DefaultHasher::new(); +/// a.hash(&mut s); +/// let a = s.finish(); +/// +/// let mut s = DefaultHasher::new(); +/// b.hash(&mut s); +/// let b = s.finish(); +/// +/// assert_eq!(a, b); +/// ``` +impl Hash for Authority { + fn hash(&self, state: &mut H) where H: Hasher { + self.data.len().hash(state); + for &b in self.data.as_bytes() { + state.write_u8(b.to_ascii_lowercase()); + } + } +} + +impl FromStr for Authority { + type Err = InvalidUri; + + fn from_str(s: &str) -> Result { + let end = Authority::parse_non_empty(s.as_bytes())?; + + if end != s.len() { + return Err(ErrorKind::InvalidAuthority.into()); + } + + Ok(Authority { data: s.into() }) + } +} + +impl From for Bytes { + #[inline] + fn from(src: Authority) -> Bytes { + src.data.into() + } +} + +impl fmt::Debug for Authority { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl fmt::Display for Authority { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +fn host(auth: &str) -> &str { + let host_port = auth.rsplitn(2, '@') + .next() + .expect("split always has at least 1 item"); + if host_port.as_bytes()[0] == 
b'[' { + let i = host_port.find(']') + .expect("parsing should validate brackets"); + &host_port[1..i] + } else { + host_port.split(':') + .next() + .expect("split always has at least 1 item") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_empty_string_is_error() { + let err = Authority::parse_non_empty(b"").unwrap_err(); + assert_eq!(err.0, ErrorKind::Empty); + } + + #[test] + fn equal_to_self_of_same_authority() { + let authority1: Authority = "example.com".parse().unwrap(); + let authority2: Authority = "EXAMPLE.COM".parse().unwrap(); + assert_eq!(authority1, authority2); + assert_eq!(authority2, authority1); + } + + #[test] + fn not_equal_to_self_of_different_authority() { + let authority1: Authority = "example.com".parse().unwrap(); + let authority2: Authority = "test.com".parse().unwrap(); + assert_ne!(authority1, authority2); + assert_ne!(authority2, authority1); + } + + #[test] + fn equates_with_a_str() { + let authority: Authority = "example.com".parse().unwrap(); + assert_eq!(&authority, "EXAMPLE.com"); + assert_eq!("EXAMPLE.com", &authority); + assert_eq!(authority, "EXAMPLE.com"); + assert_eq!("EXAMPLE.com", authority); + } + + #[test] + fn not_equal_with_a_str_of_a_different_authority() { + let authority: Authority = "example.com".parse().unwrap(); + assert_ne!(&authority, "test.com"); + assert_ne!("test.com", &authority); + assert_ne!(authority, "test.com"); + assert_ne!("test.com", authority); + } + + #[test] + fn equates_with_a_string() { + let authority: Authority = "example.com".parse().unwrap(); + assert_eq!(authority, "EXAMPLE.com".to_string()); + assert_eq!("EXAMPLE.com".to_string(), authority); + } + + #[test] + fn equates_with_a_string_of_a_different_authority() { + let authority: Authority = "example.com".parse().unwrap(); + assert_ne!(authority, "test.com".to_string()); + assert_ne!("test.com".to_string(), authority); + } + + #[test] + fn compares_to_self() { + let authority1: Authority = 
"abc.com".parse().unwrap(); + let authority2: Authority = "def.com".parse().unwrap(); + assert!(authority1 < authority2); + assert!(authority2 > authority1); + } + + #[test] + fn compares_with_a_str() { + let authority: Authority = "def.com".parse().unwrap(); + // with ref + assert!(&authority < "ghi.com"); + assert!("ghi.com" > &authority); + assert!(&authority > "abc.com"); + assert!("abc.com" < &authority); + + // no ref + assert!(authority < "ghi.com"); + assert!("ghi.com" > authority); + assert!(authority > "abc.com"); + assert!("abc.com" < authority); + } + + #[test] + fn compares_with_a_string() { + let authority: Authority = "def.com".parse().unwrap(); + assert!(authority < "ghi.com".to_string()); + assert!("ghi.com".to_string() > authority); + assert!(authority > "abc.com".to_string()); + assert!("abc.com".to_string() < authority); + } +} diff --git a/third_party/rust/http/src/uri/mod.rs b/third_party/rust/http/src/uri/mod.rs new file mode 100644 index 000000000000..5233aece65cb --- /dev/null +++ b/third_party/rust/http/src/uri/mod.rs @@ -0,0 +1,1043 @@ +//! URI component of request and response lines +//! +//! This module primarily contains the `Uri` type which is a component of all +//! HTTP requests and also reexports this type at the root of the crate. A URI +//! is not always a "full URL" in the sense of something you'd type into a web +//! browser, but HTTP requests may only have paths on servers but may have full +//! schemes and hostnames on clients. +//! +//! # Examples +//! +//! ``` +//! use http::Uri; +//! +//! let uri = "/foo/bar?baz".parse::().unwrap(); +//! assert_eq!(uri.path(), "/foo/bar"); +//! assert_eq!(uri.query(), Some("baz")); +//! assert_eq!(uri.host(), None); +//! +//! let uri = "https://www.rust-lang.org/install.html".parse::().unwrap(); +//! assert_eq!(uri.scheme_part().map(|s| s.as_str()), Some("https")); +//! assert_eq!(uri.host(), Some("www.rust-lang.org")); +//! assert_eq!(uri.path(), "/install.html"); +//! 
``` + +use HttpTryFrom; +use byte_str::ByteStr; + +use bytes::Bytes; + +use std::{fmt, u8, u16}; +// Deprecated in 1.26, needed until our minimum version is >=1.23. +#[allow(unused, deprecated)] +use std::ascii::AsciiExt; +use std::hash::{Hash, Hasher}; +use std::str::{self, FromStr}; +use std::error::Error; + +use self::scheme::Scheme2; + +pub use self::authority::Authority; +pub use self::path::PathAndQuery; +pub use self::scheme::Scheme; + +mod authority; +mod path; +mod scheme; +#[cfg(test)] +mod tests; + +/// The URI component of a request. +/// +/// For HTTP 1, this is included as part of the request line. From Section 5.3, +/// Request Target: +/// +/// > Once an inbound connection is obtained, the client sends an HTTP +/// > request message (Section 3) with a request-target derived from the +/// > target URI. There are four distinct formats for the request-target, +/// > depending on both the method being requested and whether the request +/// > is to a proxy. +/// > +/// > ```notrust +/// > request-target = origin-form +/// > / absolute-form +/// > / authority-form +/// > / asterisk-form +/// > ``` +/// +/// The URI is structured as follows: +/// +/// ```notrust +/// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 +/// |-| |-------------------------------||--------| |-------------------| |-----| +/// | | | | | +/// scheme authority path query fragment +/// ``` +/// +/// For HTTP 2.0, the URI is encoded using pseudoheaders. 
+/// +/// # Examples +/// +/// ``` +/// use http::Uri; +/// +/// let uri = "/foo/bar?baz".parse::().unwrap(); +/// assert_eq!(uri.path(), "/foo/bar"); +/// assert_eq!(uri.query(), Some("baz")); +/// assert_eq!(uri.host(), None); +/// +/// let uri = "https://www.rust-lang.org/install.html".parse::().unwrap(); +/// assert_eq!(uri.scheme_part().map(|s| s.as_str()), Some("https")); +/// assert_eq!(uri.host(), Some("www.rust-lang.org")); +/// assert_eq!(uri.path(), "/install.html"); +/// ``` +#[derive(Clone)] +pub struct Uri { + scheme: Scheme, + authority: Authority, + path_and_query: PathAndQuery, +} + +/// The various parts of a URI. +/// +/// This struct is used to provide to and retrieve from a URI. +#[derive(Debug, Default)] +pub struct Parts { + /// The scheme component of a URI + pub scheme: Option, + + /// The authority component of a URI + pub authority: Option, + + /// The origin-form component of a URI + pub path_and_query: Option, + + /// Allow extending in the future + _priv: (), +} + +/// An error resulting from a failed attempt to construct a URI. +#[derive(Debug)] +pub struct InvalidUri(ErrorKind); + +/// An error resulting from a failed attempt to construct a URI. +#[derive(Debug)] +pub struct InvalidUriBytes(InvalidUri); + +/// An error resulting from a failed attempt to construct a URI. 
+#[derive(Debug)] +pub struct InvalidUriParts(InvalidUri); + +#[derive(Debug, Eq, PartialEq)] +enum ErrorKind { + InvalidUriChar, + InvalidScheme, + InvalidAuthority, + InvalidFormat, + SchemeMissing, + AuthorityMissing, + PathAndQueryMissing, + TooLong, + Empty, + SchemeTooLong, +} + +// u16::MAX is reserved for None +const MAX_LEN: usize = (u16::MAX - 1) as usize; + +const URI_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, b'!', 0, b'#', b'$', 0, b'&', b'\'', // 3x + b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', // 5x + 0, b'=', 0, b'?', b'@', b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', b'[', 0, b']', 0, b'_', 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +impl Uri { + /// Attempt to convert a `Uri` from `Parts` + pub fn from_parts(src: Parts) -> Result { + if src.scheme.is_some() { + if src.authority.is_none() { + return Err(ErrorKind::AuthorityMissing.into()); + } + + if src.path_and_query.is_none() { + return Err(ErrorKind::PathAndQueryMissing.into()); + } + } else { + 
if src.authority.is_some() && src.path_and_query.is_some() { + return Err(ErrorKind::SchemeMissing.into()); + } + } + + let scheme = match src.scheme { + Some(scheme) => scheme, + None => Scheme { inner: Scheme2::None }, + }; + + let authority = match src.authority { + Some(authority) => authority, + None => Authority::empty(), + }; + + let path_and_query = match src.path_and_query { + Some(path_and_query) => path_and_query, + None => PathAndQuery::empty(), + }; + + Ok(Uri { + scheme: scheme, + authority: authority, + path_and_query: path_and_query, + }) + } + + /// Attempt to convert a `Uri` from `Bytes` + /// + /// This function will be replaced by a `TryFrom` implementation once the + /// trait lands in stable. + /// + /// # Examples + /// + /// ``` + /// # extern crate http; + /// # use http::uri::*; + /// extern crate bytes; + /// + /// use bytes::Bytes; + /// + /// # pub fn main() { + /// let bytes = Bytes::from("http://example.com/foo"); + /// let uri = Uri::from_shared(bytes).unwrap(); + /// + /// assert_eq!(uri.host().unwrap(), "example.com"); + /// assert_eq!(uri.path(), "/foo"); + /// # } + /// ``` + pub fn from_shared(s: Bytes) -> Result { + use self::ErrorKind::*; + + if s.len() > MAX_LEN { + return Err(TooLong.into()); + } + + match s.len() { + 0 => { + return Err(Empty.into()); + } + 1 => { + match s[0] { + b'/' => { + return Ok(Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::slash(), + }); + } + b'*' => { + return Ok(Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::star(), + }); + } + _ => { + let authority = Authority::from_shared(s)?; + + return Ok(Uri { + scheme: Scheme::empty(), + authority: authority, + path_and_query: PathAndQuery::empty(), + }); + } + } + } + _ => {} + } + + if s[0] == b'/' { + return Ok(Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::from_shared(s)?, + }); + } + + parse_full(s) 
+ } + + /// Convert a `Uri` into `Parts`. + /// + /// # Note + /// + /// This is just an inherent method providing the same functionality as + /// `let parts: Parts = uri.into()` + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let uri: Uri = "/foo".parse().unwrap(); + /// + /// let parts = uri.into_parts(); + /// + /// assert_eq!(parts.path_and_query.unwrap(), "/foo"); + /// + /// assert!(parts.scheme.is_none()); + /// assert!(parts.authority.is_none()); + /// ``` + #[inline] + pub fn into_parts(self) -> Parts { + self.into() + } + + /// Returns the path & query components of the Uri + #[inline] + pub fn path_and_query(&self) -> Option<&PathAndQuery> { + if !self.scheme.inner.is_none() || self.authority.data.is_empty() { + Some(&self.path_and_query) + } else { + None + } + } + + /// Get the path of this `Uri`. + /// + /// Both relative and absolute URIs contain a path component, though it + /// might be the empty string. The path component is **case sensitive**. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |--------| + /// | + /// path + /// ``` + /// + /// If the URI is `*` then the path component is equal to `*`. + /// + /// # Examples + /// + /// A relative URI + /// + /// ``` + /// # use http::Uri; + /// + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.path(), "/hello/world"); + /// ``` + /// + /// An absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.path(), "/hello/world"); + /// ``` + #[inline] + pub fn path(&self) -> &str { + if self.has_path() { + self.path_and_query.path() + } else { + "" + } + } + + /// Get the scheme of this `Uri`. + /// + /// The URI scheme refers to a specification for assigning identifiers + /// within that scheme. 
Only absolute URIs contain a scheme component, but + /// not all absolute URIs will contain a scheme component. Although scheme + /// names are case-insensitive, the canonical form is lowercase. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-| + /// | + /// scheme + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.scheme_part().map(|s| s.as_str()), Some("http")); + /// ``` + /// + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.scheme_part().is_none()); + /// ``` + #[inline] + pub fn scheme_part(&self) -> Option<&Scheme> { + if self.scheme.inner.is_none() { + None + } else { + Some(&self.scheme) + } + } + + #[deprecated(since = "0.1.2", note = "use scheme_part instead")] + #[doc(hidden)] + #[inline] + pub fn scheme(&self) -> Option<&str> { + if self.scheme.inner.is_none() { + None + } else { + Some(self.scheme.as_str()) + } + } + + /// Get the authority of this `Uri`. + /// + /// The authority is a hierarchical element for naming authority such that + /// the remainder of the URI is delegated to that authority. For HTTP, the + /// authority consists of the host and port. The host portion of the + /// authority is **case-insensitive**. + /// + /// The authority also includes a `username:password` component, however + /// the use of this is deprecated and should be avoided. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-------------------------------| + /// | + /// authority + /// ``` + /// + /// This function will be renamed to `authority` in the next semver release. 
+ /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.authority_part().map(|a| a.as_str()), Some("example.org:80")); + /// ``` + /// + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.authority_part().is_none()); + /// ``` + #[inline] + pub fn authority_part(&self) -> Option<&Authority> { + if self.authority.data.is_empty() { + None + } else { + Some(&self.authority) + } + } + + #[deprecated(since = "0.1.1", note = "use authority_part instead")] + #[doc(hidden)] + #[inline] + pub fn authority(&self) -> Option<&str> { + if self.authority.data.is_empty() { + None + } else { + Some(self.authority.as_str()) + } + } + + /// Get the host of this `Uri`. + /// + /// The host subcomponent of authority is identified by an IP literal + /// encapsulated within square brackets, an IPv4 address in dotted- decimal + /// form, or a registered name. The host subcomponent is **case-insensitive**. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |---------| + /// | + /// host + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.host(), Some("example.org")); + /// ``` + /// + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.host().is_none()); + /// ``` + #[inline] + pub fn host(&self) -> Option<&str> { + self.authority_part().map(|a| a.host()) + } + + /// Get the port of this `Uri`. + /// + /// The port subcomponent of authority is designated by an optional port + /// number in decimal following the host and delimited from it by a single + /// colon (":") character. 
A value is only returned if one is specified in + /// the URI string, i.e., default port values are **not** returned. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-| + /// | + /// port + /// ``` + /// + /// # Examples + /// + /// Absolute URI with port + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); + /// + /// assert_eq!(uri.port(), Some(80)); + /// ``` + /// + /// Absolute URI without port + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); + /// + /// assert!(uri.port().is_none()); + /// ``` + /// + /// Relative URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.port().is_none()); + /// ``` + pub fn port(&self) -> Option { + self.authority_part() + .and_then(|a| a.port()) + } + + /// Get the query string of this `Uri`, starting after the `?`. + /// + /// The query component contains non-hierarchical data that, along with data + /// in the path component, serves to identify a resource within the scope of + /// the URI's scheme and naming authority (if any). The query component is + /// indicated by the first question mark ("?") character and terminated by a + /// number sign ("#") character or by the end of the URI. 
+ /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-------------------| + /// | + /// query + /// ``` + /// + /// # Examples + /// + /// Absolute URI + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "http://example.org/hello/world?key=value".parse().unwrap(); + /// + /// assert_eq!(uri.query(), Some("key=value")); + /// ``` + /// + /// Relative URI with a query string component + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world?key=value&foo=bar".parse().unwrap(); + /// + /// assert_eq!(uri.query(), Some("key=value&foo=bar")); + /// ``` + /// + /// Relative URI without a query string component + /// + /// ``` + /// # use http::Uri; + /// let uri: Uri = "/hello/world".parse().unwrap(); + /// + /// assert!(uri.query().is_none()); + /// ``` + #[inline] + pub fn query(&self) -> Option<&str> { + self.path_and_query.query() + } + + fn has_path(&self) -> bool { + !self.path_and_query.data.is_empty() || !self.scheme.inner.is_none() + } +} + +impl<'a> HttpTryFrom<&'a str> for Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(t: &'a str) -> Result { + t.parse() + } +} + +impl<'a> HttpTryFrom<&'a String> for Uri { + type Error = InvalidUri; + + #[inline] + fn try_from(t: &'a String) -> Result { + t.parse() + } +} + +impl HttpTryFrom for Uri { + type Error = InvalidUriBytes; + + #[inline] + fn try_from(t: String) -> Result { + Uri::from_shared(Bytes::from(t)) + } +} + +impl HttpTryFrom for Uri { + type Error = InvalidUriBytes; + + #[inline] + fn try_from(t: Bytes) -> Result { + Uri::from_shared(t) + } +} + +impl HttpTryFrom for Uri { + type Error = InvalidUriParts; + + #[inline] + fn try_from(src: Parts) -> Result { + Uri::from_parts(src) + } +} + +impl<'a> HttpTryFrom<&'a Uri> for Uri { + type Error = ::Error; + + #[inline] + fn try_from(src: &'a Uri) -> Result { + Ok(src.clone()) + } +} + +/// Convert a `Uri` from parts +/// +/// # Examples +/// +/// Relative URI 
+/// +/// ``` +/// # use http::uri::*; +/// let mut parts = Parts::default(); +/// parts.path_and_query = Some("/foo".parse().unwrap()); +/// +/// let uri = Uri::from_parts(parts).unwrap(); +/// +/// assert_eq!(uri.path(), "/foo"); +/// +/// assert!(uri.scheme_part().is_none()); +/// assert!(uri.authority().is_none()); +/// ``` +/// +/// Absolute URI +/// +/// ``` +/// # use http::uri::*; +/// let mut parts = Parts::default(); +/// parts.scheme = Some("http".parse().unwrap()); +/// parts.authority = Some("foo.com".parse().unwrap()); +/// parts.path_and_query = Some("/foo".parse().unwrap()); +/// +/// let uri = Uri::from_parts(parts).unwrap(); +/// +/// assert_eq!(uri.scheme_part().unwrap().as_str(), "http"); +/// assert_eq!(uri.authority().unwrap(), "foo.com"); +/// assert_eq!(uri.path(), "/foo"); +/// ``` +impl From for Parts { + fn from(src: Uri) -> Self { + let path_and_query = if src.has_path() { + Some(src.path_and_query) + } else { + None + }; + + let scheme = match src.scheme.inner { + Scheme2::None => None, + _ => Some(src.scheme), + }; + + let authority = if src.authority.data.is_empty() { + None + } else { + Some(src.authority) + }; + + Parts { + scheme: scheme, + authority: authority, + path_and_query: path_and_query, + _priv: (), + } + } +} + +fn parse_full(mut s: Bytes) -> Result { + // Parse the scheme + let scheme = match Scheme2::parse(&s[..]).map_err(InvalidUriBytes)? { + Scheme2::None => Scheme2::None, + Scheme2::Standard(p) => { + // TODO: use truncate + let _ = s.split_to(p.len() + 3); + Scheme2::Standard(p) + } + Scheme2::Other(n) => { + // Grab the protocol + let mut scheme = s.split_to(n + 3); + + // Strip ://, TODO: truncate + let _ = scheme.split_off(n); + + // Allocate the ByteStr + let val = unsafe { ByteStr::from_utf8_unchecked(scheme) }; + + Scheme2::Other(Box::new(val)) + } + }; + + // Find the end of the authority. The scheme will already have been + // extracted. 
+ let authority_end = Authority::parse(&s[..]).map_err(InvalidUriBytes)?; + + if scheme.is_none() { + if authority_end != s.len() { + return Err(ErrorKind::InvalidFormat.into()); + } + + let authority = Authority { + data: unsafe { ByteStr::from_utf8_unchecked(s) }, + }; + + return Ok(Uri { + scheme: scheme.into(), + authority: authority, + path_and_query: PathAndQuery::empty(), + }); + } + + // Authority is required when absolute + if authority_end == 0 { + return Err(ErrorKind::InvalidFormat.into()); + } + + let authority = s.split_to(authority_end); + let authority = Authority { + data: unsafe { ByteStr::from_utf8_unchecked(authority) }, + }; + + Ok(Uri { + scheme: scheme.into(), + authority: authority, + path_and_query: PathAndQuery::from_shared(s)?, + }) +} + +impl FromStr for Uri { + type Err = InvalidUri; + + #[inline] + fn from_str(s: &str) -> Result { + Uri::from_shared(s.into()).map_err(|e| e.0) + } +} + +impl PartialEq for Uri { + fn eq(&self, other: &Uri) -> bool { + if self.scheme_part() != other.scheme_part() { + return false; + } + + if self.authority_part() != other.authority_part() { + return false; + } + + if self.path() != other.path() { + return false; + } + + if self.query() != other.query() { + return false; + } + + true + } +} + +impl PartialEq for Uri { + fn eq(&self, other: &str) -> bool { + let mut other = other.as_bytes(); + let mut absolute = false; + + if let Some(scheme) = self.scheme_part() { + let scheme = scheme.as_str().as_bytes(); + absolute = true; + + if other.len() < scheme.len() + 3 { + return false; + } + + if !scheme.eq_ignore_ascii_case(&other[..scheme.len()]) { + return false; + } + + other = &other[scheme.len()..]; + + if &other[..3] != b"://" { + return false; + } + + other = &other[3..]; + } + + if let Some(auth) = self.authority_part() { + let len = auth.data.len(); + absolute = true; + + if other.len() < len { + return false; + } + + if !auth.data.as_bytes().eq_ignore_ascii_case(&other[..len]) { + return false; + } + 
+ other = &other[len..]; + } + + let path = self.path(); + + if other.len() < path.len() || path.as_bytes() != &other[..path.len()] { + if absolute && path == "/" { + // PathAndQuery can be ommitted, fall through + } else { + return false; + } + } else { + other = &other[path.len()..]; + } + + if let Some(query) = self.query() { + if other[0] != b'?' { + return false; + } + + other = &other[1..]; + + if other.len() < query.len() { + return false; + } + + if query.as_bytes() != &other[..query.len()] { + return false; + } + + other = &other[query.len()..]; + } + + other.is_empty() || other[0] == b'#' + } +} + +impl PartialEq for str { + fn eq(&self, uri: &Uri) -> bool { + uri == self + } +} + +impl<'a> PartialEq<&'a str> for Uri { + fn eq(&self, other: & &'a str) -> bool { + self == *other + } +} + +impl<'a> PartialEq for &'a str { + fn eq(&self, uri: &Uri) -> bool { + uri == *self + } +} + +impl Eq for Uri {} + +/// Returns a `Uri` representing `/` +impl Default for Uri { + #[inline] + fn default() -> Uri { + Uri { + scheme: Scheme::empty(), + authority: Authority::empty(), + path_and_query: PathAndQuery::slash(), + } + } +} + +impl fmt::Display for Uri { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(scheme) = self.scheme_part() { + write!(f, "{}://", scheme)?; + } + + if let Some(authority) = self.authority_part() { + write!(f, "{}", authority)?; + } + + write!(f, "{}", self.path())?; + + if let Some(query) = self.query() { + write!(f, "?{}", query)?; + } + + Ok(()) + } +} + +impl fmt::Debug for Uri { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl From for InvalidUri { + fn from(src: ErrorKind) -> InvalidUri { + InvalidUri(src) + } +} + +impl From for InvalidUriBytes { + fn from(src: ErrorKind) -> InvalidUriBytes { + InvalidUriBytes(src.into()) + } +} + +impl From for InvalidUriParts { + fn from(src: ErrorKind) -> InvalidUriParts { + InvalidUriParts(src.into()) + } +} + +impl 
fmt::Display for InvalidUri { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +impl Error for InvalidUri { + fn description(&self) -> &str { + match self.0 { + ErrorKind::InvalidUriChar => "invalid uri character", + ErrorKind::InvalidScheme => "invalid scheme", + ErrorKind::InvalidAuthority => "invalid authority", + ErrorKind::InvalidFormat => "invalid format", + ErrorKind::SchemeMissing => "scheme missing", + ErrorKind::AuthorityMissing => "authority missing", + ErrorKind::PathAndQueryMissing => "path missing", + ErrorKind::TooLong => "uri too long", + ErrorKind::Empty => "empty string", + ErrorKind::SchemeTooLong => "scheme too long", + } + } +} + +impl fmt::Display for InvalidUriBytes { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Display for InvalidUriParts { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +impl Error for InvalidUriBytes { + fn description(&self) -> &str { + self.0.description() + } +} + +impl Error for InvalidUriParts { + fn description(&self) -> &str { + self.0.description() + } +} + +impl Hash for Uri { + fn hash(&self, state: &mut H) where H: Hasher { + if !self.scheme.inner.is_none() { + self.scheme.hash(state); + state.write_u8(0xff); + } + + if let Some(auth) = self.authority_part() { + auth.hash(state); + } + + Hash::hash_slice(self.path().as_bytes(), state); + + if let Some(query) = self.query() { + b'?'.hash(state); + Hash::hash_slice(query.as_bytes(), state); + } + } +} diff --git a/third_party/rust/http/src/uri/path.rs b/third_party/rust/http/src/uri/path.rs new file mode 100644 index 000000000000..48d925bfeb19 --- /dev/null +++ b/third_party/rust/http/src/uri/path.rs @@ -0,0 +1,540 @@ +use std::{cmp, fmt, str}; +use std::str::FromStr; + +use bytes::Bytes; + +use byte_str::ByteStr; +use super::{ErrorKind, InvalidUri, InvalidUriBytes, URI_CHARS}; + +/// Represents the path component of a URI 
+#[derive(Clone)] +pub struct PathAndQuery { + pub(super) data: ByteStr, + pub(super) query: u16, +} + +const NONE: u16 = ::std::u16::MAX; + +impl PathAndQuery { + /// Attempt to convert a `PathAndQuery` from `Bytes`. + /// + /// This function will be replaced by a `TryFrom` implementation once the + /// trait lands in stable. + /// + /// # Examples + /// + /// ``` + /// # extern crate http; + /// # use http::uri::*; + /// extern crate bytes; + /// + /// use bytes::Bytes; + /// + /// # pub fn main() { + /// let bytes = Bytes::from("/hello?world"); + /// let path_and_query = PathAndQuery::from_shared(bytes).unwrap(); + /// + /// assert_eq!(path_and_query.path(), "/hello"); + /// assert_eq!(path_and_query.query(), Some("world")); + /// # } + /// ``` + pub fn from_shared(mut src: Bytes) -> Result { + let mut query = NONE; + + let mut i = 0; + + while i < src.len() { + let b = src[i]; + + match URI_CHARS[b as usize] { + 0 => { + if b == b'%' { + // Check if next character is not % + if i + 2 <= src.len() && b'%' == src[i + 1] { + break; + } + + // Check that there are enough chars for a percent + // encoded char + let perc_encoded = + i + 3 <= src.len() && // enough capacity + HEX_DIGIT[src[i + 1] as usize] != 0 && + HEX_DIGIT[src[i + 2] as usize] != 0; + + if !perc_encoded { + return Err(ErrorKind::InvalidUriChar.into()); + } + + i += 3; + continue; + } else { + return Err(ErrorKind::InvalidUriChar.into()); + } + } + b'?' => { + if query == NONE { + query = i as u16; + } + } + b'#' => { + // TODO: truncate + src.split_off(i); + break; + } + _ => {} + } + + i += 1; + } + + Ok(PathAndQuery { + data: unsafe { ByteStr::from_utf8_unchecked(src) }, + query: query, + }) + } + + /// Convert a `PathAndQuery` from a static string. + /// + /// This function will not perform any copying, however the string is + /// checked to ensure that it is valid. + /// + /// # Panics + /// + /// This function panics if the argument is an invalid path and query. 
+ /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let v = PathAndQuery::from_static("/hello?world"); + /// + /// assert_eq!(v.path(), "/hello"); + /// assert_eq!(v.query(), Some("world")); + /// ``` + #[inline] + pub fn from_static(src: &'static str) -> Self { + let src = Bytes::from_static(src.as_bytes()); + + PathAndQuery::from_shared(src) + .unwrap() + } + + pub(super) fn empty() -> Self { + PathAndQuery { + data: ByteStr::new(), + query: NONE, + } + } + + pub(super) fn slash() -> Self { + PathAndQuery { + data: ByteStr::from_static("/"), + query: NONE, + } + } + + pub(super) fn star() -> Self { + PathAndQuery { + data: ByteStr::from_static("*"), + query: NONE, + } + } + + /// Returns the path component + /// + /// The path component is **case sensitive**. + /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |--------| + /// | + /// path + /// ``` + /// + /// If the URI is `*` then the path component is equal to `*`. + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// + /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); + /// + /// assert_eq!(path_and_query.path(), "/hello/world"); + /// ``` + #[inline] + pub fn path(&self) -> &str { + let ret = if self.query == NONE { + &self.data[..] + } else { + &self.data[..self.query as usize] + }; + + if ret.is_empty() { + return "/"; + } + + ret + } + + /// Returns the query string component + /// + /// The query component contains non-hierarchical data that, along with data + /// in the path component, serves to identify a resource within the scope of + /// the URI's scheme and naming authority (if any). The query component is + /// indicated by the first question mark ("?") character and terminated by a + /// number sign ("#") character or by the end of the URI. 
+ /// + /// ```notrust + /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 + /// |-------------------| + /// | + /// query + /// ``` + /// + /// # Examples + /// + /// With a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world?key=value&foo=bar".parse().unwrap(); + /// + /// assert_eq!(path_and_query.query(), Some("key=value&foo=bar")); + /// ``` + /// + /// Without a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); + /// + /// assert!(path_and_query.query().is_none()); + /// ``` + #[inline] + pub fn query(&self) -> Option<&str> { + if self.query == NONE { + None + } else { + let i = self.query + 1; + Some(&self.data[i as usize..]) + } + } + + /// Returns the path and query as a string component. + /// + /// # Examples + /// + /// With a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world?key=value&foo=bar".parse().unwrap(); + /// + /// assert_eq!(path_and_query.as_str(), "/hello/world?key=value&foo=bar"); + /// ``` + /// + /// Without a query string component + /// + /// ``` + /// # use http::uri::*; + /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); + /// + /// assert_eq!(path_and_query.as_str(), "/hello/world"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + let ret = &self.data[..]; + if ret.is_empty() { + return "/"; + } + ret + } + + /// Converts this `PathAndQuery` back to a sequence of bytes + #[inline] + pub fn into_bytes(self) -> Bytes { + self.into() + } +} + +impl FromStr for PathAndQuery { + type Err = InvalidUri; + + fn from_str(s: &str) -> Result { + PathAndQuery::from_shared(s.into()).map_err(|e| e.0) + } +} + +impl From for Bytes { + fn from(src: PathAndQuery) -> Bytes { + src.data.into() + } +} + +impl fmt::Debug for PathAndQuery { + fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for PathAndQuery { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + if !self.data.is_empty() { + match self.data.as_bytes()[0] { + b'/' | b'*' => write!(fmt, "{}", &self.data[..]), + _ => write!(fmt, "/{}", &self.data[..]), + } + } else { + write!(fmt, "/") + } + } +} + +// ===== PartialEq / PartialOrd ===== + +impl PartialEq for PathAndQuery { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self.data == other.data + } +} + +impl Eq for PathAndQuery {} + +impl PartialEq for PathAndQuery { + #[inline] + fn eq(&self, other: &str) -> bool { + self.as_str() == other + } +} + +impl<'a> PartialEq for &'a str { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self == &other.as_str() + } +} + +impl<'a> PartialEq<&'a str> for PathAndQuery { + #[inline] + fn eq(&self, other: &&'a str) -> bool { + self.as_str() == *other + } +} + +impl PartialEq for str { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self == other.as_str() + } +} + +impl PartialEq for PathAndQuery { + #[inline] + fn eq(&self, other: &String) -> bool { + self.as_str() == other.as_str() + } +} + +impl PartialEq for String { + #[inline] + fn eq(&self, other: &PathAndQuery) -> bool { + self.as_str() == other.as_str() + } +} + +impl PartialOrd for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +impl PartialOrd for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &str) -> Option { + self.as_str().partial_cmp(other) + } +} + +impl PartialOrd for str { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.partial_cmp(other.as_str()) + } +} + +impl<'a> PartialOrd<&'a str> for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &&'a str) -> Option { + self.as_str().partial_cmp(*other) + } +} + +impl<'a> PartialOrd for &'a str { + #[inline] + fn 
partial_cmp(&self, other: &PathAndQuery) -> Option { + self.partial_cmp(&other.as_str()) + } +} + +impl PartialOrd for PathAndQuery { + #[inline] + fn partial_cmp(&self, other: &String) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +impl PartialOrd for String { + #[inline] + fn partial_cmp(&self, other: &PathAndQuery) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +const HEX_DIGIT: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3x + 0, 0, 0, 0, 0, 0, 0, 0, b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', 0, 0, // 5x + 0, 0, 0, 0, 0, b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', 0, 0, 0, 0, 0, 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn equal_to_self_of_same_path() { + let p1: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + let p2: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_eq!(p1, p2); + assert_eq!(p2, p1); + } + + #[test] + fn not_equal_to_self_of_different_path() { + let p1: PathAndQuery = 
"/hello/world&foo=bar".parse().unwrap(); + let p2: PathAndQuery = "/world&foo=bar".parse().unwrap(); + assert_ne!(p1, p2); + assert_ne!(p2, p1); + } + + #[test] + fn equates_with_a_str() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_eq!(&path_and_query, "/hello/world&foo=bar"); + assert_eq!("/hello/world&foo=bar", &path_and_query); + assert_eq!(path_and_query, "/hello/world&foo=bar"); + assert_eq!("/hello/world&foo=bar", path_and_query); + } + + #[test] + fn not_equal_with_a_str_of_a_different_path() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + // as a reference + assert_ne!(&path_and_query, "/hello&foo=bar"); + assert_ne!("/hello&foo=bar", &path_and_query); + // without reference + assert_ne!(path_and_query, "/hello&foo=bar"); + assert_ne!("/hello&foo=bar", path_and_query); + } + + #[test] + fn equates_with_a_string() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_eq!(path_and_query, "/hello/world&foo=bar".to_string()); + assert_eq!("/hello/world&foo=bar".to_string(), path_and_query); + } + + #[test] + fn not_equal_with_a_string_of_a_different_path() { + let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); + assert_ne!(path_and_query, "/hello&foo=bar".to_string()); + assert_ne!("/hello&foo=bar".to_string(), path_and_query); + } + + #[test] + fn compares_to_self() { + let p1: PathAndQuery = "/a/world&foo=bar".parse().unwrap(); + let p2: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); + assert!(p1 < p2); + assert!(p2 > p1); + } + + #[test] + fn compares_with_a_str() { + let path_and_query: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); + // by ref + assert!(&path_and_query < "/c/world&foo=bar"); + assert!("/c/world&foo=bar" > &path_and_query); + assert!(&path_and_query > "/a/world&foo=bar"); + assert!("/a/world&foo=bar" < &path_and_query); + + // by val + assert!(path_and_query < "/c/world&foo=bar"); + 
assert!("/c/world&foo=bar" > path_and_query); + assert!(path_and_query > "/a/world&foo=bar"); + assert!("/a/world&foo=bar" < path_and_query); + } + + #[test] + fn compares_with_a_string() { + let path_and_query: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); + assert!(path_and_query < "/c/world&foo=bar".to_string()); + assert!("/c/world&foo=bar".to_string() > path_and_query); + assert!(path_and_query > "/a/world&foo=bar".to_string()); + assert!("/a/world&foo=bar".to_string() < path_and_query); + } + + #[test] + fn double_percent_path() { + let double_percent_path = "/your.js?bn=%%val"; + + assert!(double_percent_path.parse::().is_ok()); + + let path: PathAndQuery = double_percent_path.parse().unwrap(); + assert_eq!(path, double_percent_path); + + let double_percent_path = "/path%%"; + + assert!(double_percent_path.parse::().is_ok()); + } + + #[test] + fn path_ends_with_question_mark() { + let path = "/path?%"; + + assert!(path.parse::().is_err()); + } + + #[test] + fn path_ends_with_fragment_percent() { + let path = "/path#%"; + + assert!(path.parse::().is_ok()); + } +} diff --git a/third_party/rust/http/src/uri/scheme.rs b/third_party/rust/http/src/uri/scheme.rs new file mode 100644 index 000000000000..5fa1e9bb0675 --- /dev/null +++ b/third_party/rust/http/src/uri/scheme.rs @@ -0,0 +1,361 @@ +// Deprecated in 1.26, needed until our minimum version is >=1.23. 
+#[allow(unused, deprecated)] +use std::ascii::AsciiExt; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::str::FromStr; + +use bytes::Bytes; + +use byte_str::ByteStr; +use super::{ErrorKind, InvalidUri, InvalidUriBytes}; + +/// Represents the scheme component of a URI +#[derive(Clone)] +pub struct Scheme { + pub(super) inner: Scheme2, +} + +#[derive(Clone, Debug)] +pub(super) enum Scheme2> { + None, + Standard(Protocol), + Other(T), +} + +#[derive(Copy, Clone, Debug)] +pub(super) enum Protocol { + Http, + Https, +} + +impl Scheme { + /// HTTP protocol scheme + pub const HTTP: Scheme = Scheme { + inner: Scheme2::Standard(Protocol::Http), + }; + + /// HTTP protocol over TLS. + pub const HTTPS: Scheme = Scheme { + inner: Scheme2::Standard(Protocol::Https), + }; + + /// Attempt to convert a `Scheme` from `Bytes` + /// + /// This function will be replaced by a `TryFrom` implementation once the + /// trait lands in stable. + /// + /// # Examples + /// + /// ``` + /// # extern crate http; + /// # use http::uri::*; + /// extern crate bytes; + /// + /// use bytes::Bytes; + /// + /// # pub fn main() { + /// let bytes = Bytes::from("http"); + /// let scheme = Scheme::from_shared(bytes).unwrap(); + /// + /// assert_eq!(scheme.as_str(), "http"); + /// # } + /// ``` + pub fn from_shared(s: Bytes) -> Result { + use self::Scheme2::*; + + match Scheme2::parse_exact(&s[..]).map_err(InvalidUriBytes)? 
{ + None => Err(ErrorKind::InvalidScheme.into()), + Standard(p) => Ok(Standard(p).into()), + Other(_) => { + let b = unsafe { ByteStr::from_utf8_unchecked(s) }; + Ok(Other(Box::new(b)).into()) + } + } + } + + pub(super) fn empty() -> Self { + Scheme { + inner: Scheme2::None, + } + } + + /// Return a str representation of the scheme + /// + /// # Examples + /// + /// ``` + /// # use http::uri::*; + /// let scheme: Scheme = "http".parse().unwrap(); + /// assert_eq!(scheme.as_str(), "http"); + /// ``` + #[inline] + pub fn as_str(&self) -> &str { + use self::Scheme2::*; + use self::Protocol::*; + + match self.inner { + Standard(Http) => "http", + Standard(Https) => "https", + Other(ref v) => &v[..], + None => unreachable!(), + } + } + + /// Converts this `Scheme` back to a sequence of bytes + #[inline] + pub fn into_bytes(self) -> Bytes { + self.into() + } +} + +impl FromStr for Scheme { + type Err = InvalidUri; + + fn from_str(s: &str) -> Result { + use self::Scheme2::*; + + match Scheme2::parse_exact(s.as_bytes())? 
{ + None => Err(ErrorKind::InvalidScheme.into()), + Standard(p) => Ok(Standard(p).into()), + Other(_) => { + Ok(Other(Box::new(s.into())).into()) + } + } + } +} + +impl From for Bytes { + #[inline] + fn from(src: Scheme) -> Self { + use self::Scheme2::*; + use self::Protocol::*; + + match src.inner { + None => Bytes::new(), + Standard(Http) => Bytes::from_static(b"http"), + Standard(Https) => Bytes::from_static(b"https"), + Other(v) => (*v).into(), + } + } +} + +impl fmt::Debug for Scheme { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self.as_str(), f) + } +} + +impl fmt::Display for Scheme { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl AsRef for Scheme { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl PartialEq for Scheme { + fn eq(&self, other: &Scheme) -> bool { + use self::Protocol::*; + use self::Scheme2::*; + + match (&self.inner, &other.inner) { + (&Standard(Http), &Standard(Http)) => true, + (&Standard(Https), &Standard(Https)) => true, + (&Other(ref a), &Other(ref b)) => a.eq_ignore_ascii_case(b), + (&None, _) | (_, &None) => unreachable!(), + _ => false, + } + } +} + +impl Eq for Scheme {} + +/// Case-insensitive equality +/// +/// # Examples +/// +/// ``` +/// # use http::uri::Scheme; +/// let scheme: Scheme = "HTTP".parse().unwrap(); +/// assert_eq!(scheme, *"http"); +/// ``` +impl PartialEq for Scheme { + fn eq(&self, other: &str) -> bool { + self.as_str().eq_ignore_ascii_case(other) + } +} + +/// Case-insensitive equality +impl PartialEq for str { + fn eq(&self, other: &Scheme) -> bool { + other == self + } +} + +/// Case-insensitive hashing +impl Hash for Scheme { + fn hash(&self, state: &mut H) where H: Hasher { + match self.inner { + Scheme2::None => (), + Scheme2::Standard(Protocol::Http) => state.write_u8(1), + Scheme2::Standard(Protocol::Https) => state.write_u8(2), + Scheme2::Other(ref other) => { + other.len().hash(state); + for &b in 
other.as_bytes() { + state.write_u8(b.to_ascii_lowercase()); + } + } + } + } +} + +impl Scheme2 { + pub(super) fn is_none(&self) -> bool { + match *self { + Scheme2::None => true, + _ => false, + } + } +} + +// Require the scheme to not be too long in order to enable further +// optimizations later. +const MAX_SCHEME_LEN: usize = 64; + +// scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) +// +const SCHEME_CHARS: [u8; 256] = [ + // 0 1 2 3 4 5 6 7 8 9 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3x + 0, 0, 0, b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x + b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', 0, // 5x + 0, 0, 0, 0, 0, b'A', b'B', b'C', b'D', b'E', // 6x + b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x + b'Z', 0, 0, 0, 0, 0, 0, b'a', b'b', b'c', // 9x + b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x + b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x + b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x + 0, 0, 0, 0, 0, 0 // 25x +]; + +impl Scheme2 { + fn parse_exact(s: &[u8]) -> Result, InvalidUri> { + match s { + b"http" => Ok(Protocol::Http.into()), + b"https" => Ok(Protocol::Https.into()), + _ => { + if s.len() > MAX_SCHEME_LEN { + return Err(ErrorKind::SchemeTooLong.into()); + } + + for &b in s { + match SCHEME_CHARS[b as usize] { + b':' => { + // Don't want :// here 
+ return Err(ErrorKind::InvalidScheme.into()); + } + 0 => { + return Err(ErrorKind::InvalidScheme.into()); + } + _ => {} + } + } + + Ok(Scheme2::Other(())) + } + } + } + + pub(super) fn parse(s: &[u8]) -> Result, InvalidUri> { + if s.len() >= 7 { + // Check for HTTP + if s[..7].eq_ignore_ascii_case(b"http://") { + // Prefix will be striped + return Ok(Protocol::Http.into()); + } + } + + if s.len() >= 8 { + // Check for HTTPs + if s[..8].eq_ignore_ascii_case(b"https://") { + return Ok(Protocol::Https.into()); + } + } + + if s.len() > 3 { + for i in 0..s.len() { + let b = s[i]; + + if i == MAX_SCHEME_LEN { + return Err(ErrorKind::SchemeTooLong.into()); + } + + match SCHEME_CHARS[b as usize] { + b':' => { + // Not enough data remaining + if s.len() < i + 3 { + break; + } + + // Not a scheme + if &s[i+1..i+3] != b"//" { + break; + } + + // Return scheme + return Ok(Scheme2::Other(i)); + } + // Invald scheme character, abort + 0 => break, + _ => {} + } + } + } + + Ok(Scheme2::None) + } +} + +impl Protocol { + pub(super) fn len(&self) -> usize { + match *self { + Protocol::Http => 4, + Protocol::Https => 5, + } + } +} + +impl From for Scheme2 { + fn from(src: Protocol) -> Self { + Scheme2::Standard(src) + } +} + +#[doc(hidden)] +impl From for Scheme { + fn from(src: Scheme2) -> Self { + Scheme { inner: src } + } +} diff --git a/third_party/rust/http/src/uri/tests.rs b/third_party/rust/http/src/uri/tests.rs new file mode 100644 index 000000000000..08b32eada83c --- /dev/null +++ b/third_party/rust/http/src/uri/tests.rs @@ -0,0 +1,447 @@ +use std::str::FromStr; + +use super::{ErrorKind, InvalidUri, Uri, URI_CHARS}; + +#[test] +fn test_char_table() { + for (i, &v) in URI_CHARS.iter().enumerate() { + if v != 0 { + assert_eq!(i, v as usize); + } + } +} + +macro_rules! part { + ($s:expr) => ( + Some(&$s.parse().unwrap()) + ) +} + +macro_rules! 
test_parse { + ( + $test_name:ident, + $str:expr, + $alt:expr, + $($method:ident = $value:expr,)* + ) => ( + #[test] + fn $test_name() { + let orig_str = $str; + let uri = Uri::from_str(orig_str).unwrap(); + $( + assert_eq!(uri.$method(), $value, "{}: uri = {:?}", stringify!($method), uri); + )+ + assert_eq!(uri, orig_str, "partial eq to original str"); + assert_eq!(uri, uri.clone(), "clones are equal"); + + let new_str = uri.to_string(); + let new_uri = Uri::from_str(&new_str).expect("to_string output parses again as a Uri"); + assert_eq!(new_uri, orig_str, "round trip still equals original str"); + + const ALT: &'static [&'static str] = &$alt; + + for &alt in ALT.iter() { + let other: Uri = alt.parse().unwrap(); + assert_eq!(uri, *alt); + assert_eq!(uri, other); + } + } + ); +} + +test_parse! { + test_uri_parse_path_and_query, + "/some/path/here?and=then&hello#and-bye", + [], + + scheme_part = None, + authority_part = None, + path = "/some/path/here", + query = Some("and=then&hello"), + host = None, +} + +test_parse! { + test_uri_parse_absolute_form, + "http://127.0.0.1:61761/chunks", + [], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1:61761"), + path = "/chunks", + query = None, + host = Some("127.0.0.1"), + port = Some(61761), +} + +test_parse! { + test_uri_parse_absolute_form_without_path, + "https://127.0.0.1:61761", + ["https://127.0.0.1:61761/"], + + scheme_part = part!("https"), + authority_part = part!("127.0.0.1:61761"), + path = "/", + query = None, + port = Some(61761), + host = Some("127.0.0.1"), +} + +test_parse! { + test_uri_parse_asterisk_form, + "*", + [], + + scheme_part = None, + authority_part = None, + path = "*", + query = None, + host = None, +} + +test_parse! { + test_uri_parse_authority_no_port, + "localhost", + ["LOCALHOST", "LocaLHOSt"], + + scheme_part = None, + authority_part = part!("localhost"), + path = "", + query = None, + port = None, + host = Some("localhost"), +} + +test_parse! 
{ + test_uri_authority_only_one_character_issue_197, + "S", + [], + + scheme_part = None, + authority_part = part!("S"), + path = "", + query = None, + port = None, + host = Some("S"), +} + +test_parse! { + test_uri_parse_authority_form, + "localhost:3000", + ["localhosT:3000"], + + scheme_part = None, + authority_part = part!("localhost:3000"), + path = "", + query = None, + host = Some("localhost"), + port = Some(3000), +} + + +test_parse! { + test_uri_parse_absolute_with_default_port_http, + "http://127.0.0.1:80", + ["http://127.0.0.1:80/"], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1:80"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = Some(80), +} + +test_parse! { + test_uri_parse_absolute_with_default_port_https, + "https://127.0.0.1:443", + ["https://127.0.0.1:443/"], + + scheme_part = part!("https"), + authority_part = part!("127.0.0.1:443"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = Some(443), +} + +test_parse! { + test_uri_parse_fragment_questionmark, + "http://127.0.0.1/#?", + [], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_uri_parse_path_with_terminating_questionmark, + "http://127.0.0.1/path?", + [], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1"), + path = "/path", + query = Some(""), + port = None, +} + +test_parse! { + test_uri_parse_absolute_form_with_empty_path_and_nonempty_query, + "http://127.0.0.1?foo=bar", + [], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1"), + path = "/", + query = Some("foo=bar"), + port = None, +} + +test_parse! { + test_uri_parse_absolute_form_with_empty_path_and_fragment_with_slash, + "http://127.0.0.1#foo/bar", + [], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! 
{ + test_uri_parse_absolute_form_with_empty_path_and_fragment_with_questionmark, + "http://127.0.0.1#foo?bar", + [], + + scheme_part = part!("http"), + authority_part = part!("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_userinfo1, + "http://a:b@127.0.0.1:1234/", + [], + + scheme_part = part!("http"), + authority_part = part!("a:b@127.0.0.1:1234"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = Some(1234), +} + +test_parse! { + test_userinfo2, + "http://a:b@127.0.0.1/", + [], + + scheme_part = part!("http"), + authority_part = part!("a:b@127.0.0.1"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_userinfo3, + "http://a@127.0.0.1/", + [], + + scheme_part = part!("http"), + authority_part = part!("a@127.0.0.1"), + host = Some("127.0.0.1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_userinfo_with_port, + "user@localhost:3000", + [], + + scheme_part = None, + authority_part = part!("user@localhost:3000"), + path = "", + query = None, + host = Some("localhost"), + port = Some(3000), +} + +test_parse! { + test_userinfo_pass_with_port, + "user:pass@localhost:3000", + [], + + scheme_part = None, + authority_part = part!("user:pass@localhost:3000"), + path = "", + query = None, + host = Some("localhost"), + port = Some(3000), +} + +test_parse! { + test_ipv6, + "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]/", + [], + + scheme_part = part!("http"), + authority_part = part!("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), + host = Some("2001:0db8:85a3:0000:0000:8a2e:0370:7334"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_ipv6_shorthand, + "http://[::1]/", + [], + + scheme_part = part!("http"), + authority_part = part!("[::1]"), + host = Some("::1"), + path = "/", + query = None, + port = None, +} + +test_parse! 
{ + test_ipv6_shorthand2, + "http://[::]/", + [], + + scheme_part = part!("http"), + authority_part = part!("[::]"), + host = Some("::"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_ipv6_shorthand3, + "http://[2001:db8::2:1]/", + [], + + scheme_part = part!("http"), + authority_part = part!("[2001:db8::2:1]"), + host = Some("2001:db8::2:1"), + path = "/", + query = None, + port = None, +} + +test_parse! { + test_ipv6_with_port, + "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8008/", + [], + + scheme_part = part!("http"), + authority_part = part!("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8008"), + host = Some("2001:0db8:85a3:0000:0000:8a2e:0370:7334"), + path = "/", + query = None, + port = Some(8008), +} + +test_parse! { + test_percentage_encoded_path, + "/echo/abcdefgh_i-j%20/abcdefg_i-j%20478", + [], + + scheme_part = None, + authority_part = None, + host = None, + path = "/echo/abcdefgh_i-j%20/abcdefg_i-j%20478", + query = None, + port = None, +} + +#[test] +fn test_uri_parse_error() { + fn err(s: &str) { + Uri::from_str(s).unwrap_err(); + } + + err("http://"); + err("htt:p//host"); + err("hyper.rs/"); + err("hyper.rs?key=val"); + err("?key=val"); + err("localhost/"); + err("localhost?key=val"); + err("\0"); + err("http://[::1"); + err("http://::1]"); + err("localhost:8080:3030"); +} + +#[test] +fn test_max_uri_len() { + let mut uri = vec![]; + uri.extend(b"http://localhost/"); + uri.extend(vec![b'a'; 70 * 1024]); + + let uri = String::from_utf8(uri).unwrap(); + let res: Result = uri.parse(); + + assert_eq!(res.unwrap_err().0, ErrorKind::TooLong); +} + +#[test] +fn test_long_scheme() { + let mut uri = vec![]; + uri.extend(vec![b'a'; 256]); + uri.extend(b"://localhost/"); + + let uri = String::from_utf8(uri).unwrap(); + let res: Result = uri.parse(); + + assert_eq!(res.unwrap_err().0, ErrorKind::SchemeTooLong); +} + +#[test] +fn test_uri_to_path_and_query() { + let cases = vec![ + ("/", "/"), + ("/foo?bar", "/foo?bar"), + 
("/foo?bar#nope", "/foo?bar"), + ("http://hyper.rs", "/"), + ("http://hyper.rs/", "/"), + ("http://hyper.rs/path", "/path"), + ("http://hyper.rs?query", "/?query"), + ("*", "*"), + ]; + + for case in cases { + let uri = Uri::from_str(case.0).unwrap(); + let s = uri.path_and_query().unwrap().to_string(); + + assert_eq!(s, case.1); + } +} + +#[test] +fn test_authority_uri_parts_round_trip() { + let s = "hyper.rs"; + let uri = Uri::from_str(s).expect("first parse"); + assert_eq!(uri, s); + assert_eq!(uri.to_string(), s); + + let parts = uri.into_parts(); + let uri2 = Uri::from_parts(parts).expect("from_parts"); + assert_eq!(uri2, s); + assert_eq!(uri2.to_string(), s); +} diff --git a/third_party/rust/http/src/version.rs b/third_party/rust/http/src/version.rs new file mode 100644 index 000000000000..3229dc32693f --- /dev/null +++ b/third_party/rust/http/src/version.rs @@ -0,0 +1,68 @@ +//! HTTP version +//! +//! This module contains a definition of the `Version` type. The `Version` +//! type is intended to be accessed through the root of the crate +//! (`http::Version`) rather than this module. +//! +//! The `Version` type contains constants that represent the various versions +//! of the HTTP protocol. +//! +//! # Examples +//! +//! ``` +//! use http::Version; +//! +//! let http11 = Version::HTTP_11; +//! let http2 = Version::HTTP_2; +//! assert!(http11 != http2); +//! +//! println!("{:?}", http2); +//! ``` + +use std::fmt; + +/// Represents a version of the HTTP spec. 
+#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash)] +pub struct Version(Http); + +impl Version { + /// `HTTP/0.9` + pub const HTTP_09: Version = Version(Http::Http09); + + /// `HTTP/1.0` + pub const HTTP_10: Version = Version(Http::Http10); + + /// `HTTP/1.1` + pub const HTTP_11: Version = Version(Http::Http11); + + /// `HTTP/2.0` + pub const HTTP_2: Version = Version(Http::H2); +} + +#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash)] +enum Http { + Http09, + Http10, + Http11, + H2, +} + +impl Default for Version { + #[inline] + fn default() -> Version { + Version::HTTP_11 + } +} + +impl fmt::Debug for Version { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Http::*; + + f.write_str(match self.0 { + Http09 => "HTTP/0.9", + Http10 => "HTTP/1.0", + Http11 => "HTTP/1.1", + H2 => "HTTP/2.0", + }) + } +} diff --git a/third_party/rust/http/tests/header_map.rs b/third_party/rust/http/tests/header_map.rs new file mode 100644 index 000000000000..2fbb95e9e0d4 --- /dev/null +++ b/third_party/rust/http/tests/header_map.rs @@ -0,0 +1,321 @@ +extern crate http; + +use http::*; +use http::header::*; + +#[test] +fn smoke() { + let mut headers = HeaderMap::new(); + + assert!(headers.get("hello").is_none()); + + let name: HeaderName = "hello".parse().unwrap(); + + match headers.entry(&name).unwrap() { + Entry::Vacant(e) => { + e.insert("world".parse().unwrap()); + } + _ => panic!(), + } + + assert!(headers.get("hello").is_some()); + + match headers.entry(&name).unwrap() { + Entry::Occupied(mut e) => { + assert_eq!(e.get(), &"world"); + + // Push another value + e.append("zomg".parse().unwrap()); + + let mut i = e.iter(); + + assert_eq!(*i.next().unwrap(), "world"); + assert_eq!(*i.next().unwrap(), "zomg"); + assert!(i.next().is_none()); + } + _ => panic!(), + } +} + +#[test] +fn drain() { + let mut headers = HeaderMap::new(); + + // Insert a single value + let name: HeaderName = "hello".parse().unwrap(); + headers.insert(name, 
"world".parse().unwrap()); + + { + let mut iter = headers.drain(); + let (name, values) = iter.next().unwrap(); + assert_eq!(name.as_str(), "hello"); + + let values: Vec<_> = values.collect(); + assert_eq!(values.len(), 1); + assert_eq!(values[0], "world"); + + assert!(iter.next().is_none()); + } + + assert!(headers.is_empty()); + + // Insert two sequential values + headers.insert("hello".parse::().unwrap(), "world".parse().unwrap()); + headers.insert("zomg".parse::().unwrap(), "bar".parse().unwrap()); + headers.append("hello".parse::().unwrap(), "world2".parse().unwrap()); + + // Drain... + { + let mut iter = headers.drain(); + let (name, values) = iter.next().unwrap(); + assert_eq!(name.as_str(), "hello"); + + let values: Vec<_> = values.collect(); + assert_eq!(values.len(), 2); + assert_eq!(values[0], "world"); + assert_eq!(values[1], "world2"); + + let (name, values) = iter.next().unwrap(); + assert_eq!(name.as_str(), "zomg"); + + let values: Vec<_> = values.collect(); + assert_eq!(values.len(), 1); + assert_eq!(values[0], "bar"); + + assert!(iter.next().is_none()); + } +} + +#[test] +fn drain_entry() { + let mut headers = HeaderMap::new(); + + headers.insert("hello".parse::().unwrap(), "world".parse().unwrap()); + headers.insert("zomg".parse::().unwrap(), "foo".parse().unwrap()); + headers.append("hello".parse::().unwrap(), "world2".parse().unwrap()); + headers.insert("more".parse::().unwrap(), "words".parse().unwrap()); + headers.append("more".parse::().unwrap(), "insertions".parse().unwrap()); + + // Using insert + { + let mut e = match headers.entry("hello").unwrap() { + Entry::Occupied(e) => e, + _ => panic!(), + }; + + let vals: Vec<_> = e.insert_mult("wat".parse().unwrap()).collect(); + assert_eq!(2, vals.len()); + assert_eq!(vals[0], "world"); + assert_eq!(vals[1], "world2"); + } +} + +#[test] +fn eq() { + let mut a = HeaderMap::new(); + let mut b = HeaderMap::new(); + + assert_eq!(a, b); + + a.insert("hello".parse::().unwrap(), 
"world".parse().unwrap()); + assert_ne!(a, b); + + b.insert("hello".parse::().unwrap(), "world".parse().unwrap()); + assert_eq!(a, b); + + a.insert("foo".parse::().unwrap(), "bar".parse().unwrap()); + a.append("foo".parse::().unwrap(), "baz".parse().unwrap()); + assert_ne!(a, b); + + b.insert("foo".parse::().unwrap(), "bar".parse().unwrap()); + assert_ne!(a, b); + + b.append("foo".parse::().unwrap(), "baz".parse().unwrap()); + assert_eq!(a, b); + + a.append("a".parse::().unwrap(), "a".parse().unwrap()); + a.append("a".parse::().unwrap(), "b".parse().unwrap()); + b.append("a".parse::().unwrap(), "b".parse().unwrap()); + b.append("a".parse::().unwrap(), "a".parse().unwrap()); + + assert_ne!(a, b); +} + +#[test] +fn into_header_name() { + let mut m = HeaderMap::new(); + m.insert(HOST, "localhost".parse().unwrap()); + m.insert(&ACCEPT, "*/*".parse().unwrap()); + m.insert("connection", "keep-alive".parse().unwrap()); + + m.append(LOCATION, "/".parse().unwrap()); + m.append(&VIA, "bob".parse().unwrap()); + m.append("transfer-encoding", "chunked".parse().unwrap()); + + assert_eq!(m.len(), 6); +} + +#[test] +fn as_header_name() { + let mut m = HeaderMap::new(); + let v: HeaderValue = "localhost".parse().unwrap(); + m.insert(HOST, v.clone()); + + let expected = Some(&v); + + assert_eq!(m.get("host"), expected); + assert_eq!(m.get(&HOST), expected); + + let s = String::from("host"); + assert_eq!(m.get(&s), expected); + assert_eq!(m.get(s.as_str()), expected); +} + +#[test] +fn insert_all_std_headers() { + let mut m = HeaderMap::new(); + + for (i, hdr) in STD.iter().enumerate() { + m.insert(hdr.clone(), hdr.as_str().parse().unwrap()); + + for j in 0..(i+1) { + assert_eq!(m[&STD[j]], STD[j].as_str()); + } + + if i != 0 { + for j in (i+1)..STD.len() { + assert!(m.get(&STD[j]).is_none(), "contained {}; j={}", STD[j].as_str(), j); + } + } + } +} + +#[test] +fn insert_79_custom_std_headers() { + let mut h = HeaderMap::new(); + let hdrs = custom_std(79); + + for (i, hdr) in 
hdrs.iter().enumerate() { + h.insert(hdr.clone(), hdr.as_str().parse().unwrap()); + + for j in 0..(i+1) { + assert_eq!(h[&hdrs[j]], hdrs[j].as_str()); + } + + for j in (i+1)..hdrs.len() { + assert!(h.get(&hdrs[j]).is_none()); + } + } +} + +#[test] +fn append_multiple_values() { + let mut map = HeaderMap::new(); + + map.append(header::CONTENT_TYPE, "json".parse().unwrap()); + map.append(header::CONTENT_TYPE, "html".parse().unwrap()); + map.append(header::CONTENT_TYPE, "xml".parse().unwrap()); + + let vals = map.get_all(&header::CONTENT_TYPE) + .iter() + .collect::>(); + + assert_eq!(&vals, &[&"json", &"html", &"xml"]); +} + +fn custom_std(n: usize) -> Vec { + (0..n).map(|i| { + let s = format!("{}-{}", STD[i % STD.len()].as_str(), i); + s.parse().unwrap() + }).collect() +} + +const STD: &'static [HeaderName] = &[ + ACCEPT, + ACCEPT_CHARSET, + ACCEPT_ENCODING, + ACCEPT_LANGUAGE, + ACCEPT_RANGES, + ACCESS_CONTROL_ALLOW_CREDENTIALS, + ACCESS_CONTROL_ALLOW_HEADERS, + ACCESS_CONTROL_ALLOW_METHODS, + ACCESS_CONTROL_ALLOW_ORIGIN, + ACCESS_CONTROL_EXPOSE_HEADERS, + ACCESS_CONTROL_MAX_AGE, + ACCESS_CONTROL_REQUEST_HEADERS, + ACCESS_CONTROL_REQUEST_METHOD, + AGE, + ALLOW, + ALT_SVC, + AUTHORIZATION, + CACHE_CONTROL, + CONNECTION, + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LANGUAGE, + CONTENT_LENGTH, + CONTENT_LOCATION, + CONTENT_RANGE, + CONTENT_SECURITY_POLICY, + CONTENT_SECURITY_POLICY_REPORT_ONLY, + CONTENT_TYPE, + COOKIE, + DNT, + DATE, + ETAG, + EXPECT, + EXPIRES, + FORWARDED, + FROM, + HOST, + IF_MATCH, + IF_MODIFIED_SINCE, + IF_NONE_MATCH, + IF_RANGE, + IF_UNMODIFIED_SINCE, + LAST_MODIFIED, + LINK, + LOCATION, + MAX_FORWARDS, + ORIGIN, + PRAGMA, + PROXY_AUTHENTICATE, + PROXY_AUTHORIZATION, + PUBLIC_KEY_PINS, + PUBLIC_KEY_PINS_REPORT_ONLY, + RANGE, + REFERER, + REFERRER_POLICY, + RETRY_AFTER, + SERVER, + SET_COOKIE, + STRICT_TRANSPORT_SECURITY, + TE, + TRAILER, + TRANSFER_ENCODING, + USER_AGENT, + UPGRADE, + UPGRADE_INSECURE_REQUESTS, + VARY, + VIA, + WARNING, 
+ WWW_AUTHENTICATE, + X_CONTENT_TYPE_OPTIONS, + X_DNS_PREFETCH_CONTROL, + X_FRAME_OPTIONS, + X_XSS_PROTECTION, +]; + +#[test] +fn get_invalid() { + let mut headers = HeaderMap::new(); + headers.insert("foo", "bar".parse().unwrap()); + assert!(headers.get("Evil\r\nKey").is_none()); +} + +#[test] +#[should_panic] +fn insert_invalid() { + let mut headers = HeaderMap::new(); + headers.insert("evil\r\nfoo", "bar".parse().unwrap()); +} diff --git a/third_party/rust/http/tests/header_map_fuzz.rs b/third_party/rust/http/tests/header_map_fuzz.rs new file mode 100644 index 000000000000..b3b65881255e --- /dev/null +++ b/third_party/rust/http/tests/header_map_fuzz.rs @@ -0,0 +1,365 @@ +extern crate http; +extern crate rand; +extern crate quickcheck; + +use http::*; +use http::header::*; + +use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; +use rand::{StdRng, SeedableRng, Rng}; + +use std::collections::HashMap; + +#[test] +fn header_map_fuzz() { + fn prop(fuzz: Fuzz) -> TestResult { + fuzz.run(); + TestResult::from_bool(true) + } + + QuickCheck::new() + .quickcheck(prop as fn(Fuzz) -> TestResult) +} + +#[derive(Debug, Clone)] +struct Fuzz { + // The magic seed that makes the test case reproducible + seed: [usize; 4], + + // Actions to perform + steps: Vec, + + // Number of steps to drop + reduce: usize, +} + +#[derive(Debug)] +struct Weight { + insert: usize, + remove: usize, + append: usize, +} + +#[derive(Debug, Clone)] +struct Step { + action: Action, + expect: AltMap, +} + +#[derive(Debug, Clone)] +enum Action { + Insert { + name: HeaderName, // Name to insert + val: HeaderValue, // Value to insert + old: Option, // Old value + }, + Append { + name: HeaderName, + val: HeaderValue, + ret: bool, + }, + Remove { + name: HeaderName, // Name to remove + val: Option, // Value to get + }, +} + +// An alternate implementation of HeaderMap backed by HashMap +#[derive(Debug, Clone, Default)] +struct AltMap { + map: HashMap>, +} + +impl Fuzz { + fn new(seed: [usize; 4]) -> 
Fuzz { + // Seed the RNG + let mut rng = StdRng::from_seed(&seed); + + let mut steps = vec![]; + let mut expect = AltMap::default(); + let num = rng.gen_range(5, 500); + + let weight = Weight { + insert: rng.gen_range(1, 10), + remove: rng.gen_range(1, 10), + append: rng.gen_range(1, 10), + }; + + while steps.len() < num { + steps.push(expect.gen_step(&weight, &mut rng)); + } + + Fuzz { + seed: seed, + steps: steps, + reduce: 0, + } + } + + fn run(self) { + // Create a new header map + let mut map = HeaderMap::new(); + + // Number of steps to perform + let take = self.steps.len() - self.reduce; + + for step in self.steps.into_iter().take(take) { + step.action.apply(&mut map); + + step.expect.assert_identical(&map); + } + } +} + +impl Arbitrary for Fuzz { + fn arbitrary(g: &mut G) -> Self { + Fuzz::new(quickcheck::Rng::gen(g)) + } +} + +impl AltMap { + fn gen_step(&mut self, weight: &Weight, rng: &mut StdRng) -> Step { + let action = self.gen_action(weight, rng); + + Step { + action: action, + expect: self.clone(), + } + } + + /// This will also apply the action against `self` + fn gen_action(&mut self, weight: &Weight, rng: &mut StdRng) -> Action { + let sum = weight.insert + + weight.remove + + weight.append; + + let mut num = rng.gen_range(0, sum); + + if num < weight.insert { + return self.gen_insert(rng); + } + + num -= weight.insert; + + if num < weight.remove { + return self.gen_remove(rng); + } + + num -= weight.remove; + + if num < weight.append { + return self.gen_append(rng); + } + + unreachable!(); + } + + fn gen_insert(&mut self, rng: &mut StdRng) -> Action { + let name = self.gen_name(4, rng); + let val = gen_header_value(rng); + let old = self.insert(name.clone(), val.clone()); + + Action::Insert { + name: name, + val: val, + old: old, + } + } + + fn gen_remove(&mut self, rng: &mut StdRng) -> Action { + let name = self.gen_name(-4, rng); + let val = self.remove(&name); + + Action::Remove { + name: name, + val: val, + } + } + + fn gen_append(&mut self, 
rng: &mut StdRng) -> Action { + let name = self.gen_name(-5, rng); + let val = gen_header_value(rng); + + let vals = self.map.entry(name.clone()) + .or_insert(vec![]); + + let ret = !vals.is_empty(); + vals.push(val.clone()); + + Action::Append { + name: name, + val: val, + ret: ret, + } + } + + /// Negative numbers weigh finding an existing header higher + fn gen_name(&self, weight: i32, rng: &mut StdRng) -> HeaderName { + let mut existing = rng.gen_weighted_bool(weight.abs() as u32); + + if weight < 0 { + existing = !existing; + } + + if existing { + // Existing header + if let Some(name) = self.find_random_name(rng) { + name + } else { + gen_header_name(rng) + } + } else { + gen_header_name(rng) + } + } + + fn find_random_name(&self, rng: &mut StdRng) -> Option { + if self.map.is_empty() { + None + } else { + let n = rng.gen_range(0, self.map.len()); + self.map.keys().nth(n).map(Clone::clone) + } + } + + fn insert(&mut self, name: HeaderName, val: HeaderValue) -> Option { + let old = self.map.insert(name, vec![val]); + old.and_then(|v| v.into_iter().next()) + } + + fn remove(&mut self, name: &HeaderName) -> Option { + self.map.remove(name).and_then(|v| v.into_iter().next()) + } + + fn assert_identical(&self, other: &HeaderMap) { + assert_eq!(self.map.len(), other.keys_len()); + + for (key, val) in &self.map { + // Test get + assert_eq!(other.get(key), val.get(0)); + + // Test get_all + let vals = other.get_all(key); + let actual: Vec<_> = vals.iter().collect(); + assert_eq!(&actual[..], &val[..]); + } + } +} + +impl Action { + fn apply(self, map: &mut HeaderMap) { + match self { + Action::Insert { name, val, old } => { + let actual = map.insert(name, val); + assert_eq!(actual, old); + } + Action::Remove { name, val } => { + // Just to help track the state, load all associated values. 
+ map.get_all(&name).iter().collect::>(); + + let actual = map.remove(&name); + assert_eq!(actual, val); + } + Action::Append { name, val, ret } => { + assert_eq!(ret, map.append(name, val)); + } + } + } +} + +fn gen_header_name(g: &mut StdRng) -> HeaderName { + if g.gen_weighted_bool(2) { + g.choose(&[ + header::ACCEPT, + header::ACCEPT_CHARSET, + header::ACCEPT_ENCODING, + header::ACCEPT_LANGUAGE, + header::ACCEPT_RANGES, + header::ACCESS_CONTROL_ALLOW_CREDENTIALS, + header::ACCESS_CONTROL_ALLOW_HEADERS, + header::ACCESS_CONTROL_ALLOW_METHODS, + header::ACCESS_CONTROL_ALLOW_ORIGIN, + header::ACCESS_CONTROL_EXPOSE_HEADERS, + header::ACCESS_CONTROL_MAX_AGE, + header::ACCESS_CONTROL_REQUEST_HEADERS, + header::ACCESS_CONTROL_REQUEST_METHOD, + header::AGE, + header::ALLOW, + header::ALT_SVC, + header::AUTHORIZATION, + header::CACHE_CONTROL, + header::CONNECTION, + header::CONTENT_DISPOSITION, + header::CONTENT_ENCODING, + header::CONTENT_LANGUAGE, + header::CONTENT_LENGTH, + header::CONTENT_LOCATION, + header::CONTENT_RANGE, + header::CONTENT_SECURITY_POLICY, + header::CONTENT_SECURITY_POLICY_REPORT_ONLY, + header::CONTENT_TYPE, + header::COOKIE, + header::DNT, + header::DATE, + header::ETAG, + header::EXPECT, + header::EXPIRES, + header::FORWARDED, + header::FROM, + header::HOST, + header::IF_MATCH, + header::IF_MODIFIED_SINCE, + header::IF_NONE_MATCH, + header::IF_RANGE, + header::IF_UNMODIFIED_SINCE, + header::LAST_MODIFIED, + header::LINK, + header::LOCATION, + header::MAX_FORWARDS, + header::ORIGIN, + header::PRAGMA, + header::PROXY_AUTHENTICATE, + header::PROXY_AUTHORIZATION, + header::PUBLIC_KEY_PINS, + header::PUBLIC_KEY_PINS_REPORT_ONLY, + header::RANGE, + header::REFERER, + header::REFERRER_POLICY, + header::RETRY_AFTER, + header::SERVER, + header::SET_COOKIE, + header::STRICT_TRANSPORT_SECURITY, + header::TE, + header::TRAILER, + header::TRANSFER_ENCODING, + header::USER_AGENT, + header::UPGRADE, + header::UPGRADE_INSECURE_REQUESTS, + header::VARY, + 
header::VIA, + header::WARNING, + header::WWW_AUTHENTICATE, + header::X_CONTENT_TYPE_OPTIONS, + header::X_DNS_PREFETCH_CONTROL, + header::X_FRAME_OPTIONS, + header::X_XSS_PROTECTION, + ]).unwrap().clone() + } else { + let value = gen_string(g, 1, 25); + HeaderName::from_bytes(value.as_bytes()).unwrap() + } +} + +fn gen_header_value(g: &mut StdRng) -> HeaderValue { + let value = gen_string(g, 0, 70); + HeaderValue::from_bytes(value.as_bytes()).unwrap() +} + +fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { + let bytes: Vec<_> = (min..max).map(|_| { + // Chars to pick from + g.choose(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----").unwrap().clone() + }).collect(); + + String::from_utf8(bytes).unwrap() +} diff --git a/third_party/rust/http/tests/status_code.rs b/third_party/rust/http/tests/status_code.rs new file mode 100644 index 000000000000..72ba810ca135 --- /dev/null +++ b/third_party/rust/http/tests/status_code.rs @@ -0,0 +1,67 @@ +extern crate http; + +use http::*; + +#[test] +fn from_bytes() { + for ok in &["100", "101", "199", "200", "250", "299", "321", "399", "499", "599"] { + assert!(StatusCode::from_bytes(ok.as_bytes()).is_ok()); + } + + for not_ok in &["0", "00", "10", "40", "99", "000", "010", "099", "600", "610", "999"] { + assert!(StatusCode::from_bytes(not_ok.as_bytes()).is_err()); + } +} + +#[test] +fn equates_with_u16() { + let status = StatusCode::from_u16(200u16).unwrap(); + assert_eq!(200u16, status); + assert_eq!(status, 200u16); +} + +macro_rules! 
test_round_trip { + ($($num:expr,)+) => { + #[test] + fn roundtrip() { + $( + let status = StatusCode::from_bytes(stringify!($num).as_bytes()).unwrap(); + let expect = $num; + + assert_eq!(u16::from(status), expect); + )+ + } + } +} + +test_round_trip!( + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, + + 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, + 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, + 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, + + 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, + 
440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, + + 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, + 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, + 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, + 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, + ); diff --git a/third_party/rust/hyper/.cargo-checksum.json b/third_party/rust/hyper/.cargo-checksum.json index 8cb464989b4c..db4df0730a36 100644 --- a/third_party/rust/hyper/.cargo-checksum.json +++ b/third_party/rust/hyper/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"88c441ef43515d199714599d4222c3735df869d3909f5d2d8dcb2f7775784e6a","LICENSE":"df9cfd06d8a44d9a671eadd39ffd97f166481da015a30f45dfd27886209c5922","src/buffer.rs":"bb0334bcb2fa63a8f9b20cfb662a52f76a8556a5a326f5d44e6885d80fbd406d","src/client/mod.rs":"67bef422959f1c8deb1216d9bdc2e578137859ae8ecca3dd54110aa444191024","src/client/pool.rs":"652072fbcc210d551c95695ed71042ae2a80a594d01e17d8e7983626620de815","src/client/proxy.rs":"0fb671fddb1c377646dc86b620b086941d56354659eace4eb387f414c345fe6b","src/client/request.rs":"c9029bbf1c27ad11d78f61e1f1175b6417eecbc3b9053c33d8c08daf61f72570","src/client/response.rs":"25a39147177e838b7e73d073583cc4a1623b7b8d682a1acdbca4a5eb720c9c97","src/error.rs":"3b3d27425118d08b1205fe7a1c91037b6ad5791cb7ccfbf53207388cb4edca1b","src/header/common/accept.rs":"d4203a24788e23fcf1ef081e9d5ab010e95d48b67532417600b6f4ff7b4a103b","src/header/common/accept_charset.rs":"f2664acc80c429c32325b4128feedc12e47357be17550821929f8002c85af214","src/header/common/accept_encoding.rs":"bea61eb5cd40564a01a0cbbd139e8b070fd8b8489c7af08de9541f607e50fbb2","src/header/common/accept_language.rs":"2e0155b85d991a0b917be1e54a9eda019597aeb4bf61cbd841fbe92f186262a3","src/header/common/accept_ranges.rs":"df351621f0753340697b0a0ceab030580c8604d6a3a5c3c3415b9fca55c04330","src/header/common/access_control_allow_credentials.rs":"c68a618d51235be37d12746fad6248c07d6c904106cd0a97be2239efab8ce941","src/header/common/access_control_allow_headers.rs":"4781e3ce4ef2ee4e90a9f775742d55751c5403b0b11539f96c9e5c76d7a3029e","src/header/common/access_control_allow_methods.rs":"0f72c4e70e74f0be8ddae576220b2cea11e2d848512e5ffb329206275908383d","src/header/common/access_control_allow_origin.rs":"933fdc7d147f7706b191ad6943b341887d9260ef7480384deb2563cd31a43cd1","src/header/common/access_control_expose_headers.rs":"f026676e762ac5c5e6ae33543ceba4e484406e89306d0b3873507814675f408a","src/header/common/access_control_max_age.rs":"1c5970cfaf15ab3ab602b215ab119f100343a7f9e4f85a6711ea21a1
2f369402","src/header/common/access_control_request_headers.rs":"879810240d82544c581b5057808657f9445bf823e19a990e5e88d43d28b88ce6","src/header/common/access_control_request_method.rs":"fd5bc2396dbf0977c616d7b262f2441e60d3e2806c54d0838195cdedd992e7f1","src/header/common/allow.rs":"8b12d0c7f9a76f2aec826ba80097efa784304f50e2e0ce330d8ffd810bb7dd7d","src/header/common/authorization.rs":"e1a6df10a3ee74fbb4ccaa68d0e13d1f93170510a0a0884372d5b29e289795f1","src/header/common/cache_control.rs":"de2c876b78b83ce080007d3413a13e7770963cec49cd0202a8ffb0259185f95f","src/header/common/connection.rs":"5584948f8baae51a480ab6badf23fcb2aa2df6f3db5bd12ba09dcd08d1d01643","src/header/common/content_disposition.rs":"b6be5e65ec73113e045b73a0f69f63204942e25a3b9469f2f59c032c587e111f","src/header/common/content_encoding.rs":"636e07b326036f0d903fe7bce4e49da56dc43ed53d7dd158a906c06bd7466ad2","src/header/common/content_language.rs":"0c42bebf462b570a6dae46580685cb931feaf83b8fd6a0e89a29f7383397f393","src/header/common/content_length.rs":"c792029182c0213d4e2cc90f66210c01f164a4a8b25abadaa74ce51947ad4c76","src/header/common/content_range.rs":"5fab7832b985489b76c698654a4891aaaff6b671b7174dc25c2c4abeeb0d14eb","src/header/common/content_type.rs":"cd6db1ac9159317a467df0feebf66fa0f2e976f81aa339f3e1402110ee16cdad","src/header/common/cookie.rs":"f38e0ece0f4ddf6f5bacd7b1199aab289a5943dd1151c525970837b24fb7e59c","src/header/common/date.rs":"b3516aee3bb26ee5b2a97829ea45d6440512cbd9ef862f9e7f65a39f282e207e","src/header/common/etag.rs":"d7548c554751bb74c5ef53095cd30074e48bb39802190d8caec90621d16c6e9b","src/header/common/expect.rs":"754c5d648086429d6d9c7cebcd86d59703893d6ff904ae5de5217bb71a860496","src/header/common/expires.rs":"351a28af03596aa5ae7dbe97485ef2e717bb5567d8a5ae830e8c1968bbb236ec","src/header/common/from.rs":"05d9f1675c7d3f2caa7082fe44f5e89c3a4104af26ff21b9846443ccde0df12a","src/header/common/host.rs":"f296d9c92c0462a937636190bc3aae8a8f9e3473bd8b7cd5963870fc03686bc2","src/header/common/if_match.rs":"a09
19e71b40b51e447d21fce6b9fbe779de84ae228dc4769a237a2aa34cbb201","src/header/common/if_modified_since.rs":"b722b471531740b4e200da3f53182fea6d0b750931664d233674d99f7f92767f","src/header/common/if_none_match.rs":"f810a9fc0161bd293b8ea6f18d84120d7950557c46fb07028e7351c0fbaed0fa","src/header/common/if_range.rs":"bdc77ca3c6b392aaf38c778f69756933f8a59cb946839e801508548aef891153","src/header/common/if_unmodified_since.rs":"9783ab6133cab3b38800c0049338ff697390f0915f6b7d9b81a485f028ae2dbc","src/header/common/last-event-id.rs":"e773661a46c926b2ecd93cd1a1ca5a6c361b1b8d0c9d004c9ba0bb6b96995e4a","src/header/common/last_modified.rs":"26949ae19bb17e660a3c2c967a7e066c1cc79ea987e5c5b8627a211952d18eb0","src/header/common/link.rs":"a32fabe5b796d8a541c7440ad521612a4fba8cf8e5006d48bca44d02e0108a3c","src/header/common/location.rs":"ed3482f4f806e6241d70aabbc5ce409022ab5d85aa33d7c1e8a50385aec31a7a","src/header/common/mod.rs":"c461990cdc26ad38cc0f31e5a92c8f6a84cbf1c887c39d65b6a92b65d08af071","src/header/common/origin.rs":"c5d38d5c47494038badf85154eae5aa529821adbb8d91e04845cd1939a3acb70","src/header/common/pragma.rs":"024f6d401d6a2c363fb81afcf57dd4cfb164a7ce5a5e4dc9f9b7a95d1ea829d1","src/header/common/prefer.rs":"e680be23a4ed754e357d607a3cbdd9e00d2591fd26058f7f43f7757a86be32f5","src/header/common/preference_applied.rs":"13841597fcd9f1049fc515ea0a401975b3a0c4851c6664e9e4afcba987184680","src/header/common/range.rs":"553a3cb2ba7531418ca56f39f20a6f03b7cb07258b8a70a9ff52c224ae410e93","src/header/common/referer.rs":"e40aebce1e21a8a939df1cb331ed6ab84c37a261fde83b788be1f82710bdb2ba","src/header/common/referrer_policy.rs":"a72fd93ae042aaa5ecfe548a12b6ac07dcd8efdc375591f6f851332f4398684c","src/header/common/server.rs":"e3c7963bd341573c513bf1a1a15bbe38a78f5d8029f6783a8d679ce17969cbe0","src/header/common/set_cookie.rs":"ca02d00f2ec18b4059b1979e3f66a4fadcacacb0f9a8c7faf7a2535f7ad0d6e8","src/header/common/strict_transport_security.rs":"16d20d713979143c9a22326bffc29b31d15e51937f150888c5cdf5b844c881bb","src/h
eader/common/transfer_encoding.rs":"0ae112334bba0b5bb64e4a67059209ed5fb8f034cd24966d63cbfff714480df1","src/header/common/upgrade.rs":"caa2777900ebbd1239a322e0f84b745b688d270fac331d86b0979fe0140cfe63","src/header/common/user_agent.rs":"3f8396206a2cad8d925d391321ebc2d7625f18ea2efcb8562958748369f83111","src/header/common/vary.rs":"ea9ca2d338f01b3a60cb7734532e8717a85c93c15eb1f4701c620fee42a5e9f5","src/header/internals/cell.rs":"f9d023d725b5ccc78a9566202292a59d7ef42c27d42a0e5c99460273e687fffe","src/header/internals/item.rs":"05973f19f40deaccf9b363645cc13cf4c702c4d22ddebb5e6af9a14d592e5a24","src/header/internals/mod.rs":"a7584044f256ee1be3ba1542c7ce32263aa9d62700571430b1d723c5cf53f383","src/header/internals/vec_map.rs":"0e2209a731eba7868d362c5e0f0bf74849baf8e293a0253bd6c5f7207f388ee4","src/header/mod.rs":"60f84113fb7a65c9295e0d9467d37c1229025d884f0df50b9a9dc31af5b4cc5f","src/header/parsing.rs":"5cb02c694b2bdd16abf9b6bc6af3c01b143f01e26f60ce2fb69c6b0c0c19017f","src/header/shared/charset.rs":"71087f535a425d7d17cb3de69f1016e6602032b957d49fe121f6e56f1bf26e2f","src/header/shared/encoding.rs":"9c3b39f074bcadab68e158beea5e35d43de0c717971839d2e1ae43468d7bd7f7","src/header/shared/entity.rs":"b70b08c6511f7fd5539e4e42685423bf2f5fb3d5333fe3175e9010b890d0b654","src/header/shared/httpdate.rs":"6401ebff650228ddc457e3d4b008208254c62a478ea98367e87ffe6cbb106992","src/header/shared/mod.rs":"7216081f6253959364d9aa3abf16f17462baab48f16d174078c61c71ad023c0f","src/header/shared/quality_item.rs":"284d1b35af35d2244459f4ade87c1897a949ef047d4e784738635d558ea157b4","src/http/h1.rs":"4026eec0c2f49fc28cdcba67d8133e0473cec1807e49a6d5f3d77fcde83ab20f","src/http/message.rs":"c09939bb38c40d5a8124f5b60b8fc539aa10a11b4a46c1c3f74d097fa8ecc088","src/http/mod.rs":"9112fb2e59070d105fb361afc600f85cd6a5e21d4d007eaa821145e9b7e63f55","src/lib.rs":"0d5f7558dc4995d553835df43bd3412193ffbf2841be0ed4e6454122838904cb","src/method.rs":"f6fbd5f697cb28f073881cca26004ea3e97638a4b0e44c6192a426b9e762e9cc","src/mock.rs":"dbaa5c
2c3bd4a160bac72fe09982605b783086ad8f5500cba3caddbbf804c7a2","src/net.rs":"15d6e3c20ca97641cca8bdf80a67296823e79be28b4492b48b0f74411439a105","src/server/listener.rs":"3c3d7c658a5cee2ba0ebb4703ca86d02474dbb074a1f57415cd7331803adee3d","src/server/mod.rs":"f4454713769cbad49b62150915031f4db047f5a09603c5f1b7ea8685eab7814f","src/server/request.rs":"49f0d044ed9173d4e3f434112ba88f113feb4b592e5a2a36d442fa26481969f6","src/server/response.rs":"c8ad551860d8404c12d3c12c5932ec536dc1c8b34f428ba613728c6c9c4979b1","src/status.rs":"6c3af3f4cac43748be938bfc7c2aeabe63f32fef233d7c2a7ea7a933c7c19699","src/uri.rs":"ec3eb7595f7842f67a9449e4dbe2d949e1eb86dd3322ee890c3c116ded1a59d9","src/version.rs":"cd422615a8ef3e9e223b3fd68df475bba59dce1e0503ef879c4d88b51832e8db"},"package":"368cb56b2740ebf4230520e2b90ebb0461e69034d85d1945febd9b3971426db2"} \ No newline at end of file +{"files":{"Cargo.toml":"b018b824dbaa62f439c37ae66925a166a1e74cfb7d044505466881c99c9e43a3","LICENSE":"df9cfd06d8a44d9a671eadd39ffd97f166481da015a30f45dfd27886209c5922","src/body/body.rs":"7b45e29ed498cb2d543ada08d4e3e3f7faff8a96d76cc15d4d61070a79044f6a","src/body/chunk.rs":"e3f55b39e0be9e7a509d329e2aab3db94a7e010e9b6a46e4d0e5bdacb9a64773","src/body/mod.rs":"aaa253d1e4aa6128388c22b6107600af450ff5c2610eeafe53ecbca31ac2d7f7","src/body/payload.rs":"3d44bf7a99b4a51651f0f2518590b89f332b0263e1bba2352547da4ef0be63e1","src/client/conn.rs":"b3486638bf5969dc4aad4ae4ee9ccb86503d106ef22ebfe636c0bdd6ad6cce9c","src/client/connect.rs":"f3c4cef9528e755ece2a0793e233fc76e1e93228b1502e8df853706ff9fc8c21","src/client/dispatch.rs":"7a155b77f5b4dd7735dce1dc9936cba778a1de2c4497964ee927b1d2fbdb1e8a","src/client/dns.rs":"0402907a6941dec149b5ef88072daee907963011275fbc26dcb5e1dad6d003e0","src/client/mod.rs":"98eedcfbbb26f19c6dd180d7ec62fad46f23d485cd87a1502e164effe9d76592","src/client/pool.rs":"40550b1dbb6e46f50da268ae219f1ee1d660f90c3470d8061f0f9bba420942b6","src/client/tests.rs":"424ce58d8ff8e82fdb071cee4653dbd10fd3abd25bdcbb4e5cee3d1711618a45","src/co
mmon/buf.rs":"5ff16c21fc0c73577781dee68ef43d38eaefdcfa8e4d0a11bdb5cff663a76e8a","src/common/exec.rs":"5b16a5083be6b77277c1902c62c2a4f3c0b2ae15d1b8a5f40f60c50742bf1332","src/common/io/mod.rs":"6f8e4518df7f24d81fc59b46a2deb61557e8d92700bdc62539fe0f034066fc89","src/common/io/rewind.rs":"205d904384476023969b134609486bbd8c2ee8457d0c7cd486c8424f576577e8","src/common/lazy.rs":"cd802e229b66aa83a4da5e45b0cbe9601bde913ac0479cae9854d75522ac4858","src/common/mod.rs":"4d6e8cc4853428aa689648044aa35a11a1aca1e0e558cf3b2fe13a0ac4bdadca","src/common/never.rs":"d2f6345add7df1a32a6a37179d9abc9c25e633d422abde923201b9c54bf00bd0","src/error.rs":"548be059e6eea566f5a8d9ece8db4bee5b3d2f3de7f86f3a6916494dab4507b2","src/headers.rs":"fc851ba57b10319fc2fce10e6a5718f5e0e26aed2dd8bfcf8fff0cee09250f84","src/lib.rs":"4170f9ff2072497f09f2ab29033afa742c17a2dce59907540b1f3645940c728f","src/mock.rs":"b19d6036cd85e0ee443903533684e79fa64330a3ab73f538d6e8c0589cb8833f","src/proto/h1/conn.rs":"6e24b247b3f3965b9df5bf91a7e01825435bc0db7929c90f148cfc550f487494","src/proto/h1/date.rs":"fbcedfa6b0661ef5b0b39684079ed33cccd6cc8d8fb5bd497e7338664aaf0f73","src/proto/h1/decode.rs":"fbd2b09a46537d88c7cd8d908bdd34b7aacc390086fb044756c8a8ea0913a90f","src/proto/h1/dispatch.rs":"b7c283d1c75b5bc8abcee752ff5d88c385ef25d391484c5976f9f849a87ec756","src/proto/h1/encode.rs":"b20efa2b5fcf2959d7da59b6153a9e30bcdc2d4b218b6c442877e275e0acd6c8","src/proto/h1/io.rs":"2b7c910bce11eafcf5bdea5f06a29a3614ebfc7d7f3b32e4c579317d3a1ff12a","src/proto/h1/mod.rs":"bb8e18175945fe36754532e5ae7af9c1448beb2c3f23a28765ea582cf3288d74","src/proto/h1/role.rs":"3a29c2c1e1e9d3d79880c1734fdb84dd2d63ac86e82e3c63b7ce3db6b52d0c5f","src/proto/h2/client.rs":"13547e1e4922de95b182231e8c4923e2d42a9b364868a0687e53ec91ae6241df","src/proto/h2/mod.rs":"07c0f0d8ef3dcd176f8b3e4a81b30ab5eb23d99b003afeaee1d46557333030b3","src/proto/h2/server.rs":"e5dd69fdf1f15bfe02c0e097aa6139136c2259dd93b8be73107ac6ea84397bf3","src/proto/mod.rs":"1762e5443c8ee6ea7ecb4870ade6e750799d86b4
667bb5162f11d60cd87c41a5","src/rt.rs":"df55114f40696f5a1e79c7c818c6feb91d479fc00ac5f1775c1d0fdb30168118","src/server/conn.rs":"e074cf17018d230bde7add76593f897f7bd3f0e313b571d1d4b2bf6eabae12f8","src/server/mod.rs":"5a41404518c9027a56997265ead580672bb10202a1653add15a74b1031c1b985","src/server/tcp.rs":"62e905fa076cc6fc6fff325556e79f0966ec3a72e6479621dda72e11268e9fb7","src/service/mod.rs":"00cc9e24bcf1e3a76b4c2f8d48fab70dc67430fb8705015ce7d6641ac6516b5d","src/service/new_service.rs":"20b2a6c4403ca5a3b395d823ad8bf75fa4af329007424a4ded4f3292e864ea7c","src/service/service.rs":"e6e8edebef5a5b855350f71b1351574f90267f7e6ddae24d6f409ddd6bddb130","src/upgrade.rs":"75ce6ceb7e4b4e2f8bd5a3992327332955ea63aafbb2acd635b559dd4d62c86b"},"package":"c087746de95e20e4dabe86606c3a019964a8fde2d5f386152939063c116c5971"} \ No newline at end of file diff --git a/third_party/rust/hyper/Cargo.toml b/third_party/rust/hyper/Cargo.toml index 0abac1a2f05f..4d898d630442 100644 --- a/third_party/rust/hyper/Cargo.toml +++ b/third_party/rust/hyper/Cargo.toml @@ -12,51 +12,191 @@ [package] name = "hyper" -version = "0.10.13" -authors = ["Sean McArthur ", "Jonathan Reem "] +version = "0.12.7" +authors = ["Sean McArthur "] include = ["Cargo.toml", "LICENSE", "src/**/*"] -description = "A modern HTTP library." -homepage = "http://hyper.rs" +description = "A fast and correct HTTP library." 
+homepage = "https://hyper.rs" documentation = "https://docs.rs/hyper" readme = "README.md" keywords = ["http", "hyper", "hyperium"] -categories = ["web-programming::http-client", "web-programming::http-server"] +categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"] license = "MIT" repository = "https://github.com/hyperium/hyper" -[dependencies.mime] -version = "0.2" +[profile.bench] +codegen-units = 1 +incremental = false -[dependencies.traitobject] -version = "0.1" +[profile.release] +codegen-units = 1 +incremental = false -[dependencies.unicase] -version = "1.0" +[[example]] +name = "client" +path = "examples/client.rs" +required-features = ["runtime"] -[dependencies.language-tags] -version = "0.2" +[[example]] +name = "client_json" +path = "examples/client_json.rs" +required-features = ["runtime"] + +[[example]] +name = "echo" +path = "examples/echo.rs" +required-features = ["runtime"] + +[[example]] +name = "hello" +path = "examples/hello.rs" +required-features = ["runtime"] + +[[example]] +name = "multi_server" +path = "examples/multi_server.rs" +required-features = ["runtime"] + +[[example]] +name = "params" +path = "examples/params.rs" +required-features = ["runtime"] + +[[example]] +name = "send_file" +path = "examples/send_file.rs" +required-features = ["runtime"] + +[[example]] +name = "state" +path = "examples/state.rs" +required-features = ["runtime"] + +[[example]] +name = "upgrades" +path = "examples/upgrades.rs" +required-features = ["runtime"] + +[[example]] +name = "web_api" +path = "examples/web_api.rs" +required-features = ["runtime"] + +[[test]] +name = "client" +path = "tests/client.rs" +required-features = ["runtime"] + +[[test]] +name = "integration" +path = "tests/integration.rs" +required-features = ["runtime"] + +[[test]] +name = "server" +path = "tests/server.rs" +required-features = ["runtime"] + +[[bench]] +name = "end_to_end" +path = "benches/end_to_end.rs" +required-features = 
["runtime"] + +[[bench]] +name = "server" +path = "benches/server.rs" +required-features = ["runtime"] +[dependencies.bytes] +version = "0.4.4" + +[dependencies.futures] +version = "0.1.21" + +[dependencies.futures-cpupool] +version = "0.1.6" +optional = true + +[dependencies.h2] +version = "0.1.10" + +[dependencies.http] +version = "0.1.7" [dependencies.httparse] version = "1.0" -[dependencies.typeable] +[dependencies.iovec] version = "0.1" +[dependencies.itoa] +version = "0.4.1" + +[dependencies.log] +version = "0.4" + +[dependencies.net2] +version = "0.2.32" +optional = true + [dependencies.time] version = "0.1" -[dependencies.url] +[dependencies.tokio] +version = "0.1.7" +optional = true + +[dependencies.tokio-executor] +version = "0.1.0" +optional = true + +[dependencies.tokio-io] +version = "0.1" + +[dependencies.tokio-reactor] +version = "0.1" +optional = true + +[dependencies.tokio-tcp] +version = "0.1" +optional = true + +[dependencies.tokio-timer] +version = "0.2" +optional = true + +[dependencies.want] +version = "0.0.6" +[dev-dependencies.futures-timer] +version = "0.1" + +[dev-dependencies.num_cpus] version = "1.0" -[dependencies.log] -version = "0.3" +[dev-dependencies.pretty_env_logger] +version = "0.2.0" -[dependencies.num_cpus] +[dev-dependencies.serde] version = "1.0" -[dependencies.base64] -version = "0.6.0" -[dev-dependencies.env_logger] -version = "0.4" +[dev-dependencies.serde_derive] +version = "1.0" + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.spmc] +version = "0.2" + +[dev-dependencies.tokio-fs] +version = "0.1" + +[dev-dependencies.tokio-mockstream] +version = "1.1.0" + +[dev-dependencies.url] +version = "1.0" [features] +__internal_flaky_tests = [] +default = ["__internal_flaky_tests", "runtime"] nightly = [] +runtime = ["futures-cpupool", "net2", "tokio", "tokio-executor", "tokio-reactor", "tokio-tcp", "tokio-timer"] diff --git a/third_party/rust/hyper/src/body/body.rs b/third_party/rust/hyper/src/body/body.rs 
new file mode 100644 index 000000000000..0f4a1d02207c --- /dev/null +++ b/third_party/rust/hyper/src/body/body.rs @@ -0,0 +1,470 @@ +use std::borrow::Cow; +use std::fmt; + +use bytes::Bytes; +use futures::sync::{mpsc, oneshot}; +use futures::{Async, Future, Poll, Stream}; +use h2; +use http::HeaderMap; + +use common::Never; +use super::internal::{FullDataArg, FullDataRet}; +use super::{Chunk, Payload}; +use upgrade::OnUpgrade; + +type BodySender = mpsc::Sender>; + +/// A stream of `Chunk`s, used when receiving bodies. +/// +/// A good default `Payload` to use in many applications. +/// +/// Also implements `futures::Stream`, so stream combinators may be used. +#[must_use = "streams do nothing unless polled"] +pub struct Body { + kind: Kind, + /// Keep the extra bits in an `Option>`, so that + /// Body stays small in the common case (no extras needed). + extra: Option>, +} + +enum Kind { + Once(Option), + Chan { + content_length: Option, + abort_rx: oneshot::Receiver<()>, + rx: mpsc::Receiver>, + }, + H2 { + content_length: Option, + recv: h2::RecvStream, + }, + Wrapped(Box> + Send>), +} + +struct Extra { + /// Allow the client to pass a future to delay the `Body` from returning + /// EOF. This allows the `Client` to try to put the idle connection + /// back into the pool before the body is "finished". + /// + /// The reason for this is so that creating a new request after finishing + /// streaming the body of a response could sometimes result in creating + /// a brand new connection, since the pool didn't know about the idle + /// connection yet. + delayed_eof: Option, + on_upgrade: OnUpgrade, +} + +type DelayEofUntil = oneshot::Receiver; + +enum DelayEof { + /// Initial state, stream hasn't seen EOF yet. + NotEof(DelayEofUntil), + /// Transitions to this state once we've seen `poll` try to + /// return EOF (`None`). This future is then polled, and + /// when it completes, the Body finally returns EOF (`None`). 
+ Eof(DelayEofUntil), +} + +/// A sender half used with `Body::channel()`. +/// +/// Useful when wanting to stream chunks from another thread. See +/// [`Body::channel`](Body::channel) for more. +#[must_use = "Sender does nothing unless sent on"] +#[derive(Debug)] +pub struct Sender { + abort_tx: oneshot::Sender<()>, + tx: BodySender, +} + +impl Body { + /// Create an empty `Body` stream. + /// + /// # Example + /// + /// ``` + /// use hyper::{Body, Request}; + /// + /// // create a `GET /` request + /// let get = Request::new(Body::empty()); + /// ``` + #[inline] + pub fn empty() -> Body { + Body::new(Kind::Once(None)) + } + + /// Create a `Body` stream with an associated sender half. + /// + /// Useful when wanting to stream chunks from another thread. + #[inline] + pub fn channel() -> (Sender, Body) { + Self::new_channel(None) + } + + pub(crate) fn new_channel(content_length: Option) -> (Sender, Body) { + let (tx, rx) = mpsc::channel(0); + let (abort_tx, abort_rx) = oneshot::channel(); + + let tx = Sender { + abort_tx: abort_tx, + tx: tx, + }; + let rx = Body::new(Kind::Chan { + content_length, + abort_rx, + rx, + }); + + (tx, rx) + } + + /// Wrap a futures `Stream` in a box inside `Body`. + /// + /// # Example + /// + /// ``` + /// # extern crate futures; + /// # extern crate hyper; + /// # use hyper::Body; + /// # fn main() { + /// let chunks = vec![ + /// "hello", + /// " ", + /// "world", + /// ]; + /// + /// let stream = futures::stream::iter_ok::<_, ::std::io::Error>(chunks); + /// + /// let body = Body::wrap_stream(stream); + /// # } + /// ``` + pub fn wrap_stream(stream: S) -> Body + where + S: Stream + Send + 'static, + S::Error: Into>, + Chunk: From, + { + let mapped = stream.map(Chunk::from).map_err(Into::into); + Body::new(Kind::Wrapped(Box::new(mapped))) + } + + /// Converts this `Body` into a `Future` of a pending HTTP upgrade. + /// + /// See [the `upgrade` module](::upgrade) for more. 
+ pub fn on_upgrade(self) -> OnUpgrade { + self + .extra + .map(|ex| ex.on_upgrade) + .unwrap_or_else(OnUpgrade::none) + } + + fn new(kind: Kind) -> Body { + Body { + kind: kind, + extra: None, + } + } + + pub(crate) fn h2(recv: h2::RecvStream, content_length: Option) -> Self { + Body::new(Kind::H2 { + content_length, + recv, + }) + } + + pub(crate) fn set_on_upgrade(&mut self, upgrade: OnUpgrade) { + debug_assert!(!upgrade.is_none(), "set_on_upgrade with empty upgrade"); + let extra = self.extra_mut(); + debug_assert!(extra.on_upgrade.is_none(), "set_on_upgrade twice"); + extra.on_upgrade = upgrade; + } + + pub(crate) fn delayed_eof(&mut self, fut: DelayEofUntil) { + self.extra_mut().delayed_eof = Some(DelayEof::NotEof(fut)); + } + + fn take_delayed_eof(&mut self) -> Option { + self + .extra + .as_mut() + .and_then(|extra| extra.delayed_eof.take()) + } + + fn extra_mut(&mut self) -> &mut Extra { + self + .extra + .get_or_insert_with(|| Box::new(Extra { + delayed_eof: None, + on_upgrade: OnUpgrade::none(), + })) + } + + fn poll_eof(&mut self) -> Poll, ::Error> { + match self.take_delayed_eof() { + Some(DelayEof::NotEof(mut delay)) => { + match self.poll_inner() { + ok @ Ok(Async::Ready(Some(..))) | + ok @ Ok(Async::NotReady) => { + self.extra_mut().delayed_eof = Some(DelayEof::NotEof(delay)); + ok + }, + Ok(Async::Ready(None)) => match delay.poll() { + Ok(Async::Ready(never)) => match never {}, + Ok(Async::NotReady) => { + self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay)); + Ok(Async::NotReady) + }, + Err(_done) => { + Ok(Async::Ready(None)) + }, + }, + Err(e) => Err(e), + } + }, + Some(DelayEof::Eof(mut delay)) => { + match delay.poll() { + Ok(Async::Ready(never)) => match never {}, + Ok(Async::NotReady) => { + self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay)); + Ok(Async::NotReady) + }, + Err(_done) => { + Ok(Async::Ready(None)) + }, + } + }, + None => self.poll_inner(), + } + } + + fn poll_inner(&mut self) -> Poll, ::Error> { + match self.kind { 
+ Kind::Once(ref mut val) => Ok(Async::Ready(val.take())), + Kind::Chan { + content_length: ref mut len, + ref mut rx, + ref mut abort_rx, + } => { + if let Ok(Async::Ready(())) = abort_rx.poll() { + return Err(::Error::new_body_write("body write aborted")); + } + + match rx.poll().expect("mpsc cannot error") { + Async::Ready(Some(Ok(chunk))) => { + if let Some(ref mut len) = *len { + debug_assert!(*len >= chunk.len() as u64); + *len = *len - chunk.len() as u64; + } + Ok(Async::Ready(Some(chunk))) + } + Async::Ready(Some(Err(err))) => Err(err), + Async::Ready(None) => Ok(Async::Ready(None)), + Async::NotReady => Ok(Async::NotReady), + } + } + Kind::H2 { + recv: ref mut h2, .. + } => h2 + .poll() + .map(|async| { + async.map(|opt| { + opt.map(|bytes| { + let _ = h2.release_capacity().release_capacity(bytes.len()); + Chunk::from(bytes) + }) + }) + }) + .map_err(::Error::new_body), + Kind::Wrapped(ref mut s) => s.poll().map_err(::Error::new_body), + } + } +} + +impl Default for Body { + /// Returns [`Body::empty()`](Body::empty). + #[inline] + fn default() -> Body { + Body::empty() + } +} + +impl Payload for Body { + type Data = Chunk; + type Error = ::Error; + + fn poll_data(&mut self) -> Poll, Self::Error> { + self.poll_eof() + } + + fn poll_trailers(&mut self) -> Poll, Self::Error> { + match self.kind { + Kind::H2 { + recv: ref mut h2, .. + } => h2.poll_trailers().map_err(::Error::new_h2), + _ => Ok(Async::Ready(None)), + } + } + + fn is_end_stream(&self) -> bool { + match self.kind { + Kind::Once(ref val) => val.is_none(), + Kind::Chan { content_length, .. } => content_length == Some(0), + Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(), + Kind::Wrapped(..) => false, + } + } + + fn content_length(&self) -> Option { + match self.kind { + Kind::Once(Some(ref val)) => Some(val.len() as u64), + Kind::Once(None) => Some(0), + Kind::Wrapped(..) => None, + Kind::Chan { content_length, .. } | Kind::H2 { content_length, .. 
} => content_length, + } + } + + // We can improve the performance of `Body` when we know it is a Once kind. + #[doc(hidden)] + fn __hyper_full_data(&mut self, _: FullDataArg) -> FullDataRet { + match self.kind { + Kind::Once(ref mut val) => FullDataRet(val.take()), + _ => FullDataRet(None), + } + } +} + +impl Stream for Body { + type Item = Chunk; + type Error = ::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + self.poll_data() + } +} + +impl fmt::Debug for Body { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Body").finish() + } +} + +impl Sender { + /// Check to see if this `Sender` can send more data. + pub fn poll_ready(&mut self) -> Poll<(), ::Error> { + match self.abort_tx.poll_cancel() { + Ok(Async::Ready(())) | Err(_) => return Err(::Error::new_closed()), + Ok(Async::NotReady) => (), + } + + self.tx.poll_ready().map_err(|_| ::Error::new_closed()) + } + + /// Sends data on this channel. + /// + /// This should be called after `poll_ready` indicated the channel + /// could accept another `Chunk`. + /// + /// Returns `Err(Chunk)` if the channel could not (currently) accept + /// another `Chunk`. + pub fn send_data(&mut self, chunk: Chunk) -> Result<(), Chunk> { + self.tx + .try_send(Ok(chunk)) + .map_err(|err| err.into_inner().expect("just sent Ok")) + } + + /// Aborts the body in an abnormal fashion. 
+ pub fn abort(self) { + let _ = self.abort_tx.send(()); + } + + pub(crate) fn send_error(&mut self, err: ::Error) { + let _ = self.tx.try_send(Err(err)); + } +} + +impl From for Body { + #[inline] + fn from(chunk: Chunk) -> Body { + if chunk.is_empty() { + Body::empty() + } else { + Body::new(Kind::Once(Some(chunk))) + } + } +} + +impl + From> + Send + 'static>> + for Body +{ + #[inline] + fn from( + stream: Box< + Stream> + Send + 'static, + >, + ) -> Body { + Body::new(Kind::Wrapped(stream)) + } +} + +impl From for Body { + #[inline] + fn from(bytes: Bytes) -> Body { + Body::from(Chunk::from(bytes)) + } +} + +impl From> for Body { + #[inline] + fn from(vec: Vec) -> Body { + Body::from(Chunk::from(vec)) + } +} + +impl From<&'static [u8]> for Body { + #[inline] + fn from(slice: &'static [u8]) -> Body { + Body::from(Chunk::from(slice)) + } +} + +impl From> for Body { + #[inline] + fn from(cow: Cow<'static, [u8]>) -> Body { + match cow { + Cow::Borrowed(b) => Body::from(b), + Cow::Owned(o) => Body::from(o), + } + } +} + +impl From for Body { + #[inline] + fn from(s: String) -> Body { + Body::from(Chunk::from(s.into_bytes())) + } +} + +impl From<&'static str> for Body { + #[inline] + fn from(slice: &'static str) -> Body { + Body::from(Chunk::from(slice.as_bytes())) + } +} + +impl From> for Body { + #[inline] + fn from(cow: Cow<'static, str>) -> Body { + match cow { + Cow::Borrowed(b) => Body::from(b), + Cow::Owned(o) => Body::from(o), + } + } +} + +#[test] +fn test_body_stream_concat() { + let body = Body::from("hello world"); + + let total = body.concat2().wait().unwrap(); + assert_eq!(total.as_ref(), b"hello world"); +} diff --git a/third_party/rust/hyper/src/body/chunk.rs b/third_party/rust/hyper/src/body/chunk.rs new file mode 100644 index 000000000000..7cf7237451b8 --- /dev/null +++ b/third_party/rust/hyper/src/body/chunk.rs @@ -0,0 +1,186 @@ +use std::fmt; + +use bytes::{Buf, Bytes}; + +/// A piece of a message body. 
+/// +/// These are returned by [`Body`](::Body). It is an efficient buffer type. +/// +/// A `Chunk` can be easily created by many of Rust's standard types that +/// represent a collection of bytes, using `Chunk::from`. +pub struct Chunk { + /// The buffer of bytes making up this body. + bytes: Bytes, +} + +// An unexported type to prevent locking `Chunk::into_iter()` to `Bytes::into_iter()`. +#[derive(Debug)] +pub struct IntoIter { + inner: ::IntoIter, +} + + +impl Chunk { + /// Converts this `Chunk` directly into the `Bytes` type without copies. + /// + /// This is simply an inherent alias for `Bytes::from(chunk)`, which exists, + /// but doesn't appear in rustdocs. + #[inline] + pub fn into_bytes(self) -> Bytes { + self.into() + } +} + +impl Buf for Chunk { + #[inline] + fn remaining(&self) -> usize { + //perf: Bytes::len() isn't inline yet, + //so it's slightly slower than checking + //the length of the slice. + self.bytes().len() + } + + #[inline] + fn bytes(&self) -> &[u8] { + &self.bytes + } + + #[inline] + fn advance(&mut self, cnt: usize) { + self.bytes.advance(cnt); + } +} + +impl From> for Chunk { + #[inline] + fn from(v: Vec) -> Chunk { + Chunk::from(Bytes::from(v)) + } +} + +impl From<&'static [u8]> for Chunk { + #[inline] + fn from(slice: &'static [u8]) -> Chunk { + Chunk::from(Bytes::from_static(slice)) + } +} + +impl From for Chunk { + #[inline] + fn from(s: String) -> Chunk { + s.into_bytes().into() + } +} + +impl From<&'static str> for Chunk { + #[inline] + fn from(slice: &'static str) -> Chunk { + slice.as_bytes().into() + } +} + +impl From for Chunk { + #[inline] + fn from(bytes: Bytes) -> Chunk { + Chunk { + bytes: bytes, + } + } +} + +impl From for Bytes { + #[inline] + fn from(chunk: Chunk) -> Bytes { + chunk.bytes + } +} + +impl ::std::ops::Deref for Chunk { + type Target = [u8]; + + #[inline] + fn deref(&self) -> &Self::Target { + self.as_ref() + } +} + +impl AsRef<[u8]> for Chunk { + #[inline] + fn as_ref(&self) -> &[u8] { + &self.bytes + 
} +} + +impl fmt::Debug for Chunk { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self.as_ref(), f) + } +} + +impl Default for Chunk { + #[inline] + fn default() -> Chunk { + Chunk::from(Bytes::new()) + } +} + +impl IntoIterator for Chunk { + type Item = u8; + type IntoIter = IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + IntoIter { + inner: self.bytes.into_iter(), + } + } +} + +impl Extend for Chunk { + #[inline] + fn extend(&mut self, iter: T) where T: IntoIterator { + self.bytes.extend(iter) + } +} + +impl Iterator for IntoIter { + type Item = u8; + + #[inline] + fn next(&mut self) -> Option { + self.inner.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for IntoIter {} + +#[cfg(test)] +mod tests { + #[cfg(feature = "nightly")] + use test::Bencher; + + #[cfg(feature = "nightly")] + #[bench] + fn bench_chunk_static_buf(b: &mut Bencher) { + use bytes::BufMut; + + let s = "Hello, World!"; + b.bytes = s.len() as u64; + + let mut dst = Vec::with_capacity(128); + + b.iter(|| { + let chunk = ::Chunk::from(s); + dst.put(chunk); + ::test::black_box(&dst); + unsafe { dst.set_len(0); } + }) + } +} + diff --git a/third_party/rust/hyper/src/body/mod.rs b/third_party/rust/hyper/src/body/mod.rs new file mode 100644 index 000000000000..951fa98d8953 --- /dev/null +++ b/third_party/rust/hyper/src/body/mod.rs @@ -0,0 +1,49 @@ +//! Streaming bodies for Requests and Responses +//! +//! For both [Clients](::client) and [Servers](::server), requests and +//! responses use streaming bodies, instead of complete buffering. This +//! allows applications to not use memory they don't need, and allows exerting +//! back-pressure on connections by only reading when asked. +//! +//! There are two pieces to this in hyper: +//! +//! - The [`Payload`](Payload) trait the describes all possible bodies. hyper +//! 
allows any body type that implements `Payload`, allowing applications to +//! have fine-grained control over their streaming. +//! - The [`Body`](Body) concrete type, which is an implementation of `Payload`, +//! and returned by hyper as a "receive stream" (so, for server requests and +//! client responses). It is also a decent default implementation if you don't +//! have very custom needs of your send streams. +pub use self::body::{Body, Sender}; +pub use self::chunk::Chunk; +pub use self::payload::Payload; + +mod body; +mod chunk; +mod payload; + +// The full_data API is not stable, so these types are to try to prevent +// users from being able to: +// +// - Implment `__hyper_full_data` on their own Payloads. +// - Call `__hyper_full_data` on any Payload. +// +// That's because to implement it, they need to name these types, and +// they can't because they aren't exported. And to call it, they would +// need to create one of these values, which they also can't. +pub(crate) mod internal { + #[allow(missing_debug_implementations)] + pub struct FullDataArg(pub(crate) ()); + #[allow(missing_debug_implementations)] + pub struct FullDataRet(pub(crate) Option); +} + +fn _assert_send_sync() { + fn _assert_send() {} + fn _assert_sync() {} + + _assert_send::(); + _assert_send::(); + _assert_sync::(); +} + diff --git a/third_party/rust/hyper/src/body/payload.rs b/third_party/rust/hyper/src/body/payload.rs new file mode 100644 index 000000000000..9003261689be --- /dev/null +++ b/third_party/rust/hyper/src/body/payload.rs @@ -0,0 +1,97 @@ +use bytes::Buf; +use futures::{Async, Poll}; +use http::HeaderMap; + +use super::internal::{FullDataArg, FullDataRet}; + +/// This trait represents a streaming body of a `Request` or `Response`. +/// +/// The built-in implementation of this trait is [`Body`](Body), in case you +/// don't need to customize a send stream for your own application. 
+pub trait Payload: Send + 'static { + /// A buffer of bytes representing a single chunk of a body. + type Data: Buf + Send; + + /// The error type of this stream. + type Error: Into>; + + /// Poll for a `Data` buffer. + /// + /// Similar to `Stream::poll_next`, this yields `Some(Data)` until + /// the body ends, when it yields `None`. + fn poll_data(&mut self) -> Poll, Self::Error>; + + /// Poll for an optional **single** `HeaderMap` of trailers. + /// + /// This should **only** be called after `poll_data` has ended. + /// + /// Note: Trailers aren't currently used for HTTP/1, only for HTTP/2. + fn poll_trailers(&mut self) -> Poll, Self::Error> { + Ok(Async::Ready(None)) + } + + /// A hint that the `Body` is complete, and doesn't need to be polled more. + /// + /// This can be useful to determine if the there is any body or trailers + /// without having to poll. An empty `Body` could return `true` and hyper + /// would be able to know that only the headers need to be sent. Or, it can + /// also be checked after each `poll_data` call, to allow hyper to try to + /// end the underlying stream with the last chunk, instead of needing to + /// send an extra `DATA` frame just to mark the stream as finished. + /// + /// As a hint, it is used to try to optimize, and thus is OK for a default + /// implementation to return `false`. + fn is_end_stream(&self) -> bool { + false + } + + /// Return a length of the total bytes that will be streamed, if known. + /// + /// If an exact size of bytes is known, this would allow hyper to send a + /// `Content-Length` header automatically, not needing to fall back to + /// `Transfer-Encoding: chunked`. + /// + /// This does not need to be kept updated after polls, it will only be + /// called once to create the headers. + fn content_length(&self) -> Option { + None + } + + // This API is unstable, and is impossible to use outside of hyper. Some + // form of it may become stable in a later version. 
+ // + // The only thing a user *could* do is reference the method, but DON'T + // DO THAT! :) + #[doc(hidden)] + fn __hyper_full_data(&mut self, FullDataArg) -> FullDataRet { + FullDataRet(None) + } +} + +impl Payload for Box { + type Data = E::Data; + type Error = E::Error; + + fn poll_data(&mut self) -> Poll, Self::Error> { + (**self).poll_data() + } + + fn poll_trailers(&mut self) -> Poll, Self::Error> { + (**self).poll_trailers() + } + + fn is_end_stream(&self) -> bool { + (**self).is_end_stream() + } + + fn content_length(&self) -> Option { + (**self).content_length() + } + + #[doc(hidden)] + fn __hyper_full_data(&mut self, arg: FullDataArg) -> FullDataRet { + (**self).__hyper_full_data(arg) + } +} + + diff --git a/third_party/rust/hyper/src/buffer.rs b/third_party/rust/hyper/src/buffer.rs deleted file mode 100644 index 32d009bc8292..000000000000 --- a/third_party/rust/hyper/src/buffer.rs +++ /dev/null @@ -1,197 +0,0 @@ -use std::cmp; -use std::io::{self, Read, BufRead}; - -pub struct BufReader { - inner: R, - buf: Vec, - pos: usize, - cap: usize, -} - -const INIT_BUFFER_SIZE: usize = 4096; -pub const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; - -impl BufReader { - #[inline] - pub fn new(rdr: R) -> BufReader { - BufReader::with_capacity(rdr, INIT_BUFFER_SIZE) - } - - #[inline] - pub fn from_parts(rdr: R, buf: Vec, pos: usize, cap: usize) -> BufReader { - BufReader { - inner: rdr, - buf: buf, - pos: pos, - cap: cap, - } - } - - #[inline] - pub fn with_capacity(rdr: R, cap: usize) -> BufReader { - BufReader { - inner: rdr, - buf: vec![0; cap], - pos: 0, - cap: 0, - } - } - - #[inline] - pub fn get_ref(&self) -> &R { &self.inner } - - #[inline] - pub fn get_mut(&mut self) -> &mut R { &mut self.inner } - - #[inline] - pub fn get_buf(&self) -> &[u8] { - if self.pos < self.cap { - trace!("get_buf [u8; {}][{}..{}]", self.buf.len(), self.pos, self.cap); - &self.buf[self.pos..self.cap] - } else { - trace!("get_buf []"); - &[] - } - } - - /// Extracts the buffer from 
this reader. Return the current cursor position - /// and the position of the last valid byte. - /// - /// This operation does not copy the buffer. Instead, it directly returns - /// the internal buffer. As a result, this reader will no longer have any - /// buffered contents and any subsequent read from this reader will not - /// include the returned buffered contents. - #[inline] - pub fn take_buf(&mut self) -> (Vec, usize, usize) { - let (pos, cap) = (self.pos, self.cap); - self.pos = 0; - self.cap = 0; - - let mut output = vec![0; INIT_BUFFER_SIZE]; - ::std::mem::swap(&mut self.buf, &mut output); - (output, pos, cap) - } - - #[inline] - pub fn into_inner(self) -> R { self.inner } - - #[inline] - pub fn into_parts(self) -> (R, Vec, usize, usize) { - (self.inner, self.buf, self.pos, self.cap) - } - - #[inline] - pub fn read_into_buf(&mut self) -> io::Result { - self.maybe_reserve(); - let v = &mut self.buf; - trace!("read_into_buf buf[{}..{}]", self.cap, v.len()); - if self.cap < v.capacity() { - let nread = try!(self.inner.read(&mut v[self.cap..])); - self.cap += nread; - Ok(nread) - } else { - trace!("read_into_buf at full capacity"); - Ok(0) - } - } - - #[inline] - fn maybe_reserve(&mut self) { - let cap = self.buf.capacity(); - if self.cap == cap && cap < MAX_BUFFER_SIZE { - self.buf.reserve(cmp::min(cap * 4, MAX_BUFFER_SIZE) - cap); - let new = self.buf.capacity() - self.buf.len(); - trace!("reserved {}", new); - unsafe { grow_zerofill(&mut self.buf, new) } - } - } -} - -#[inline] -unsafe fn grow_zerofill(buf: &mut Vec, additional: usize) { - use std::ptr; - let len = buf.len(); - buf.set_len(len + additional); - ptr::write_bytes(buf.as_mut_ptr().offset(len as isize), 0, additional); -} - -impl Read for BufReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - if self.cap == self.pos && buf.len() >= self.buf.len() { - return self.inner.read(buf); - } - let nread = { - let mut rem = try!(self.fill_buf()); - try!(rem.read(buf)) - }; - 
self.consume(nread); - Ok(nread) - } -} - -impl BufRead for BufReader { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - if self.pos == self.cap { - self.cap = try!(self.inner.read(&mut self.buf)); - self.pos = 0; - } - Ok(&self.buf[self.pos..self.cap]) - } - - #[inline] - fn consume(&mut self, amt: usize) { - self.pos = cmp::min(self.pos + amt, self.cap); - if self.pos == self.cap { - self.pos = 0; - self.cap = 0; - } - } -} - -#[cfg(test)] -mod tests { - - use std::io::{self, Read, BufRead}; - use super::BufReader; - - struct SlowRead(u8); - - impl Read for SlowRead { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let state = self.0; - self.0 += 1; - (&match state % 3 { - 0 => b"foo", - 1 => b"bar", - _ => b"baz", - }[..]).read(buf) - } - } - - #[test] - fn test_consume_and_get_buf() { - let mut rdr = BufReader::new(SlowRead(0)); - rdr.read_into_buf().unwrap(); - rdr.consume(1); - assert_eq!(rdr.get_buf(), b"oo"); - rdr.read_into_buf().unwrap(); - rdr.read_into_buf().unwrap(); - assert_eq!(rdr.get_buf(), b"oobarbaz"); - rdr.consume(5); - assert_eq!(rdr.get_buf(), b"baz"); - rdr.consume(3); - assert_eq!(rdr.get_buf(), b""); - assert_eq!(rdr.pos, 0); - assert_eq!(rdr.cap, 0); - } - - #[test] - fn test_resize() { - let raw = b"hello world"; - let mut rdr = BufReader::with_capacity(&raw[..], 5); - rdr.read_into_buf().unwrap(); - assert_eq!(rdr.get_buf(), b"hello"); - rdr.read_into_buf().unwrap(); - assert_eq!(rdr.get_buf(), b"hello world"); - } -} diff --git a/third_party/rust/hyper/src/client/conn.rs b/third_party/rust/hyper/src/client/conn.rs new file mode 100644 index 000000000000..d030c27a1a42 --- /dev/null +++ b/third_party/rust/hyper/src/client/conn.rs @@ -0,0 +1,592 @@ +//! Lower-level client connection API. +//! +//! The types in this module are to provide a lower-level API based around a +//! single connection. Connecting to a host, pooling connections, and the like +//! are not handled at this level. This module provides the building blocks to +//! 
customize those things externally. +//! +//! If don't have need to manage connections yourself, consider using the +//! higher-level [Client](super) API. +use std::fmt; +use std::marker::PhantomData; +use std::mem; + +use bytes::Bytes; +use futures::{Async, Future, Poll}; +use futures::future::{self, Either}; +use tokio_io::{AsyncRead, AsyncWrite}; + +use body::Payload; +use common::Exec; +use upgrade::Upgraded; +use proto; +use super::dispatch; +use {Body, Request, Response}; + +type Http1Dispatcher = proto::dispatch::Dispatcher< + proto::dispatch::Client, + B, + T, + R, +>; +type ConnEither = Either< + Http1Dispatcher, + proto::h2::Client, +>; + +/// Returns a `Handshake` future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +pub fn handshake(io: T) -> Handshake +where + T: AsyncRead + AsyncWrite + Send + 'static, +{ + Builder::new() + .handshake(io) +} + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::Sender, Response>, +} + + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, +{ + inner: Option>, +} + + +/// A builder to configure an HTTP connection. +/// +/// After setting options, the builder is used to create a `Handshake` future. +#[derive(Clone, Debug)] +pub struct Builder { + exec: Exec, + h1_writev: bool, + h1_title_case_headers: bool, + http2: bool, +} + +/// A future setting up HTTP over an IO object. +/// +/// If successful, yields a `(SendRequest, Connection)` pair. 
+#[must_use = "futures do nothing unless polled"] +pub struct Handshake { + builder: Builder, + io: Option, + _marker: PhantomData, +} + +/// A future returned by `SendRequest::send_request`. +/// +/// Yields a `Response` if successful. +#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + // for now, a Box is used to hide away the internal `B` + // that can be returned if canceled + inner: Box, Error=::Error> + Send>, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// For instance, if the `Connection` is used for an HTTP upgrade request, + /// it is possible the server sent back the first bytes of the new protocol + /// along with the response upgrade. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + _inner: (), +} + +// ========== internal client api + +/// A `Future` for when `SendRequest::poll_ready()` is ready. +#[must_use = "futures do nothing unless polled"] +pub(super) struct WhenReady { + tx: Option>, +} + +// A `SendRequest` that can be cloned to send HTTP2 requests. +// private for now, probably not a great idea of a type... +#[must_use = "futures do nothing unless polled"] +pub(super) struct Http2SendRequest { + dispatch: dispatch::UnboundedSender, Response>, +} + +// ===== impl SendRequest + +impl SendRequest +{ + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. 
+ pub fn poll_ready(&mut self) -> Poll<(), ::Error> { + self.dispatch.poll_ready() + } + + pub(super) fn when_ready(self) -> WhenReady { + WhenReady { + tx: Some(self), + } + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } + + pub(super) fn into_http2(self) -> Http2SendRequest { + Http2SendRequest { + dispatch: self.dispatch.unbound(), + } + } +} + +impl SendRequest +where + B: Payload + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. You must add a `Host` header yourself + /// before calling this method. + /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. 
+ /// + /// # Example + /// + /// ``` + /// # extern crate futures; + /// # extern crate hyper; + /// # extern crate http; + /// # use http::header::HOST; + /// # use hyper::client::conn::SendRequest; + /// # use hyper::Body; + /// use futures::Future; + /// use hyper::Request; + /// + /// # fn doc(mut tx: SendRequest) { + /// // build a Request + /// let req = Request::builder() + /// .uri("/foo/bar") + /// .header(HOST, "hyper.rs") + /// .body(Body::empty()) + /// .unwrap(); + /// + /// // send it and get a future back + /// let fut = tx.send_request(req) + /// .map(|res| { + /// // got the Response + /// assert!(res.status().is_success()); + /// }); + /// # drop(fut); + /// # } + /// # fn main() {} + /// ``` + pub fn send_request(&mut self, req: Request) -> ResponseFuture { + let inner = match self.dispatch.send(req) { + Ok(rx) => { + Either::A(rx.then(move |res| { + match res { + Ok(Ok(res)) => Ok(res), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + } + })) + }, + Err(_req) => { + debug!("connection was not ready"); + let err = ::Error::new_canceled(Some("connection was not ready")); + Either::B(future::err(err)) + } + }; + + ResponseFuture { + inner: Box::new(inner), + } + } + + //TODO: replace with `impl Future` when stable + pub(crate) fn send_request_retryable(&mut self, req: Request) -> Box, Error=(::Error, Option>)> + Send> + where + B: Send, + { + let inner = match self.dispatch.try_send(req) { + Ok(rx) => { + Either::A(rx.then(move |res| { + match res { + Ok(Ok(res)) => Ok(res), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! 
+ Err(_) => panic!("dispatch dropped without returning error"), + } + })) + }, + Err(req) => { + debug!("connection was not ready"); + let err = ::Error::new_canceled(Some("connection was not ready")); + Either::B(future::err((err, Some(req)))) + } + }; + Box::new(inner) + } +} + +/* TODO(0.12.0): when we change from tokio-service to tower. +impl Service for SendRequest { + type Request = Request; + type Response = Response; + type Error = ::Error; + type Future = ResponseFuture; + + fn call(&self, req: Self::Request) -> Self::Future { + + } +} +*/ + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SendRequest") + .finish() + } +} + +// ===== impl Http2SendRequest + +impl Http2SendRequest { + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } +} + +impl Http2SendRequest +where + B: Payload + 'static, +{ + //TODO: replace with `impl Future` when stable + pub(super) fn send_request_retryable(&mut self, req: Request) -> Box, Error=(::Error, Option>)> + Send> + where + B: Send, + { + let inner = match self.dispatch.try_send(req) { + Ok(rx) => { + Either::A(rx.then(move |res| { + match res { + Ok(Ok(res)) => Ok(res), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! 
+ Err(_) => panic!("dispatch dropped without returning error"), + } + })) + }, + Err(req) => { + debug!("connection was not ready"); + let err = ::Error::new_canceled(Some("connection was not ready")); + Either::B(future::err((err, Some(req)))) + } + }; + Box::new(inner) + } +} + +impl fmt::Debug for Http2SendRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Http2SendRequest") + .finish() + } +} + +impl Clone for Http2SendRequest { + fn clone(&self) -> Self { + Http2SendRequest { + dispatch: self.dispatch.clone(), + } + } +} + +// ===== impl Connection + +impl Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, +{ + /// Return the inner IO object, and additional information. + /// + /// Only works for HTTP/1 connections. HTTP/2 connections will panic. + pub fn into_parts(self) -> Parts { + let (io, read_buf, _) = match self.inner.expect("already upgraded") { + Either::A(h1) => h1.into_inner(), + Either::B(_h2) => { + panic!("http2 cannot into_inner"); + } + }; + + Parts { + io: io, + read_buf: read_buf, + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actally shutdown the IO object. Instead you + /// would take it back using `into_parts`. 
+ pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> { + match self.inner.as_mut().expect("already upgraded") { + &mut Either::A(ref mut h1) => { + h1.poll_without_shutdown() + }, + &mut Either::B(ref mut h2) => { + h2.poll().map(|x| x.map(|_| ())) + } + } + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, +{ + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + match try_ready!(self.inner.poll()) { + Some(proto::Dispatched::Shutdown) | + None => { + Ok(Async::Ready(())) + }, + Some(proto::Dispatched::Upgrade(pending)) => { + let h1 = match mem::replace(&mut self.inner, None) { + Some(Either::A(h1)) => h1, + _ => unreachable!("Upgrade expects h1"), + }; + + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(Box::new(io), buf)); + Ok(Async::Ready(())) + } + } + } +} + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Payload + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Connection") + .finish() + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new() -> Builder { + Builder { + exec: Exec::Default, + h1_writev: true, + h1_title_case_headers: false, + http2: false, + } + } + + pub(super) fn exec(&mut self, exec: Exec) -> &mut Builder { + self.exec = exec; + self + } + + pub(super) fn h1_writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_writev = enabled; + self + } + + pub(super) fn h1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { + self.h1_title_case_headers = enabled; + self + } + + /// Sets whether HTTP2 is required. + /// + /// Default is false. + pub fn http2_only(&mut self, enabled: bool) -> &mut Builder { + self.http2 = enabled; + self + } + + /// Constructs a connection with the configured options and IO. 
+ #[inline] + pub fn handshake(&self, io: T) -> Handshake + where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, + { + Handshake { + builder: self.clone(), + io: Some(io), + _marker: PhantomData, + } + } +} + +// ===== impl Handshake + +impl Future for Handshake +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, +{ + type Item = (SendRequest, Connection); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + let io = self.io.take().expect("polled more than once"); + let (tx, rx) = dispatch::channel(); + let either = if !self.builder.http2 { + let mut conn = proto::Conn::new(io); + if !self.builder.h1_writev { + conn.set_write_strategy_flatten(); + } + if self.builder.h1_title_case_headers { + conn.set_title_case_headers(); + } + let cd = proto::h1::dispatch::Client::new(rx); + let dispatch = proto::h1::Dispatcher::new(cd, conn); + Either::A(dispatch) + } else { + let h2 = proto::h2::Client::new(io, rx, self.builder.exec.clone()); + Either::B(h2) + }; + + Ok(Async::Ready(( + SendRequest { + dispatch: tx, + }, + Connection { + inner: Some(either), + }, + ))) + } +} + +impl fmt::Debug for Handshake { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Handshake") + .finish() + } +} + +// ===== impl ResponseFuture + +impl Future for ResponseFuture { + type Item = Response; + type Error = ::Error; + + #[inline] + fn poll(&mut self) -> Poll { + self.inner.poll() + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ResponseFuture") + .finish() + } +} + +// ===== impl WhenReady + +impl Future for WhenReady { + type Item = SendRequest; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + let mut tx = self.tx.take().expect("polled after complete"); + match tx.poll_ready()? 
{ + Async::Ready(()) => Ok(Async::Ready(tx)), + Async::NotReady => { + self.tx = Some(tx); + Ok(Async::NotReady) + } + } + } +} + +// assert trait markers + +trait AssertSend: Send {} +trait AssertSendSync: Send + Sync {} + + +#[doc(hidden)] +impl AssertSendSync for SendRequest {} + +#[doc(hidden)] +impl AssertSend for Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, +{} + +#[doc(hidden)] +impl AssertSendSync for Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, + B::Data: Send + Sync + 'static, +{} + +#[doc(hidden)] +impl AssertSendSync for Builder {} + +#[doc(hidden)] +impl AssertSend for ResponseFuture {} + diff --git a/third_party/rust/hyper/src/client/connect.rs b/third_party/rust/hyper/src/client/connect.rs new file mode 100644 index 000000000000..8e0559440b41 --- /dev/null +++ b/third_party/rust/hyper/src/client/connect.rs @@ -0,0 +1,1082 @@ +//! The `Connect` trait, and supporting types. +//! +//! This module contains: +//! +//! - A default [`HttpConnector`](HttpConnector) that does DNS resolution and +//! establishes connections over TCP. +//! - The [`Connect`](Connect) trait and related types to build custom connectors. +use std::error::Error as StdError; +use std::mem; + +use bytes::{BufMut, BytesMut}; +use futures::Future; +use http::{uri, Uri}; +use tokio_io::{AsyncRead, AsyncWrite}; + +#[cfg(feature = "runtime")] pub use self::http::HttpConnector; + +/// Connect to a destination, returning an IO transport. +/// +/// A connector receives a [`Destination`](Destination) describing how a +/// connection should be estabilished, and returns a `Future` of the +/// ready connection. +pub trait Connect: Send + Sync { + /// The connected IO Stream. + type Transport: AsyncRead + AsyncWrite + Send + 'static; + /// An error occured when trying to connect. + type Error: Into>; + /// A Future that will resolve to the connected Transport. 
+ type Future: Future + Send; + /// Connect to a destination. + fn connect(&self, dst: Destination) -> Self::Future; +} + +/// A set of properties to describe where and how to try to connect. +#[derive(Clone, Debug)] +pub struct Destination { + //pub(super) alpn: Alpn, + pub(super) uri: Uri, +} + +/// Extra information about the connected transport. +/// +/// This can be used to inform recipients about things like if ALPN +/// was used, or if connected to an HTTP proxy. +#[derive(Debug)] +pub struct Connected { + //alpn: Alpn, + pub(super) is_proxied: bool, +} + +/*TODO: when HTTP1 Upgrades to H2 are added, this will be needed +#[derive(Debug)] +pub(super) enum Alpn { + Http1, + //H2, + //Http1OrH2 +} +*/ + +impl Destination { + /// Get the protocol scheme. + #[inline] + pub fn scheme(&self) -> &str { + self.uri + .scheme_part() + .map(|s| s.as_str()) + .unwrap_or("") + } + + /// Get the hostname. + #[inline] + pub fn host(&self) -> &str { + self.uri + .host() + .unwrap_or("") + } + + /// Get the port, if specified. + #[inline] + pub fn port(&self) -> Option { + self.uri.port() + } + + /// Update the scheme of this destination. + /// + /// # Example + /// + /// ```rust + /// # use hyper::client::connect::Destination; + /// # fn with_dst(mut dst: Destination) { + /// // let mut dst = some_destination... + /// // Change from "http://"... + /// assert_eq!(dst.scheme(), "http"); + /// + /// // to "ws://"... + /// dst.set_scheme("ws"); + /// assert_eq!(dst.scheme(), "ws"); + /// # } + /// ``` + /// + /// # Error + /// + /// Returns an error if the string is not a valid scheme. + pub fn set_scheme(&mut self, scheme: &str) -> ::Result<()> { + let scheme = scheme.parse().map_err(::error::Parse::from)?; + self.update_uri(move |parts| { + parts.scheme = Some(scheme); + }) + } + + /// Update the host of this destination. 
+ /// + /// # Example + /// + /// ```rust + /// # use hyper::client::connect::Destination; + /// # fn with_dst(mut dst: Destination) { + /// // let mut dst = some_destination... + /// // Change from "hyper.rs"... + /// assert_eq!(dst.host(), "hyper.rs"); + /// + /// // to "some.proxy"... + /// dst.set_host("some.proxy"); + /// assert_eq!(dst.host(), "some.proxy"); + /// # } + /// ``` + /// + /// # Error + /// + /// Returns an error if the string is not a valid hostname. + pub fn set_host(&mut self, host: &str) -> ::Result<()> { + if host.contains(&['@',':'][..]) { + return Err(::error::Parse::Uri.into()); + } + let auth = if let Some(port) = self.port() { + format!("{}:{}", host, port).parse().map_err(::error::Parse::from)? + } else { + host.parse().map_err(::error::Parse::from)? + }; + self.update_uri(move |parts| { + parts.authority = Some(auth); + }) + } + + /// Update the port of this destination. + /// + /// # Example + /// + /// ```rust + /// # use hyper::client::connect::Destination; + /// # fn with_dst(mut dst: Destination) { + /// // let mut dst = some_destination... + /// // Change from "None"... + /// assert_eq!(dst.port(), None); + /// + /// // to "4321"... + /// dst.set_port(4321); + /// assert_eq!(dst.port(), Some(4321)); + /// + /// // Or remove the port... + /// dst.set_port(None); + /// assert_eq!(dst.port(), None); + /// # } + /// ``` + pub fn set_port

(&mut self, port: P) + where + P: Into>, + { + self.set_port_opt(port.into()); + } + + fn set_port_opt(&mut self, port: Option) { + use std::fmt::Write; + + let auth = if let Some(port) = port { + let host = self.host(); + // Need space to copy the hostname, plus ':', + // plus max 5 port digits... + let cap = host.len() + 1 + 5; + let mut buf = BytesMut::with_capacity(cap); + buf.put_slice(host.as_bytes()); + buf.put_u8(b':'); + write!(buf, "{}", port) + .expect("should have space for 5 digits"); + + uri::Authority::from_shared(buf.freeze()) + .expect("valid host + :port should be valid authority") + } else { + self.host().parse() + .expect("valid host without port should be valid authority") + }; + + self.update_uri(move |parts| { + parts.authority = Some(auth); + }) + .expect("valid uri should be valid with port"); + } + + fn update_uri(&mut self, f: F) -> ::Result<()> + where + F: FnOnce(&mut uri::Parts) + { + // Need to store a default Uri while we modify the current one... + let old_uri = mem::replace(&mut self.uri, Uri::default()); + // However, mutate a clone, so we can revert if there's an error... + let mut parts: uri::Parts = old_uri.clone().into(); + + f(&mut parts); + + match Uri::from_parts(parts) { + Ok(uri) => { + self.uri = uri; + Ok(()) + }, + Err(err) => { + self.uri = old_uri; + Err(::error::Parse::from(err).into()) + }, + } + } + + /* + /// Returns whether this connection must negotiate HTTP/2 via ALPN. + pub fn must_h2(&self) -> bool { + match self.alpn { + Alpn::Http1 => false, + Alpn::H2 => true, + } + } + */ +} + +impl Connected { + /// Create new `Connected` type with empty metadata. + pub fn new() -> Connected { + Connected { + //alpn: Alpn::Http1, + is_proxied: false, + } + } + + /// Set whether the connected transport is to an HTTP proxy. 
+ /// + /// This setting will affect if HTTP/1 requests written on the transport + /// will have the request-target in absolute-form or origin-form (such as + /// `GET http://hyper.rs/guide HTTP/1.1` or `GET /guide HTTP/1.1`). + /// + /// Default is `false`. + pub fn proxy(mut self, is_proxied: bool) -> Connected { + self.is_proxied = is_proxied; + self + } + + /* + /// Set that the connected transport negotiated HTTP/2 as it's + /// next protocol. + pub fn h2(mut self) -> Connected { + self.alpn = Alpn::H2; + self + } + */ +} + +#[cfg(test)] +mod tests { + use super::Destination; + + #[test] + fn test_destination_set_scheme() { + let mut dst = Destination { + uri: "http://hyper.rs".parse().expect("initial parse"), + }; + + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + + dst.set_scheme("https").expect("set https"); + assert_eq!(dst.scheme(), "https"); + assert_eq!(dst.host(), "hyper.rs"); + + dst.set_scheme("").unwrap_err(); + assert_eq!(dst.scheme(), "https", "error doesn't modify dst"); + assert_eq!(dst.host(), "hyper.rs", "error doesn't modify dst"); + } + + #[test] + fn test_destination_set_host() { + let mut dst = Destination { + uri: "http://hyper.rs".parse().expect("initial parse"), + }; + + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), None); + + dst.set_host("seanmonstar.com").expect("set https"); + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "seanmonstar.com"); + assert_eq!(dst.port(), None); + + dst.set_host("/im-not a host! >:)").unwrap_err(); + assert_eq!(dst.scheme(), "http", "error doesn't modify dst"); + assert_eq!(dst.host(), "seanmonstar.com", "error doesn't modify dst"); + assert_eq!(dst.port(), None, "error doesn't modify dst"); + + // Also test that an exist port is set correctly. 
+ let mut dst = Destination { + uri: "http://hyper.rs:8080".parse().expect("initial parse 2"), + }; + + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), Some(8080)); + + dst.set_host("seanmonstar.com").expect("set host"); + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "seanmonstar.com"); + assert_eq!(dst.port(), Some(8080)); + + dst.set_host("/im-not a host! >:)").unwrap_err(); + assert_eq!(dst.scheme(), "http", "error doesn't modify dst"); + assert_eq!(dst.host(), "seanmonstar.com", "error doesn't modify dst"); + assert_eq!(dst.port(), Some(8080), "error doesn't modify dst"); + + // Check port isn't snuck into `set_host`. + dst.set_host("seanmonstar.com:3030").expect_err("set_host sneaky port"); + assert_eq!(dst.scheme(), "http", "error doesn't modify dst"); + assert_eq!(dst.host(), "seanmonstar.com", "error doesn't modify dst"); + assert_eq!(dst.port(), Some(8080), "error doesn't modify dst"); + + // Check userinfo isn't snuck into `set_host`. + dst.set_host("sean@nope").expect_err("set_host sneaky userinfo"); + assert_eq!(dst.scheme(), "http", "error doesn't modify dst"); + assert_eq!(dst.host(), "seanmonstar.com", "error doesn't modify dst"); + assert_eq!(dst.port(), Some(8080), "error doesn't modify dst"); + } + + #[test] + fn test_destination_set_port() { + let mut dst = Destination { + uri: "http://hyper.rs".parse().expect("initial parse"), + }; + + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), None); + + dst.set_port(None); + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), None); + + dst.set_port(8080); + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), Some(8080)); + + // Also test that an exist port is set correctly. 
+ let mut dst = Destination { + uri: "http://hyper.rs:8080".parse().expect("initial parse 2"), + }; + + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), Some(8080)); + + dst.set_port(3030); + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), Some(3030)); + + dst.set_port(None); + assert_eq!(dst.scheme(), "http"); + assert_eq!(dst.host(), "hyper.rs"); + assert_eq!(dst.port(), None); + } +} + +#[cfg(feature = "runtime")] +mod http { + use super::*; + + use std::borrow::Cow; + use std::fmt; + use std::io; + use std::mem; + use std::net::{IpAddr, SocketAddr}; + use std::sync::Arc; + use std::time::{Duration, Instant}; + + use futures::{Async, Poll}; + use futures::future::{Executor, ExecuteError}; + use futures::sync::oneshot; + use futures_cpupool::{Builder as CpuPoolBuilder}; + use http::uri::Scheme; + use net2::TcpBuilder; + use tokio_reactor::Handle; + use tokio_tcp::{TcpStream, ConnectFuture}; + use tokio_timer::Delay; + + use super::super::dns; + + use self::http_connector::HttpConnectorBlockingTask; + + + fn connect(addr: &SocketAddr, local_addr: &Option, handle: &Option, reuse_address: bool) -> io::Result { + let builder = match addr { + &SocketAddr::V4(_) => TcpBuilder::new_v4()?, + &SocketAddr::V6(_) => TcpBuilder::new_v6()?, + }; + + if reuse_address { + builder.reuse_address(reuse_address)?; + } + + if let Some(ref local_addr) = *local_addr { + // Caller has requested this socket be bound before calling connect + builder.bind(SocketAddr::new(local_addr.clone(), 0))?; + } + else if cfg!(windows) { + // Windows requires a socket be bound before calling connect + let any: SocketAddr = match addr { + &SocketAddr::V4(_) => { + ([0, 0, 0, 0], 0).into() + }, + &SocketAddr::V6(_) => { + ([0, 0, 0, 0, 0, 0, 0, 0], 0).into() + } + }; + builder.bind(any)?; + } + + let handle = match *handle { + Some(ref handle) => Cow::Borrowed(handle), + None => 
Cow::Owned(Handle::current()), + }; + + Ok(TcpStream::connect_std(builder.to_tcp_stream()?, addr, &handle)) + } + + /// A connector for the `http` scheme. + /// + /// Performs DNS resolution in a thread pool, and then connects over TCP. + #[derive(Clone)] + pub struct HttpConnector { + executor: HttpConnectExecutor, + enforce_http: bool, + handle: Option, + keep_alive_timeout: Option, + nodelay: bool, + local_address: Option, + happy_eyeballs_timeout: Option, + reuse_address: bool, + } + + impl HttpConnector { + /// Construct a new HttpConnector. + /// + /// Takes number of DNS worker threads. + #[inline] + pub fn new(threads: usize) -> HttpConnector { + HttpConnector::new_with_handle_opt(threads, None) + } + + /// Construct a new HttpConnector with a specific Tokio handle. + pub fn new_with_handle(threads: usize, handle: Handle) -> HttpConnector { + HttpConnector::new_with_handle_opt(threads, Some(handle)) + } + + fn new_with_handle_opt(threads: usize, handle: Option) -> HttpConnector { + let pool = CpuPoolBuilder::new() + .name_prefix("hyper-dns") + .pool_size(threads) + .create(); + HttpConnector::new_with_executor(pool, handle) + } + + /// Construct a new HttpConnector. + /// + /// Takes an executor to run blocking tasks on. + pub fn new_with_executor(executor: E, handle: Option) -> HttpConnector + where E: Executor + Send + Sync + { + HttpConnector { + executor: HttpConnectExecutor(Arc::new(executor)), + enforce_http: true, + handle, + keep_alive_timeout: None, + nodelay: false, + local_address: None, + happy_eyeballs_timeout: Some(Duration::from_millis(300)), + reuse_address: false, + } + } + + /// Option to enforce all `Uri`s have the `http` scheme. + /// + /// Enabled by default. + #[inline] + pub fn enforce_http(&mut self, is_enforced: bool) { + self.enforce_http = is_enforced; + } + + /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration. + /// + /// If `None`, the option will not be set. + /// + /// Default is `None`. 
+ #[inline] + pub fn set_keepalive(&mut self, dur: Option) { + self.keep_alive_timeout = dur; + } + + /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. + /// + /// Default is `false`. + #[inline] + pub fn set_nodelay(&mut self, nodelay: bool) { + self.nodelay = nodelay; + } + + /// Set that all sockets are bound to the configured address before connection. + /// + /// If `None`, the sockets will not be bound. + /// + /// Default is `None`. + #[inline] + pub fn set_local_address(&mut self, addr: Option) { + self.local_address = addr; + } + + /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. + /// + /// If hostname resolves to both IPv4 and IPv6 addresses and connection + /// cannot be established using preferred address family before timeout + /// elapses, then connector will in parallel attempt connection using other + /// address family. + /// + /// If `None`, parallel connection attempts are disabled. + /// + /// Default is 300 milliseconds. + /// + /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 + #[inline] + pub fn set_happy_eyeballs_timeout(&mut self, dur: Option) { + self.happy_eyeballs_timeout = dur; + } + + /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. + /// + /// Default is `false`. 
+ #[inline] + pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self { + self.reuse_address = reuse_address; + self + } + } + + impl fmt::Debug for HttpConnector { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("HttpConnector") + .finish() + } + } + + impl Connect for HttpConnector { + type Transport = TcpStream; + type Error = io::Error; + type Future = HttpConnecting; + + fn connect(&self, dst: Destination) -> Self::Future { + trace!( + "Http::connect; scheme={}, host={}, port={:?}", + dst.scheme(), + dst.host(), + dst.port(), + ); + + if self.enforce_http { + if dst.uri.scheme_part() != Some(&Scheme::HTTP) { + return invalid_url(InvalidUrl::NotHttp, &self.handle); + } + } else if dst.uri.scheme_part().is_none() { + return invalid_url(InvalidUrl::MissingScheme, &self.handle); + } + + let host = match dst.uri.host() { + Some(s) => s, + None => return invalid_url(InvalidUrl::MissingAuthority, &self.handle), + }; + let port = match dst.uri.port() { + Some(port) => port, + None => if dst.uri.scheme_part() == Some(&Scheme::HTTPS) { 443 } else { 80 }, + }; + + HttpConnecting { + state: State::Lazy(self.executor.clone(), host.into(), port, self.local_address), + handle: self.handle.clone(), + keep_alive_timeout: self.keep_alive_timeout, + nodelay: self.nodelay, + happy_eyeballs_timeout: self.happy_eyeballs_timeout, + reuse_address: self.reuse_address, + } + } + } + + #[inline] + fn invalid_url(err: InvalidUrl, handle: &Option) -> HttpConnecting { + HttpConnecting { + state: State::Error(Some(io::Error::new(io::ErrorKind::InvalidInput, err))), + handle: handle.clone(), + keep_alive_timeout: None, + nodelay: false, + happy_eyeballs_timeout: None, + reuse_address: false, + } + } + + #[derive(Debug, Clone, Copy)] + enum InvalidUrl { + MissingScheme, + NotHttp, + MissingAuthority, + } + + impl fmt::Display for InvalidUrl { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.description()) + } + } + 
+ impl StdError for InvalidUrl { + fn description(&self) -> &str { + match *self { + InvalidUrl::MissingScheme => "invalid URL, missing scheme", + InvalidUrl::NotHttp => "invalid URL, scheme must be http", + InvalidUrl::MissingAuthority => "invalid URL, missing domain", + } + } + } + /// A Future representing work to connect to a URL. + #[must_use = "futures do nothing unless polled"] + pub struct HttpConnecting { + state: State, + handle: Option, + keep_alive_timeout: Option, + nodelay: bool, + happy_eyeballs_timeout: Option, + reuse_address: bool, + } + + enum State { + Lazy(HttpConnectExecutor, String, u16, Option), + Resolving(oneshot::SpawnHandle, Option), + Connecting(ConnectingTcp), + Error(Option), + } + + impl Future for HttpConnecting { + type Item = (TcpStream, Connected); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + loop { + let state; + match self.state { + State::Lazy(ref executor, ref mut host, port, local_addr) => { + // If the host is already an IP addr (v4 or v6), + // skip resolving the dns and start connecting right away. 
+ if let Some(addrs) = dns::IpAddrs::try_parse(host, port) { + state = State::Connecting(ConnectingTcp::new( + local_addr, addrs, self.happy_eyeballs_timeout, self.reuse_address)); + } else { + let host = mem::replace(host, String::new()); + let work = dns::Work::new(host, port); + state = State::Resolving(oneshot::spawn(work, executor), local_addr); + } + }, + State::Resolving(ref mut future, local_addr) => { + match try!(future.poll()) { + Async::NotReady => return Ok(Async::NotReady), + Async::Ready(addrs) => { + state = State::Connecting(ConnectingTcp::new( + local_addr, addrs, self.happy_eyeballs_timeout, self.reuse_address)); + } + }; + }, + State::Connecting(ref mut c) => { + let sock = try_ready!(c.poll(&self.handle)); + + if let Some(dur) = self.keep_alive_timeout { + sock.set_keepalive(Some(dur))?; + } + + sock.set_nodelay(self.nodelay)?; + + return Ok(Async::Ready((sock, Connected::new()))); + }, + State::Error(ref mut e) => return Err(e.take().expect("polled more than once")), + } + self.state = state; + } + } + } + + impl fmt::Debug for HttpConnecting { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("HttpConnecting") + } + } + + struct ConnectingTcp { + local_addr: Option, + preferred: ConnectingTcpRemote, + fallback: Option, + reuse_address: bool, + } + + impl ConnectingTcp { + fn new( + local_addr: Option, + remote_addrs: dns::IpAddrs, + fallback_timeout: Option, + reuse_address: bool, + ) -> ConnectingTcp { + if let Some(fallback_timeout) = fallback_timeout { + let (preferred_addrs, fallback_addrs) = remote_addrs.split_by_preference(); + if fallback_addrs.is_empty() { + return ConnectingTcp { + local_addr, + preferred: ConnectingTcpRemote::new(preferred_addrs), + fallback: None, + reuse_address, + }; + } + + ConnectingTcp { + local_addr, + preferred: ConnectingTcpRemote::new(preferred_addrs), + fallback: Some(ConnectingTcpFallback { + delay: Delay::new(Instant::now() + fallback_timeout), + remote: 
ConnectingTcpRemote::new(fallback_addrs), + }), + reuse_address, + } + } else { + ConnectingTcp { + local_addr, + preferred: ConnectingTcpRemote::new(remote_addrs), + fallback: None, + reuse_address, + } + } + } + } + + struct ConnectingTcpFallback { + delay: Delay, + remote: ConnectingTcpRemote, + } + + struct ConnectingTcpRemote { + addrs: dns::IpAddrs, + current: Option, + } + + impl ConnectingTcpRemote { + fn new(addrs: dns::IpAddrs) -> Self { + Self { + addrs, + current: None, + } + } + } + + impl ConnectingTcpRemote { + // not a Future, since passing a &Handle to poll + fn poll( + &mut self, + local_addr: &Option, + handle: &Option, + reuse_address: bool, + ) -> Poll { + let mut err = None; + loop { + if let Some(ref mut current) = self.current { + match current.poll() { + Ok(ok) => return Ok(ok), + Err(e) => { + trace!("connect error {:?}", e); + err = Some(e); + if let Some(addr) = self.addrs.next() { + debug!("connecting to {}", addr); + *current = connect(&addr, local_addr, handle, reuse_address)?; + continue; + } + } + } + } else if let Some(addr) = self.addrs.next() { + debug!("connecting to {}", addr); + self.current = Some(connect(&addr, local_addr, handle, reuse_address)?); + continue; + } + + return Err(err.take().expect("missing connect error")); + } + } + } + + impl ConnectingTcp { + // not a Future, since passing a &Handle to poll + fn poll(&mut self, handle: &Option) -> Poll { + match self.fallback.take() { + None => self.preferred.poll(&self.local_addr, handle, self.reuse_address), + Some(mut fallback) => match self.preferred.poll(&self.local_addr, handle, self.reuse_address) { + Ok(Async::Ready(stream)) => { + // Preferred successful - drop fallback. 
+ Ok(Async::Ready(stream)) + } + Ok(Async::NotReady) => match fallback.delay.poll() { + Ok(Async::Ready(_)) => match fallback.remote.poll(&self.local_addr, handle, self.reuse_address) { + Ok(Async::Ready(stream)) => { + // Fallback successful - drop current preferred, + // but keep fallback as new preferred. + self.preferred = fallback.remote; + Ok(Async::Ready(stream)) + } + Ok(Async::NotReady) => { + // Neither preferred nor fallback are ready. + self.fallback = Some(fallback); + Ok(Async::NotReady) + } + Err(_) => { + // Fallback failed - resume with preferred only. + Ok(Async::NotReady) + } + }, + Ok(Async::NotReady) => { + // Too early to attempt fallback. + self.fallback = Some(fallback); + Ok(Async::NotReady) + } + Err(_) => { + // Fallback delay failed - resume with preferred only. + Ok(Async::NotReady) + } + } + Err(_) => { + // Preferred failed - use fallback as new preferred. + self.preferred = fallback.remote; + self.preferred.poll(&self.local_addr, handle, self.reuse_address) + } + } + } + } + } + + // Make this Future unnameable outside of this crate. + mod http_connector { + use super::*; + // Blocking task to be executed on a thread pool. 
+ pub struct HttpConnectorBlockingTask { + pub(super) work: oneshot::Execute + } + + impl fmt::Debug for HttpConnectorBlockingTask { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("HttpConnectorBlockingTask") + } + } + + impl Future for HttpConnectorBlockingTask { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + self.work.poll() + } + } + } + + #[derive(Clone)] + struct HttpConnectExecutor(Arc + Send + Sync>); + + impl Executor> for HttpConnectExecutor { + fn execute(&self, future: oneshot::Execute) -> Result<(), ExecuteError>> { + self.0.execute(HttpConnectorBlockingTask { work: future }) + .map_err(|err| ExecuteError::new(err.kind(), err.into_future().work)) + } + } + + #[cfg(test)] + mod tests { + use std::io; + use futures::Future; + use super::{Connect, Destination, HttpConnector}; + + #[test] + fn test_errors_missing_authority() { + let uri = "/foo/bar?baz".parse().unwrap(); + let dst = Destination { + uri, + }; + let connector = HttpConnector::new(1); + + assert_eq!(connector.connect(dst).wait().unwrap_err().kind(), io::ErrorKind::InvalidInput); + } + + #[test] + fn test_errors_enforce_http() { + let uri = "https://example.domain/foo/bar?baz".parse().unwrap(); + let dst = Destination { + uri, + }; + let connector = HttpConnector::new(1); + + assert_eq!(connector.connect(dst).wait().unwrap_err().kind(), io::ErrorKind::InvalidInput); + } + + + #[test] + fn test_errors_missing_scheme() { + let uri = "example.domain".parse().unwrap(); + let dst = Destination { + uri, + }; + let connector = HttpConnector::new(1); + + assert_eq!(connector.connect(dst).wait().unwrap_err().kind(), io::ErrorKind::InvalidInput); + } + + #[test] + fn client_happy_eyeballs() { + extern crate pretty_env_logger; + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener}; + use std::time::{Duration, Instant}; + + use futures::{Async, Poll}; + use tokio::runtime::current_thread::Runtime; + use tokio_reactor::Handle; + + use super::dns; + use 
super::ConnectingTcp; + + let _ = pretty_env_logger::try_init(); + let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server4.local_addr().unwrap(); + let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap(); + let mut rt = Runtime::new().unwrap(); + + let local_timeout = Duration::default(); + let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1; + let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1; + let fallback_timeout = ::std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout) + + Duration::from_millis(250); + + let scenarios = &[ + // Fast primary, without fallback. + (&[local_ipv4_addr()][..], + 4, local_timeout, false), + (&[local_ipv6_addr()][..], + 6, local_timeout, false), + + // Fast primary, with (unused) fallback. + (&[local_ipv4_addr(), local_ipv6_addr()][..], + 4, local_timeout, false), + (&[local_ipv6_addr(), local_ipv4_addr()][..], + 6, local_timeout, false), + + // Unreachable + fast primary, without fallback. + (&[unreachable_ipv4_addr(), local_ipv4_addr()][..], + 4, unreachable_v4_timeout, false), + (&[unreachable_ipv6_addr(), local_ipv6_addr()][..], + 6, unreachable_v6_timeout, false), + + // Unreachable + fast primary, with (unused) fallback. + (&[unreachable_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], + 4, unreachable_v4_timeout, false), + (&[unreachable_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], + 6, unreachable_v6_timeout, true), + + // Slow primary, with (used) fallback. + (&[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], + 6, fallback_timeout, false), + (&[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], + 4, fallback_timeout, true), + + // Slow primary, with (used) unreachable + fast fallback. 
+ (&[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..], + 6, fallback_timeout + unreachable_v6_timeout, false), + (&[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..], + 4, fallback_timeout + unreachable_v4_timeout, true), + ]; + + // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network. + // Otherwise, connection to "slow" IPv6 address will error-out immediatelly. + let ipv6_accessible = measure_connect(slow_ipv6_addr()).0; + + for &(hosts, family, timeout, needs_ipv6_access) in scenarios { + if needs_ipv6_access && !ipv6_accessible { + continue; + } + + let addrs = hosts.iter().map(|host| (host.clone(), addr.port()).into()).collect(); + let connecting_tcp = ConnectingTcp::new(None, dns::IpAddrs::new(addrs), Some(fallback_timeout), false); + let fut = ConnectingTcpFuture(connecting_tcp); + + let start = Instant::now(); + let res = rt.block_on(fut).unwrap(); + let duration = start.elapsed(); + + // Allow actual duration to be +/- 150ms off. 
+ let min_duration = if timeout >= Duration::from_millis(150) { + timeout - Duration::from_millis(150) + } else { + Duration::default() + }; + let max_duration = timeout + Duration::from_millis(150); + + assert_eq!(res, family); + assert!(duration >= min_duration); + assert!(duration <= max_duration); + } + + struct ConnectingTcpFuture(ConnectingTcp); + + impl Future for ConnectingTcpFuture { + type Item = u8; + type Error = ::std::io::Error; + + fn poll(&mut self) -> Poll { + match self.0.poll(&Some(Handle::default())) { + Ok(Async::Ready(stream)) => Ok(Async::Ready( + if stream.peer_addr().unwrap().is_ipv4() { 4 } else { 6 } + )), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(err) => Err(err), + } + } + } + + fn local_ipv4_addr() -> IpAddr { + Ipv4Addr::new(127, 0, 0, 1).into() + } + + fn local_ipv6_addr() -> IpAddr { + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into() + } + + fn unreachable_ipv4_addr() -> IpAddr { + Ipv4Addr::new(127, 0, 0, 2).into() + } + + fn unreachable_ipv6_addr() -> IpAddr { + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into() + } + + fn slow_ipv4_addr() -> IpAddr { + // RFC 6890 reserved IPv4 address. + Ipv4Addr::new(198, 18, 0, 25).into() + } + + fn slow_ipv6_addr() -> IpAddr { + // RFC 6890 reserved IPv6 address. 
+ Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into() + } + + fn measure_connect(addr: IpAddr) -> (bool, Duration) { + let start = Instant::now(); + let result = ::std::net::TcpStream::connect_timeout( + &(addr, 80).into(), Duration::from_secs(1)); + + let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut; + let duration = start.elapsed(); + (reachable, duration) + } + } + } +} + diff --git a/third_party/rust/hyper/src/client/dispatch.rs b/third_party/rust/hyper/src/client/dispatch.rs new file mode 100644 index 000000000000..6973df79855d --- /dev/null +++ b/third_party/rust/hyper/src/client/dispatch.rs @@ -0,0 +1,318 @@ +use futures::{Async, Poll, Stream}; +use futures::sync::{mpsc, oneshot}; +use want; + +use common::Never; + +pub type RetryPromise = oneshot::Receiver)>>; +pub type Promise = oneshot::Receiver>; + +pub fn channel() -> (Sender, Receiver) { + let (tx, rx) = mpsc::unbounded(); + let (giver, taker) = want::new(); + let tx = Sender { + buffered_once: false, + giver: giver, + inner: tx, + }; + let rx = Receiver { + inner: rx, + taker: taker, + }; + (tx, rx) +} + +/// A bounded sender of requests and callbacks for when responses are ready. +/// +/// While the inner sender is unbounded, the Giver is used to determine +/// if the Receiver is ready for another request. +pub struct Sender { + /// One message is always allowed, even if the Receiver hasn't asked + /// for it yet. This boolean keeps track of whether we've sent one + /// without notice. + buffered_once: bool, + /// The Giver helps watch that the the Receiver side has been polled + /// when the queue is empty. This helps us know when a request and + /// response have been fully processed, and a connection is ready + /// for more. + giver: want::Giver, + /// Actually bounded by the Giver, plus `buffered_once`. + inner: mpsc::UnboundedSender>, +} + +/// An unbounded version. 
+/// +/// Cannot poll the Giver, but can still use it to determine if the Receiver +/// has been dropped. However, this version can be cloned. +pub struct UnboundedSender { + /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked. + giver: want::SharedGiver, + inner: mpsc::UnboundedSender>, +} + +impl Sender { + pub fn poll_ready(&mut self) -> Poll<(), ::Error> { + self.giver.poll_want() + .map_err(|_| ::Error::new_closed()) + } + + pub fn is_ready(&self) -> bool { + self.giver.is_wanting() + } + + pub fn is_closed(&self) -> bool { + self.giver.is_canceled() + } + + fn can_send(&mut self) -> bool { + if self.giver.give() || !self.buffered_once { + // If the receiver is ready *now*, then of course we can send. + // + // If the receiver isn't ready yet, but we don't have anything + // in the channel yet, then allow one message. + self.buffered_once = true; + true + } else { + false + } + } + + pub fn try_send(&mut self, val: T) -> Result, T> { + if !self.can_send() { + return Err(val); + } + let (tx, rx) = oneshot::channel(); + self.inner.unbounded_send(Envelope(Some((val, Callback::Retry(tx))))) + .map(move |_| rx) + .map_err(|e| e.into_inner().0.take().expect("envelope not dropped").0) + } + + pub fn send(&mut self, val: T) -> Result, T> { + if !self.can_send() { + return Err(val); + } + let (tx, rx) = oneshot::channel(); + self.inner.unbounded_send(Envelope(Some((val, Callback::NoRetry(tx))))) + .map(move |_| rx) + .map_err(|e| e.into_inner().0.take().expect("envelope not dropped").0) + } + + pub fn unbound(self) -> UnboundedSender { + UnboundedSender { + giver: self.giver.shared(), + inner: self.inner, + } + } +} + +impl UnboundedSender { + pub fn is_ready(&self) -> bool { + !self.giver.is_canceled() + } + + pub fn is_closed(&self) -> bool { + self.giver.is_canceled() + } + + pub fn try_send(&mut self, val: T) -> Result, T> { + let (tx, rx) = oneshot::channel(); + self.inner.unbounded_send(Envelope(Some((val, Callback::Retry(tx))))) + 
.map(move |_| rx) + .map_err(|e| e.into_inner().0.take().expect("envelope not dropped").0) + } +} + +impl Clone for UnboundedSender { + fn clone(&self) -> Self { + UnboundedSender { + giver: self.giver.clone(), + inner: self.inner.clone(), + } + } +} + +pub struct Receiver { + inner: mpsc::UnboundedReceiver>, + taker: want::Taker, +} + +impl Stream for Receiver { + type Item = (T, Callback); + type Error = Never; + + fn poll(&mut self) -> Poll, Self::Error> { + match self.inner.poll() { + Ok(Async::Ready(item)) => Ok(Async::Ready(item.map(|mut env| { + env.0.take().expect("envelope not dropped") + }))), + Ok(Async::NotReady) => { + self.taker.want(); + Ok(Async::NotReady) + } + Err(()) => unreachable!("mpsc never errors"), + } + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + // Notify the giver about the closure first, before dropping + // the mpsc::Receiver. + self.taker.cancel(); + } +} + +struct Envelope(Option<(T, Callback)>); + +impl Drop for Envelope { + fn drop(&mut self) { + if let Some((val, cb)) = self.0.take() { + let _ = cb.send(Err((::Error::new_canceled(None::<::Error>), Some(val)))); + } + } +} + +pub enum Callback { + Retry(oneshot::Sender)>>), + NoRetry(oneshot::Sender>), +} + +impl Callback { + pub fn poll_cancel(&mut self) -> Poll<(), ()> { + match *self { + Callback::Retry(ref mut tx) => tx.poll_cancel(), + Callback::NoRetry(ref mut tx) => tx.poll_cancel(), + } + } + + pub fn send(self, val: Result)>) { + match self { + Callback::Retry(tx) => { + let _ = tx.send(val); + }, + Callback::NoRetry(tx) => { + let _ = tx.send(val.map_err(|e| e.0)); + } + } + } +} + +#[cfg(test)] +mod tests { + extern crate pretty_env_logger; + #[cfg(feature = "nightly")] + extern crate test; + + use futures::{future, Future, Stream}; + + + #[derive(Debug)] + struct Custom(i32); + + #[test] + fn drop_receiver_sends_cancel_errors() { + let _ = pretty_env_logger::try_init(); + + future::lazy(|| { + let (mut tx, mut rx) = super::channel::(); + + // must poll 
once for try_send to succeed + assert!(rx.poll().expect("rx empty").is_not_ready()); + + let promise = tx.try_send(Custom(43)).unwrap(); + drop(rx); + + promise.then(|fulfilled| { + let err = fulfilled + .expect("fulfilled") + .expect_err("promise should error"); + + match (err.0.kind(), err.1) { + (&::error::Kind::Canceled, Some(_)) => (), + e => panic!("expected Error::Cancel(_), found {:?}", e), + } + + Ok::<(), ()>(()) + }) + }).wait().unwrap(); + } + + #[test] + fn sender_checks_for_want_on_send() { + future::lazy(|| { + let (mut tx, mut rx) = super::channel::(); + // one is allowed to buffer, second is rejected + let _ = tx.try_send(Custom(1)).expect("1 buffered"); + tx.try_send(Custom(2)).expect_err("2 not ready"); + + assert!(rx.poll().expect("rx 1").is_ready()); + // Even though 1 has been popped, only 1 could be buffered for the + // lifetime of the channel. + tx.try_send(Custom(2)).expect_err("2 still not ready"); + + assert!(rx.poll().expect("rx empty").is_not_ready()); + let _ = tx.try_send(Custom(2)).expect("2 ready"); + + Ok::<(), ()>(()) + }).wait().unwrap(); + } + + #[test] + fn unbounded_sender_doesnt_bound_on_want() { + let (tx, rx) = super::channel::(); + let mut tx = tx.unbound(); + + let _ = tx.try_send(Custom(1)).unwrap(); + let _ = tx.try_send(Custom(2)).unwrap(); + let _ = tx.try_send(Custom(3)).unwrap(); + + drop(rx); + + let _ = tx.try_send(Custom(4)).unwrap_err(); + } + + #[cfg(feature = "nightly")] + #[bench] + fn giver_queue_throughput(b: &mut test::Bencher) { + let (mut tx, mut rx) = super::channel::(); + + b.iter(move || { + ::futures::future::lazy(|| { + let _ = tx.send(1).unwrap(); + loop { + let async = rx.poll().unwrap(); + if async.is_not_ready() { + break; + } + } + + + Ok::<(), ()>(()) + }).wait().unwrap(); + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn giver_queue_not_ready(b: &mut test::Bencher) { + let (_tx, mut rx) = super::channel::(); + + b.iter(move || { + ::futures::future::lazy(|| { + 
assert!(rx.poll().unwrap().is_not_ready()); + + Ok::<(), ()>(()) + }).wait().unwrap(); + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn giver_queue_cancel(b: &mut test::Bencher) { + let (_tx, mut rx) = super::channel::(); + + b.iter(move || { + rx.taker.cancel(); + }) + } +} diff --git a/third_party/rust/hyper/src/client/dns.rs b/third_party/rust/hyper/src/client/dns.rs new file mode 100644 index 000000000000..866b0e5e9c26 --- /dev/null +++ b/third_party/rust/hyper/src/client/dns.rs @@ -0,0 +1,100 @@ +use std::io; +use std::net::{ + Ipv4Addr, Ipv6Addr, + SocketAddr, ToSocketAddrs, + SocketAddrV4, SocketAddrV6, +}; +use std::vec; + +use ::futures::{Async, Future, Poll}; + +pub struct Work { + host: String, + port: u16 +} + +impl Work { + pub fn new(host: String, port: u16) -> Work { + Work { host: host, port: port } + } +} + +impl Future for Work { + type Item = IpAddrs; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + debug!("resolving host={:?}, port={:?}", self.host, self.port); + (&*self.host, self.port).to_socket_addrs() + .map(|i| Async::Ready(IpAddrs { iter: i })) + } +} + +pub struct IpAddrs { + iter: vec::IntoIter, +} + +impl IpAddrs { + pub fn new(addrs: Vec) -> Self { + IpAddrs { iter: addrs.into_iter() } + } + + pub fn try_parse(host: &str, port: u16) -> Option { + if let Ok(addr) = host.parse::() { + let addr = SocketAddrV4::new(addr, port); + return Some(IpAddrs { iter: vec![SocketAddr::V4(addr)].into_iter() }) + } + if let Ok(addr) = host.parse::() { + let addr = SocketAddrV6::new(addr, port, 0, 0); + return Some(IpAddrs { iter: vec![SocketAddr::V6(addr)].into_iter() }) + } + None + } + + pub fn split_by_preference(self) -> (IpAddrs, IpAddrs) { + let preferring_v6 = self.iter + .as_slice() + .first() + .map(SocketAddr::is_ipv6) + .unwrap_or(false); + + let (preferred, fallback) = self.iter + .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); + + (IpAddrs::new(preferred), IpAddrs::new(fallback)) + } + + pub fn is_empty(&self) 
-> bool { + self.iter.as_slice().is_empty() + } +} + +impl Iterator for IpAddrs { + type Item = SocketAddr; + #[inline] + fn next(&mut self) -> Option { + self.iter.next() + } +} + +#[cfg(test)] +mod tests { + use std::net::{Ipv4Addr, Ipv6Addr}; + use super::*; + + #[test] + fn test_ip_addrs_split_by_preference() { + let v4_addr = (Ipv4Addr::new(127, 0, 0, 1), 80).into(); + let v6_addr = (Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 80).into(); + + let (mut preferred, mut fallback) = + IpAddrs { iter: vec![v4_addr, v6_addr].into_iter() }.split_by_preference(); + assert!(preferred.next().unwrap().is_ipv4()); + assert!(fallback.next().unwrap().is_ipv6()); + + let (mut preferred, mut fallback) = + IpAddrs { iter: vec![v6_addr, v4_addr].into_iter() }.split_by_preference(); + assert!(preferred.next().unwrap().is_ipv6()); + assert!(fallback.next().unwrap().is_ipv4()); + } +} diff --git a/third_party/rust/hyper/src/client/mod.rs b/third_party/rust/hyper/src/client/mod.rs index 68043fb39010..ebc538b8af9f 100644 --- a/third_party/rust/hyper/src/client/mod.rs +++ b/third_party/rust/hyper/src/client/mod.rs @@ -1,746 +1,970 @@ //! HTTP Client //! -//! # Usage +//! There are two levels of APIs provided for construct HTTP clients: //! -//! The `Client` API is designed for most people to make HTTP requests. -//! It utilizes the lower level `Request` API. +//! - The higher-level [`Client`](Client) type. +//! - The lower-level [conn](conn) module. //! -//! ## GET +//! # Client //! -//! ```no_run -//! # use hyper::Client; +//! The [`Client`](Client) is the main way to send HTTP requests to a server. +//! The default `Client` provides these things on top of the lower-level API: +//! +//! - A default **connector**, able to resolve hostnames and connect to +//! destinations over plain-text TCP. +//! - A **pool** of existing connections, allowing better performance when +//! making multiple requests to the same hostname. +//! 
- Automatic setting of the `Host` header, based on the request `Uri`. +//! - Automatic request **retries** when a pooled connection is closed by the +//! server before any bytes have been written. +//! +//! Many of these features can configured, by making use of +//! [`Client::builder`](Client::builder). +//! +//! ## Example +//! +//! For a small example program simply fetching a URL, take a look at the +//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs). +//! +//! ``` +//! extern crate hyper; +//! +//! use hyper::Client; +//! # #[cfg(feature = "runtime")] +//! use hyper::rt::{self, Future, Stream}; +//! +//! # #[cfg(feature = "runtime")] +//! # fn fetch_httpbin() { //! let client = Client::new(); //! -//! let res = client.get("http://example.domain").send().unwrap(); -//! assert_eq!(res.status, hyper::Ok); +//! let fut = client +//! +//! // Make a GET /ip to 'http://httpbin.org' +//! .get("http://httpbin.org/ip".parse().unwrap()) +//! +//! // And then, if the request gets a response... +//! .and_then(|res| { +//! println!("status: {}", res.status()); +//! +//! // Concatenate the body stream into a single buffer... +//! // This returns a new future, since we must stream body. +//! res.into_body().concat2() +//! }) +//! +//! // And then, if reading the full body succeeds... +//! .and_then(|body| { +//! // The body is just bytes, but let's print a string... +//! let s = ::std::str::from_utf8(&body) +//! .expect("httpbin sends utf-8 JSON"); +//! +//! println!("body: {}", s); +//! +//! // and_then requires we return a new Future, and it turns +//! // out that Result is a Future that is ready immediately. +//! Ok(()) +//! }) +//! +//! // Map any errors that might have happened... +//! .map_err(|err| { +//! println!("error: {}", err); +//! }); +//! +//! // A runtime is needed to execute our asynchronous code. In order to +//! // spawn the future into the runtime, it should already have been +//! 
// started and running before calling this code. +//! rt::spawn(fut); +//! # } +//! # fn main () {} //! ``` -//! -//! The returned value is a `Response`, which provides easy access to -//! the `status`, the `headers`, and the response body via the `Read` -//! trait. -//! -//! ## POST -//! -//! ```no_run -//! # use hyper::Client; -//! let client = Client::new(); -//! -//! let res = client.post("http://example.domain") -//! .body("foo=bar") -//! .send() -//! .unwrap(); -//! assert_eq!(res.status, hyper::Ok); -//! ``` -//! -//! # Sync -//! -//! The `Client` implements `Sync`, so you can share it among multiple threads -//! and make multiple requests simultaneously. -//! -//! ```no_run -//! # use hyper::Client; -//! use std::sync::Arc; -//! use std::thread; -//! -//! // Note: an Arc is used here because `thread::spawn` creates threads that -//! // can outlive the main thread, so we must use reference counting to keep -//! // the Client alive long enough. Scoped threads could skip the Arc. -//! let client = Arc::new(Client::new()); -//! let clone1 = client.clone(); -//! let clone2 = client.clone(); -//! thread::spawn(move || { -//! clone1.get("http://example.domain").send().unwrap(); -//! }); -//! thread::spawn(move || { -//! clone2.post("http://example.domain/post").body("foo=bar").send().unwrap(); -//! }); -//! 
``` -use std::borrow::Cow; -use std::default::Default; -use std::io::{self, copy, Read}; -use std::fmt; +use std::fmt; +use std::mem; +use std::sync::Arc; use std::time::Duration; -use url::Url; -use url::ParseError as UrlError; +use futures::{Async, Future, Poll}; +use futures::future::{self, Either, Executor}; +use futures::sync::oneshot; +use http::{Method, Request, Response, Uri, Version}; +use http::header::{Entry, HeaderValue, HOST}; +use http::uri::Scheme; -use header::{Headers, Header, HeaderFormat}; -use header::{ContentLength, Host, Location}; -use method::Method; -use net::{NetworkConnector, NetworkStream, SslClient}; -use Error; +use body::{Body, Payload}; +use common::Exec; +use common::lazy as hyper_lazy; +use self::connect::{Connect, Destination}; +use self::pool::{Pool, Poolable, Reservation}; -use self::proxy::{Proxy, tunnel}; -use self::scheme::Scheme; -pub use self::pool::Pool; -pub use self::request::Request; -pub use self::response::Response; +#[cfg(feature = "runtime")] pub use self::connect::HttpConnector; -mod proxy; -pub mod pool; -pub mod request; -pub mod response; +pub mod conn; +pub mod connect; +pub(crate) mod dispatch; +#[cfg(feature = "runtime")] mod dns; +mod pool; +#[cfg(test)] +mod tests; -use http::Protocol; -use http::h1::Http11Protocol; - - -/// A Client to use additional features with Requests. -/// -/// Clients can handle things such as: redirect policy, connection pooling. -pub struct Client { - protocol: Box, - redirect_policy: RedirectPolicy, - read_timeout: Option, - write_timeout: Option, - proxy: Option<(Scheme, Cow<'static, str>, u16)> +/// A Client to make outgoing HTTP requests. 
+pub struct Client { + connector: Arc, + executor: Exec, + h1_writev: bool, + h1_title_case_headers: bool, + pool: Pool>, + retry_canceled_requests: bool, + set_host: bool, + ver: Ver, } -impl fmt::Debug for Client { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Client") - .field("redirect_policy", &self.redirect_policy) - .field("read_timeout", &self.read_timeout) - .field("write_timeout", &self.write_timeout) - .field("proxy", &self.proxy) - .finish() +#[cfg(feature = "runtime")] +impl Client { + /// Create a new Client with the default config. + /// + /// # Note + /// + /// The default connector does **not** handle TLS. Speaking to `https` + /// destinations will require configuring a connector that implements TLS. + #[inline] + pub fn new() -> Client { + Builder::default().build_http() } } -impl Client { - - /// Create a new Client. - pub fn new() -> Client { - Client::with_pool_config(Default::default()) +#[cfg(feature = "runtime")] +impl Default for Client { + fn default() -> Client { + Client::new() } +} - /// Create a new Client with a configured Pool Config. - pub fn with_pool_config(config: pool::Config) -> Client { - Client::with_connector(Pool::new(config)) +impl Client<(), Body> { + /// Configure a Client. + /// + /// # Example + /// + /// ``` + /// # extern crate hyper; + /// # #[cfg(feature = "runtime")] + /// fn run () { + /// use hyper::Client; + /// + /// let client = Client::builder() + /// .keep_alive(true) + /// .build_http(); + /// # let infer: Client<_, hyper::Body> = client; + /// # drop(infer); + /// # } + /// # fn main() {} + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::default() } +} - /// Create a Client with an HTTP proxy to a (host, port). 
- pub fn with_http_proxy(host: H, port: u16) -> Client - where H: Into> { - let host = host.into(); - let proxy = tunnel((Scheme::Http, host.clone(), port)); - let mut client = Client::with_connector(Pool::with_connector(Default::default(), proxy)); - client.proxy = Some((Scheme::Http, host, port)); - client - } +impl Client +where C: Connect + Sync + 'static, + C::Transport: 'static, + C::Future: 'static, + B: Payload + Send + 'static, + B::Data: Send, +{ - /// Create a Client using a proxy with a custom connector and SSL client. - pub fn with_proxy_config(proxy_config: ProxyConfig) -> Client - where C: NetworkConnector + Send + Sync + 'static, - C::Stream: NetworkStream + Send + Clone, - S: SslClient + Send + Sync + 'static { - - let scheme = proxy_config.scheme; - let host = proxy_config.host; - let port = proxy_config.port; - let proxy = Proxy { - proxy: (scheme.clone(), host.clone(), port), - connector: proxy_config.connector, - ssl: proxy_config.ssl, - }; - - let mut client = match proxy_config.pool_config { - Some(pool_config) => Client::with_connector(Pool::with_connector(pool_config, proxy)), - None => Client::with_connector(proxy), - }; - client.proxy = Some((scheme, host, port)); - client - } - - /// Create a new client with a specific connector. - pub fn with_connector(connector: C) -> Client - where C: NetworkConnector + Send + Sync + 'static, S: NetworkStream + Send { - Client::with_protocol(Http11Protocol::with_connector(connector)) - } - - /// Create a new client with a specific `Protocol`. - pub fn with_protocol(protocol: P) -> Client { - Client { - protocol: Box::new(protocol), - redirect_policy: Default::default(), - read_timeout: None, - write_timeout: None, - proxy: None, + /// Send a `GET` request to the supplied `Uri`. + /// + /// # Note + /// + /// This requires that the `Payload` type have a `Default` implementation. + /// It *should* return an "empty" version of itself, such that + /// `Payload::is_end_stream` is `true`. 
+ pub fn get(&self, uri: Uri) -> ResponseFuture + where + B: Default, + { + let body = B::default(); + if !body.is_end_stream() { + warn!("default Payload used for get() does not return true for is_end_stream"); } + + let mut req = Request::new(body); + *req.uri_mut() = uri; + self.request(req) } - /// Set the RedirectPolicy. - pub fn set_redirect_policy(&mut self, policy: RedirectPolicy) { - self.redirect_policy = policy; - } - - /// Set the read timeout value for all requests. - pub fn set_read_timeout(&mut self, dur: Option) { - self.read_timeout = dur; - } - - /// Set the write timeout value for all requests. - pub fn set_write_timeout(&mut self, dur: Option) { - self.write_timeout = dur; - } - - /// Build a Get request. - pub fn get(&self, url: U) -> RequestBuilder { - self.request(Method::Get, url) - } - - /// Build a Head request. - pub fn head(&self, url: U) -> RequestBuilder { - self.request(Method::Head, url) - } - - /// Build a Patch request. - pub fn patch(&self, url: U) -> RequestBuilder { - self.request(Method::Patch, url) - } - - /// Build a Post request. - pub fn post(&self, url: U) -> RequestBuilder { - self.request(Method::Post, url) - } - - /// Build a Put request. - pub fn put(&self, url: U) -> RequestBuilder { - self.request(Method::Put, url) - } - - /// Build a Delete request. - pub fn delete(&self, url: U) -> RequestBuilder { - self.request(Method::Delete, url) - } - - - /// Build a new request using this Client. - pub fn request(&self, method: Method, url: U) -> RequestBuilder { - RequestBuilder { - client: self, - method: method, - url: url.into_url(), - body: None, - headers: None, - } - } -} - -impl Default for Client { - fn default() -> Client { Client::new() } -} - -/// Options for an individual Request. -/// -/// One of these will be built for you if you use one of the convenience -/// methods, such as `get()`, `post()`, etc. 
-pub struct RequestBuilder<'a> { - client: &'a Client, - // We store a result here because it's good to keep RequestBuilder - // from being generic, but it is a nicer API to report the error - // from `send` (when other errors may be happening, so it already - // returns a `Result`). Why's it good to keep it non-generic? It - // stops downstream crates having to remonomorphise and recompile - // the code, which can take a while, since `send` is fairly large. - // (For an extreme example, a tiny crate containing - // `hyper::Client::new().get("x").send().unwrap();` took ~4s to - // compile with a generic RequestBuilder, but 2s with this scheme,) - url: Result, - headers: Option, - method: Method, - body: Option>, -} - -impl<'a> RequestBuilder<'a> { - - /// Set a request body to be sent. - pub fn body>>(mut self, body: B) -> RequestBuilder<'a> { - self.body = Some(body.into()); - self - } - - /// Add additional headers to the request. - pub fn headers(mut self, headers: Headers) -> RequestBuilder<'a> { - self.headers = Some(headers); - self - } - - /// Add an individual new header to the request. - pub fn header(mut self, header: H) -> RequestBuilder<'a> { - { - let headers = match self.headers { - Some(ref mut h) => h, - None => { - self.headers = Some(Headers::new()); - self.headers.as_mut().unwrap() - } - }; - - headers.set(header); - } - self - } - - /// Execute this request and receive a Response back. - pub fn send(self) -> ::Result { - let RequestBuilder { client, method, url, headers, body } = self; - let mut url = try!(url); - trace!("send method={:?}, url={:?}, client={:?}", method, url, client); - - let can_have_body = match method { - Method::Get | Method::Head => false, - _ => true + /// Send a constructed Request using this Client. 
+ pub fn request(&self, mut req: Request) -> ResponseFuture { + let is_http_11 = self.ver == Ver::Http1 && match req.version() { + Version::HTTP_11 => true, + Version::HTTP_10 => false, + other => { + error!("Request has unsupported version \"{:?}\"", other); + return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_version()))); + } }; - let mut body = if can_have_body { - body - } else { - None - }; + let is_http_connect = req.method() == &Method::CONNECT; - loop { - let mut req = { - let (host, port) = try!(get_host_and_port(&url)); - let mut message = try!(client.protocol.new_message(&host, port, url.scheme())); - if url.scheme() == "http" && client.proxy.is_some() { - message.set_proxied(true); - } + if !is_http_11 && is_http_connect { + debug!("client does not support CONNECT requests for {:?}", req.version()); + return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_request_method()))); + } - let mut h = Headers::new(); - h.set(Host { - hostname: host.to_owned(), - port: Some(port), - }); - if let Some(ref headers) = headers { - h.extend(headers.iter()); - } - let headers = h; - Request::with_headers_and_message(method.clone(), url.clone(), headers, message) - }; - try!(req.set_write_timeout(client.write_timeout)); - try!(req.set_read_timeout(client.read_timeout)); - - match (can_have_body, body.as_ref()) { - (true, Some(body)) => match body.size() { - Some(size) => req.headers_mut().set(ContentLength(size)), - None => (), // chunked, Request will add it automatically - }, - (true, None) => req.headers_mut().set(ContentLength(0)), - _ => () // neither + let uri = req.uri().clone(); + let domain = match (uri.scheme_part(), uri.authority_part()) { + (Some(scheme), Some(auth)) => { + format!("{}://{}", scheme, auth) } - let mut streaming = try!(req.start()); - if let Some(mut rdr) = body.take() { - try!(copy(&mut rdr, &mut streaming)); - } - let res = try!(streaming.send()); - if !res.status.is_redirection() { - 
return Ok(res) - } - debug!("redirect code {:?} for {}", res.status, url); - - let loc = { - // punching borrowck here - let loc = match res.headers.get::() { - Some(&Location(ref loc)) => { - Some(url.join(loc)) - } - None => { - debug!("no Location header"); - // could be 304 Not Modified? - None - } + (None, Some(auth)) if is_http_connect => { + let scheme = match auth.port() { + Some(443) => { + set_scheme(req.uri_mut(), Scheme::HTTPS); + "https" + }, + _ => { + set_scheme(req.uri_mut(), Scheme::HTTP); + "http" + }, }; - match loc { - Some(r) => r, - None => return Ok(res) - } + format!("{}://{}", scheme, auth) + }, + _ => { + debug!("Client requires absolute-form URIs, received: {:?}", uri); + return ResponseFuture::new(Box::new(future::err(::Error::new_user_absolute_uri_required()))) + } + }; + + if self.set_host && self.ver == Ver::Http1 { + if let Entry::Vacant(entry) = req.headers_mut().entry(HOST).expect("HOST is always valid header name") { + let hostname = uri.host().expect("authority implies host"); + let host = if let Some(port) = uri.port() { + let s = format!("{}:{}", hostname, port); + HeaderValue::from_str(&s) + } else { + HeaderValue::from_str(hostname) + }.expect("uri host is valid header value"); + entry.insert(host); + } + } + + + let client = self.clone(); + let uri = req.uri().clone(); + let fut = RetryableSendRequest { + client: client, + future: self.send_request(req, &domain), + domain: domain, + uri: uri, + }; + ResponseFuture::new(Box::new(fut)) + } + + //TODO: replace with `impl Future` when stable + fn send_request(&self, mut req: Request, domain: &str) -> Box, Error=ClientError> + Send> { + let url = req.uri().clone(); + let ver = self.ver; + let pool_key = (Arc::new(domain.to_string()), self.ver); + let checkout = self.pool.checkout(pool_key.clone()); + let connect = { + let executor = self.executor.clone(); + let pool = self.pool.clone(); + let h1_writev = self.h1_writev; + let h1_title_case_headers = self.h1_title_case_headers; + 
let connector = self.connector.clone(); + let dst = Destination { + uri: url, }; - url = match loc { - Ok(u) => u, - Err(e) => { - debug!("Location header had invalid URI: {:?}", e); - return Ok(res); + hyper_lazy(move || { + if let Some(connecting) = pool.connecting(&pool_key) { + Either::A(connector.connect(dst) + .map_err(::Error::new_connect) + .and_then(move |(io, connected)| { + conn::Builder::new() + .exec(executor.clone()) + .h1_writev(h1_writev) + .h1_title_case_headers(h1_title_case_headers) + .http2_only(pool_key.1 == Ver::Http2) + .handshake(io) + .and_then(move |(tx, conn)| { + let bg = executor.execute(conn.map_err(|e| { + debug!("client connection error: {}", e) + })); + + // This task is critical, so an execute error + // should be returned. + if let Err(err) = bg { + warn!("error spawning critical client task: {}", err); + return Either::A(future::err(err)); + } + + // Wait for 'conn' to ready up before we + // declare this tx as usable + Either::B(tx.when_ready()) + }) + .map(move |tx| { + pool.pooled(connecting, PoolClient { + is_proxied: connected.is_proxied, + tx: match ver { + Ver::Http1 => PoolTx::Http1(tx), + Ver::Http2 => PoolTx::Http2(tx.into_http2()), + }, + }) + }) + })) + } else { + let canceled = ::Error::new_canceled(Some("HTTP/2 connection in progress")); + Either::B(future::err(canceled)) + } + }) + }; + + let executor = self.executor.clone(); + // The order of the `select` is depended on below... + let race = checkout.select2(connect) + .map(move |either| match either { + // Checkout won, connect future may have been started or not. + // + // If it has, let it finish and insert back into the pool, + // so as to not waste the socket... + Either::A((checked_out, connecting)) => { + // This depends on the `select` above having the correct + // order, such that if the checkout future were ready + // immediately, the connect future will never have been + // started. 
+ // + // If it *wasn't* ready yet, then the connect future will + // have been started... + if connecting.started() { + let bg = connecting + .map(|_pooled| { + // dropping here should just place it in + // the Pool for us... + }) + .map_err(|err| { + trace!("background connect error: {}", err); + }); + // An execute error here isn't important, we're just trying + // to prevent a waste of a socket... + let _ = executor.execute(bg); + } + checked_out + }, + // Connect won, checkout can just be dropped. + Either::B((connected, _checkout)) => { + connected + }, + }) + .or_else(|either| match either { + // Either checkout or connect could get canceled: + // + // 1. Connect is canceled if this is HTTP/2 and there is + // an outstanding HTTP/2 connecting task. + // 2. Checkout is canceled if the pool cannot deliver an + // idle connection reliably. + // + // In both cases, we should just wait for the other future. + Either::A((err, connecting)) => { + if err.is_canceled() { + Either::A(Either::A(connecting.map_err(ClientError::Normal))) + } else { + Either::B(future::err(ClientError::Normal(err))) + } + }, + Either::B((err, checkout)) => { + if err.is_canceled() { + Either::A(Either::B(checkout.map_err(ClientError::Normal))) + } else { + Either::B(future::err(ClientError::Normal(err))) + } + } + }); + + let executor = self.executor.clone(); + let resp = race.and_then(move |mut pooled| { + let conn_reused = pooled.is_reused(); + if ver == Ver::Http1 { + // CONNECT always sends origin-form, so check it first... 
+ if req.method() == &Method::CONNECT { + authority_form(req.uri_mut()); + } else if pooled.is_proxied { + absolute_form(req.uri_mut()); + } else { + origin_form(req.uri_mut()); + }; + } else { + debug_assert!( + req.method() != &Method::CONNECT, + "Client should have returned Error for HTTP2 CONNECT" + ); + } + + let fut = pooled.send_request_retryable(req); + + // As of futures@0.1.21, there is a race condition in the mpsc + // channel, such that sending when the receiver is closing can + // result in the message being stuck inside the queue. It won't + // ever notify until the Sender side is dropped. + // + // To counteract this, we must check if our senders 'want' channel + // has been closed after having tried to send. If so, error out... + if pooled.is_closed() { + drop(pooled); + let fut = fut + .map_err(move |(err, orig_req)| { + if let Some(req) = orig_req { + ClientError::Canceled { + connection_reused: conn_reused, + reason: err, + req, + } + } else { + ClientError::Normal(err) + } + }); + Either::A(fut) + } else { + let fut = fut + .map_err(move |(err, orig_req)| { + if let Some(req) = orig_req { + ClientError::Canceled { + connection_reused: conn_reused, + reason: err, + req, + } + } else { + ClientError::Normal(err) + } + }) + .and_then(move |mut res| { + // If pooled is HTTP/2, we can toss this reference immediately. + // + // when pooled is dropped, it will try to insert back into the + // pool. To delay that, spawn a future that completes once the + // sender is ready again. + // + // This *should* only be once the related `Connection` has polled + // for a new request to start. + // + // It won't be ready if there is a body to stream. 
+ if ver == Ver::Http2 || !pooled.is_pool_enabled() || pooled.is_ready() { + drop(pooled); + } else if !res.body().is_end_stream() { + let (delayed_tx, delayed_rx) = oneshot::channel(); + res.body_mut().delayed_eof(delayed_rx); + let on_idle = future::poll_fn(move || { + pooled.poll_ready() + }) + .then(move |_| { + // At this point, `pooled` is dropped, and had a chance + // to insert into the pool (if conn was idle) + drop(delayed_tx); + Ok(()) + }); + + if let Err(err) = executor.execute(on_idle) { + // This task isn't critical, so just log and ignore. + warn!("error spawning task to insert idle connection: {}", err); + } + } else { + // There's no body to delay, but the connection isn't + // ready yet. Only re-insert when it's ready + let on_idle = future::poll_fn(move || { + pooled.poll_ready() + }) + .then(|_| Ok(())); + + if let Err(err) = executor.execute(on_idle) { + // This task isn't critical, so just log and ignore. + warn!("error spawning task to insert idle connection: {}", err); + } + } + Ok(res) + }); + Either::B(fut) + } + }); + + Box::new(resp) + } +} + +impl Clone for Client { + fn clone(&self) -> Client { + Client { + connector: self.connector.clone(), + executor: self.executor.clone(), + h1_writev: self.h1_writev, + h1_title_case_headers: self.h1_title_case_headers, + pool: self.pool.clone(), + retry_canceled_requests: self.retry_canceled_requests, + set_host: self.set_host, + ver: self.ver, + } + } +} + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Client") + .finish() + } +} + +/// A `Future` that will resolve to an HTTP Response. 
+#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + inner: Box, Error=::Error> + Send>, +} + +impl ResponseFuture { + fn new(fut: Box, Error=::Error> + Send>) -> Self { + Self { + inner: fut, + } + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("Future") + } +} + +impl Future for ResponseFuture { + type Item = Response; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + self.inner.poll() + } +} + +struct RetryableSendRequest { + client: Client, + domain: String, + future: Box, Error=ClientError> + Send>, + uri: Uri, +} + +impl Future for RetryableSendRequest +where + C: Connect + 'static, + C::Future: 'static, + B: Payload + Send + 'static, + B::Data: Send, +{ + type Item = Response; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + match self.future.poll() { + Ok(Async::Ready(resp)) => return Ok(Async::Ready(resp)), + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(ClientError::Normal(err)) => return Err(err), + Err(ClientError::Canceled { + connection_reused, + mut req, + reason, + }) => { + if !self.client.retry_canceled_requests || !connection_reused { + // if client disabled, don't retry + // a fresh connection means we definitely can't retry + return Err(reason); + } + + trace!("unstarted request canceled, trying again (reason={:?})", reason); + *req.uri_mut() = self.uri.clone(); + self.future = self.client.send_request(req, &self.domain); } - }; - match client.redirect_policy { - // separate branches because they can't be one - RedirectPolicy::FollowAll => (), //continue - RedirectPolicy::FollowIf(cond) if cond(&url) => (), //continue - _ => return Ok(res), } } } } -/// An enum of possible body types for a Request. -pub enum Body<'a> { - /// A Reader does not necessarily know it's size, so it is chunked. - ChunkedBody(&'a mut (Read + 'a)), - /// For Readers that can know their size, like a `File`. 
- SizedBody(&'a mut (Read + 'a), u64), - /// A String has a size, and uses Content-Length. - BufBody(&'a [u8] , usize), +struct PoolClient { + is_proxied: bool, + tx: PoolTx, } -impl<'a> Body<'a> { - fn size(&self) -> Option { - match *self { - Body::SizedBody(_, len) => Some(len), - Body::BufBody(_, len) => Some(len as u64), - _ => None +enum PoolTx { + Http1(conn::SendRequest), + Http2(conn::Http2SendRequest), +} + +impl PoolClient { + fn poll_ready(&mut self) -> Poll<(), ::Error> { + match self.tx { + PoolTx::Http1(ref mut tx) => tx.poll_ready(), + PoolTx::Http2(_) => Ok(Async::Ready(())), + } + } + + fn is_ready(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } + + fn is_closed(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_closed(), + PoolTx::Http2(ref tx) => tx.is_closed(), } } } -impl<'a> Read for Body<'a> { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match *self { - Body::ChunkedBody(ref mut r) => r.read(buf), - Body::SizedBody(ref mut r, _) => r.read(buf), - Body::BufBody(ref mut r, _) => Read::read(r, buf), +impl PoolClient { + //TODO: replace with `impl Future` when stable + fn send_request_retryable(&mut self, req: Request) -> Box, Error=(::Error, Option>)> + Send> + where + B: Send, + { + match self.tx { + PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req), + PoolTx::Http2(ref mut tx) => tx.send_request_retryable(req), } } } -impl<'a> Into> for &'a [u8] { - #[inline] - fn into(self) -> Body<'a> { - Body::BufBody(self, self.len()) - } -} - -impl<'a> Into> for &'a str { - #[inline] - fn into(self) -> Body<'a> { - self.as_bytes().into() - } -} - -impl<'a> Into> for &'a String { - #[inline] - fn into(self) -> Body<'a> { - self.as_bytes().into() - } -} - -impl<'a, R: Read> From<&'a mut R> for Body<'a> { - #[inline] - fn from(r: &'a mut R) -> Body<'a> { - Body::ChunkedBody(r) - } -} - -/// A helper trait to convert common objects 
into a Url. -pub trait IntoUrl { - /// Consumes the object, trying to return a Url. - fn into_url(self) -> Result; -} - -impl IntoUrl for Url { - fn into_url(self) -> Result { - Ok(self) - } -} - -impl<'a> IntoUrl for &'a str { - fn into_url(self) -> Result { - Url::parse(self) - } -} - -impl<'a> IntoUrl for &'a String { - fn into_url(self) -> Result { - Url::parse(self) - } -} - -/// Proxy server configuration with a custom connector and TLS wrapper. -pub struct ProxyConfig -where C: NetworkConnector + Send + Sync + 'static, - C::Stream: NetworkStream + Clone + Send, - S: SslClient + Send + Sync + 'static { - scheme: Scheme, - host: Cow<'static, str>, - port: u16, - pool_config: Option, - connector: C, - ssl: S, -} - -impl ProxyConfig -where C: NetworkConnector + Send + Sync + 'static, - C::Stream: NetworkStream + Clone + Send, - S: SslClient + Send + Sync + 'static { - - /// Create a new `ProxyConfig`. - #[inline] - pub fn new>>(scheme: &str, host: H, port: u16, connector: C, ssl: S) -> ProxyConfig { - ProxyConfig { - scheme: scheme.into(), - host: host.into(), - port: port, - pool_config: Some(pool::Config::default()), - connector: connector, - ssl: ssl, +impl Poolable for PoolClient +where + B: Send + 'static, +{ + fn is_open(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + PoolTx::Http2(ref tx) => tx.is_ready(), } } - /// Change the `pool::Config` for the proxy. - /// - /// Passing `None` disables the `Pool`. - /// - /// The default is enabled, with the default `pool::Config`. - pub fn set_pool_config(&mut self, pool_config: Option) { - self.pool_config = pool_config; - } -} - -/// Behavior regarding how to handle redirects within a Client. -#[derive(Copy)] -pub enum RedirectPolicy { - /// Don't follow any redirects. - FollowNone, - /// Follow all redirects. - FollowAll, - /// Follow a redirect if the contained function returns true. 
- FollowIf(fn(&Url) -> bool), -} - -impl fmt::Debug for RedirectPolicy { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - RedirectPolicy::FollowNone => fmt.write_str("FollowNone"), - RedirectPolicy::FollowAll => fmt.write_str("FollowAll"), - RedirectPolicy::FollowIf(_) => fmt.write_str("FollowIf"), + fn reserve(self) -> Reservation { + match self.tx { + PoolTx::Http1(tx) => { + Reservation::Unique(PoolClient { + is_proxied: self.is_proxied, + tx: PoolTx::Http1(tx), + }) + }, + PoolTx::Http2(tx) => { + let b = PoolClient { + is_proxied: self.is_proxied, + tx: PoolTx::Http2(tx.clone()), + }; + let a = PoolClient { + is_proxied: self.is_proxied, + tx: PoolTx::Http2(tx), + }; + Reservation::Shared(a, b) + } } } } -// This is a hack because of upstream typesystem issues. -impl Clone for RedirectPolicy { - fn clone(&self) -> RedirectPolicy { - *self +enum ClientError { + Normal(::Error), + Canceled { + connection_reused: bool, + req: Request, + reason: ::Error, } } -impl Default for RedirectPolicy { - fn default() -> RedirectPolicy { - RedirectPolicy::FollowAll - } +/// A marker to identify what version a pooled connection is. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +enum Ver { + Http1, + Http2, } - -fn get_host_and_port(url: &Url) -> ::Result<(&str, u16)> { - let host = match url.host_str() { - Some(host) => host, - None => return Err(Error::Uri(UrlError::EmptyHost)), +fn origin_form(uri: &mut Uri) { + let path = match uri.path_and_query() { + Some(path) if path.as_str() != "/" => { + let mut parts = ::http::uri::Parts::default(); + parts.path_and_query = Some(path.clone()); + Uri::from_parts(parts).expect("path is valid uri") + }, + _none_or_just_slash => { + debug_assert!(Uri::default() == "/"); + Uri::default() + } }; - trace!("host={:?}", host); - let port = match url.port_or_known_default() { - Some(port) => port, - None => return Err(Error::Uri(UrlError::InvalidPort)), - }; - trace!("port={:?}", port); - Ok((host, port)) + *uri = path } -mod scheme { - - #[derive(Clone, PartialEq, Eq, Debug, Hash)] - pub enum Scheme { - Http, - Https, - Other(String), +fn absolute_form(uri: &mut Uri) { + debug_assert!(uri.scheme_part().is_some(), "absolute_form needs a scheme"); + debug_assert!(uri.authority_part().is_some(), "absolute_form needs an authority"); + // If the URI is to HTTPS, and the connector claimed to be a proxy, + // then it *should* have tunneled, and so we don't want to send + // absolute-form in that case. + if uri.scheme_part() == Some(&Scheme::HTTPS) { + origin_form(uri); } +} - impl<'a> From<&'a str> for Scheme { - fn from(s: &'a str) -> Scheme { - match s { - "http" => Scheme::Http, - "https" => Scheme::Https, - s => Scheme::Other(String::from(s)), +fn authority_form(uri: &mut Uri) { + if log_enabled!(::log::Level::Warn) { + if let Some(path) = uri.path_and_query() { + // `https://hyper.rs` would parse with `/` path, don't + // annoy people about that... 
+ if path != "/" { + warn!( + "HTTP/1.1 CONNECT request stripping path: {:?}", + path + ); } } } + *uri = match uri.authority_part() { + Some(auth) => { + let mut parts = ::http::uri::Parts::default(); + parts.authority = Some(auth.clone()); + Uri::from_parts(parts).expect("authority is valid") + }, + None => { + unreachable!("authority_form with relative uri"); + } + }; +} - impl AsRef for Scheme { - fn as_ref(&self) -> &str { - match *self { - Scheme::Http => "http", - Scheme::Https => "https", - Scheme::Other(ref s) => s, - } +fn set_scheme(uri: &mut Uri, scheme: Scheme) { + debug_assert!(uri.scheme_part().is_none(), "set_scheme expects no existing scheme"); + let old = mem::replace(uri, Uri::default()); + let mut parts: ::http::uri::Parts = old.into(); + parts.scheme = Some(scheme); + parts.path_and_query = Some("/".parse().expect("slash is a valid path")); + *uri = Uri::from_parts(parts).expect("scheme is valid"); +} + +/// Builder for a Client +#[derive(Clone)] +pub struct Builder { + //connect_timeout: Duration, + exec: Exec, + keep_alive: bool, + keep_alive_timeout: Option, + h1_writev: bool, + h1_title_case_headers: bool, + //TODO: make use of max_idle config + max_idle: usize, + retry_canceled_requests: bool, + set_host: bool, + ver: Ver, +} + +impl Default for Builder { + fn default() -> Self { + Self { + exec: Exec::Default, + keep_alive: true, + keep_alive_timeout: Some(Duration::from_secs(90)), + h1_writev: true, + h1_title_case_headers: false, + max_idle: 5, + retry_canceled_requests: true, + set_host: true, + ver: Ver::Http1, } } +} +impl Builder { + /// Enable or disable keep-alive mechanics. + /// + /// Default is enabled. + #[inline] + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + self.keep_alive = val; + self + } + + /// Set an optional timeout for idle sockets being kept-alive. + /// + /// Pass `None` to disable timeout. + /// + /// Default is 90 seconds. 
+ #[inline] + pub fn keep_alive_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.keep_alive_timeout = val.into(); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Default is `true`. + #[inline] + pub fn http1_writev(&mut self, val: bool) -> &mut Self { + self.h1_writev = val; + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + self.h1_title_case_headers = val; + self + } + + /// Set whether the connection **must** use HTTP/2. + /// + /// Note that setting this to true prevents HTTP/1 from being allowed. + /// + /// Default is false. + pub fn http2_only(&mut self, val: bool) -> &mut Self { + self.ver = if val { + Ver::Http2 + } else { + Ver::Http1 + }; + self + } + + /// Set whether to retry requests that get disrupted before ever starting + /// to write. + /// + /// This means a request that is queued, and gets given an idle, reused + /// connection, and then encounters an error immediately as the idle + /// connection was found to be unusable. + /// + /// When this is set to `false`, the related `ResponseFuture` would instead + /// resolve to an `Error::Cancel`. + /// + /// Default is `true`. + #[inline] + pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { + self.retry_canceled_requests = val; + self + } + + /// Set whether to automatically add the `Host` header to requests. 
+ /// + /// If true, and a request does not include a `Host` header, one will be + /// added automatically, derived from the authority of the `Uri`. + /// + /// Default is `true`. + #[inline] + pub fn set_host(&mut self, val: bool) -> &mut Self { + self.set_host = val; + self + } + + /// Provide an executor to execute background `Connection` tasks. + pub fn executor(&mut self, exec: E) -> &mut Self + where + E: Executor + Send>> + Send + Sync + 'static, + { + self.exec = Exec::Executor(Arc::new(exec)); + self + } + + /// Builder a client with this configuration and the default `HttpConnector`. + #[cfg(feature = "runtime")] + pub fn build_http(&self) -> Client + where + B: Payload + Send, + B::Data: Send, + { + let mut connector = HttpConnector::new(4); + if self.keep_alive { + connector.set_keepalive(self.keep_alive_timeout); + } + self.build(connector) + } + + /// Combine the configuration of this builder with a connector to create a `Client`. + pub fn build(&self, connector: C) -> Client + where + C: Connect, + C::Transport: 'static, + C::Future: 'static, + B: Payload + Send, + B::Data: Send, + { + Client { + connector: Arc::new(connector), + executor: self.exec.clone(), + h1_writev: self.h1_writev, + h1_title_case_headers: self.h1_title_case_headers, + pool: Pool::new(self.keep_alive, self.keep_alive_timeout, &self.exec), + retry_canceled_requests: self.retry_canceled_requests, + set_host: self.set_host, + ver: self.ver, + } + } +} + +impl fmt::Debug for Builder { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Builder") + .field("keep_alive", &self.keep_alive) + .field("keep_alive_timeout", &self.keep_alive_timeout) + .field("http1_writev", &self.h1_writev) + .field("max_idle", &self.max_idle) + .field("set_host", &self.set_host) + .field("version", &self.ver) + .finish() + } } #[cfg(test)] -mod tests { - use std::io::Read; - use header::Server; - use http::h1::Http11Message; - use mock::{MockStream, MockSsl}; - use super::{Client, 
RedirectPolicy}; - use super::scheme::Scheme; - use super::proxy::Proxy; - use super::pool::Pool; - use url::Url; - - mock_connector!(MockRedirectPolicy { - "http://127.0.0.1" => "HTTP/1.1 301 Redirect\r\n\ - Location: http://127.0.0.2\r\n\ - Server: mock1\r\n\ - \r\n\ - " - "http://127.0.0.2" => "HTTP/1.1 302 Found\r\n\ - Location: https://127.0.0.3\r\n\ - Server: mock2\r\n\ - \r\n\ - " - "https://127.0.0.3" => "HTTP/1.1 200 OK\r\n\ - Server: mock3\r\n\ - \r\n\ - " - }); - +mod unit_tests { + use super::*; #[test] - fn test_proxy() { - use super::pool::PooledStream; - type MessageStream = PooledStream>; - mock_connector!(ProxyConnector { - b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n" - }); - let tunnel = Proxy { - connector: ProxyConnector, - proxy: (Scheme::Http, "example.proxy".into(), 8008), - ssl: MockSsl, - }; - let mut client = Client::with_connector(Pool::with_connector(Default::default(), tunnel)); - client.proxy = Some((Scheme::Http, "example.proxy".into(), 8008)); - let mut dump = vec![]; - client.get("http://127.0.0.1/foo/bar").send().unwrap().read_to_end(&mut dump).unwrap(); - - let box_message = client.protocol.new_message("127.0.0.1", 80, "http").unwrap(); - let message = box_message.downcast::().unwrap(); - let stream = message.into_inner().downcast::().unwrap().into_inner().into_normal().unwrap(); - - let s = ::std::str::from_utf8(&stream.write).unwrap(); - let request_line = "GET http://127.0.0.1/foo/bar HTTP/1.1\r\n"; - assert!(s.starts_with(request_line), "{:?} doesn't start with {:?}", s, request_line); - assert!(s.contains("Host: 127.0.0.1\r\n")); + fn set_relative_uri_with_implicit_path() { + let mut uri = "http://hyper.rs".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/"); } #[test] - fn test_proxy_tunnel() { - use super::pool::PooledStream; - type MessageStream = PooledStream>; + fn test_origin_form() { + let mut uri = "http://hyper.rs/guides".parse().unwrap(); + origin_form(&mut uri); + 
assert_eq!(uri.to_string(), "/guides"); - mock_connector!(ProxyConnector { - b"HTTP/1.1 200 OK\r\n\r\n", - b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n" - }); - let tunnel = Proxy { - connector: ProxyConnector, - proxy: (Scheme::Http, "example.proxy".into(), 8008), - ssl: MockSsl, - }; - let mut client = Client::with_connector(Pool::with_connector(Default::default(), tunnel)); - client.proxy = Some((Scheme::Http, "example.proxy".into(), 8008)); - let mut dump = vec![]; - client.get("https://127.0.0.1/foo/bar").send().unwrap().read_to_end(&mut dump).unwrap(); - - let box_message = client.protocol.new_message("127.0.0.1", 443, "https").unwrap(); - let message = box_message.downcast::().unwrap(); - let stream = message.into_inner().downcast::().unwrap().into_inner().into_tunneled().unwrap(); - - let s = ::std::str::from_utf8(&stream.write).unwrap(); - let connect_line = "CONNECT 127.0.0.1:443 HTTP/1.1\r\nHost: 127.0.0.1:443\r\n\r\n"; - assert_eq!(&s[..connect_line.len()], connect_line); - - let s = &s[connect_line.len()..]; - let request_line = "GET /foo/bar HTTP/1.1\r\n"; - assert_eq!(&s[..request_line.len()], request_line); - assert!(s.contains("Host: 127.0.0.1\r\n")); + let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/guides?foo=bar"); } #[test] - fn test_redirect_followall() { - let mut client = Client::with_connector(MockRedirectPolicy); - client.set_redirect_policy(RedirectPolicy::FollowAll); + fn test_absolute_form() { + let mut uri = "http://hyper.rs/guides".parse().unwrap(); + absolute_form(&mut uri); + assert_eq!(uri.to_string(), "http://hyper.rs/guides"); - let res = client.get("http://127.0.0.1").send().unwrap(); - assert_eq!(res.headers.get(), Some(&Server("mock3".to_owned()))); + let mut uri = "https://hyper.rs/guides".parse().unwrap(); + absolute_form(&mut uri); + assert_eq!(uri.to_string(), "/guides"); } #[test] - fn test_redirect_dontfollow() { - let mut client = 
Client::with_connector(MockRedirectPolicy); - client.set_redirect_policy(RedirectPolicy::FollowNone); - let res = client.get("http://127.0.0.1").send().unwrap(); - assert_eq!(res.headers.get(), Some(&Server("mock1".to_owned()))); - } + fn test_authority_form() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); - #[test] - fn test_redirect_followif() { - fn follow_if(url: &Url) -> bool { - !url.as_str().contains("127.0.0.3") - } - let mut client = Client::with_connector(MockRedirectPolicy); - client.set_redirect_policy(RedirectPolicy::FollowIf(follow_if)); - let res = client.get("http://127.0.0.1").send().unwrap(); - assert_eq!(res.headers.get(), Some(&Server("mock2".to_owned()))); - } + let mut uri = "http://hyper.rs".parse().unwrap(); + authority_form(&mut uri); + assert_eq!(uri.to_string(), "hyper.rs"); - mock_connector!(Issue640Connector { - b"HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\n", - b"GET", - b"HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\n", - b"HEAD", - b"HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\n", - b"POST" - }); - - // see issue #640 - #[test] - fn test_head_response_body_keep_alive() { - let client = Client::with_connector(Pool::with_connector(Default::default(), Issue640Connector)); - - let mut s = String::new(); - client.get("http://127.0.0.1").send().unwrap().read_to_string(&mut s).unwrap(); - assert_eq!(s, "GET"); - - let mut s = String::new(); - client.head("http://127.0.0.1").send().unwrap().read_to_string(&mut s).unwrap(); - assert_eq!(s, ""); - - let mut s = String::new(); - client.post("http://127.0.0.1").send().unwrap().read_to_string(&mut s).unwrap(); - assert_eq!(s, "POST"); - } - - #[test] - fn test_request_body_error_is_returned() { - mock_connector!(Connector { - b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\n", - b"HELLO" - }); - - struct BadBody; - - impl ::std::io::Read for BadBody { - fn read(&mut self, _buf: &mut [u8]) -> ::std::io::Result { - Err(::std::io::Error::new(::std::io::ErrorKind::Other, 
"BadBody read")) - } - } - - let client = Client::with_connector(Connector); - let err = client.post("http://127.0.0.1").body(&mut BadBody).send().unwrap_err(); - assert_eq!(err.to_string(), "BadBody read"); + let mut uri = "hyper.rs".parse().unwrap(); + authority_form(&mut uri); + assert_eq!(uri.to_string(), "hyper.rs"); } } diff --git a/third_party/rust/hyper/src/client/pool.rs b/third_party/rust/hyper/src/client/pool.rs index fcffba080195..d4c4e23d70f6 100644 --- a/third_party/rust/hyper/src/client/pool.rs +++ b/third_party/rust/hyper/src/client/pool.rs @@ -1,499 +1,930 @@ -//! Client Connection Pooling -use std::borrow::ToOwned; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet, VecDeque}; use std::fmt; -use std::io::{self, Read, Write}; -use std::net::{SocketAddr, Shutdown}; -use std::sync::{Arc, Mutex}; -use std::sync::atomic::{AtomicBool, Ordering}; - +use std::ops::{Deref, DerefMut}; +use std::sync::{Arc, Mutex, Weak}; use std::time::{Duration, Instant}; -use net::{NetworkConnector, NetworkStream, DefaultConnector}; -use client::scheme::Scheme; +use futures::{Future, Async, Poll}; +use futures::sync::oneshot; +#[cfg(feature = "runtime")] +use tokio_timer::Interval; -use self::stale::{StaleCheck, Stale}; +use common::Exec; +use super::Ver; -/// The `NetworkConnector` that behaves as a connection pool used by hyper's `Client`. -pub struct Pool { - connector: C, - inner: Arc::Stream>>>, - stale_check: Option>, +pub(super) struct Pool { + inner: Arc>, } -/// Config options for the `Pool`. -#[derive(Debug)] -pub struct Config { - /// The maximum idle connections *per host*. - pub max_idle: usize, +// Before using a pooled connection, make sure the sender is not dead. +// +// This is a trait to allow the `client::pool::tests` to work for `i32`. +// +// See https://github.com/hyperium/hyper/issues/1429 +pub(super) trait Poolable: Send + Sized + 'static { + fn is_open(&self) -> bool; + /// Reserve this connection. 
+ /// + /// Allows for HTTP/2 to return a shared reservation. + fn reserve(self) -> Reservation; } -impl Default for Config { - #[inline] - fn default() -> Config { - Config { - max_idle: 5, - } - } +/// When checking out a pooled connection, it might be that the connection +/// only supports a single reservation, or it might be usable for many. +/// +/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be +/// used for multiple requests. +pub(super) enum Reservation { + /// This connection could be used multiple times, the first one will be + /// reinserted into the `idle` pool, and the second will be given to + /// the `Checkout`. + Shared(T, T), + /// This connection requires unique access. It will be returned after + /// use is complete. + Unique(T), } -// Because `Config` has all its properties public, it would be a breaking -// change to add new ones. Sigh. -#[derive(Debug)] -struct Config2 { - idle_timeout: Option, - max_idle: usize, +/// Simple type alias in case the key type needs to be adjusted. +type Key = (Arc, Ver); + +struct PoolInner { + connections: Mutex>, + enabled: bool, } - -#[derive(Debug)] -struct PoolImpl { - conns: HashMap>>, - config: Config2, +struct Connections { + // A flag that a connection is being estabilished, and the connection + // should be shared. This prevents making multiple HTTP/2 connections + // to the same host. + connecting: HashSet, + // These are internal Conns sitting in the event loop in the KeepAlive + // state, waiting to receive a new Request to send on the socket. + idle: HashMap>>, + // These are outstanding Checkouts that are waiting for a socket to be + // able to send a Request one. This is used when "racing" for a new + // connection. + // + // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait + // for the Pool to receive an idle Conn. 
When a Conn becomes idle, + // this list is checked for any parked Checkouts, and tries to notify + // them that the Conn could be used instead of waiting for a brand new + // connection. + waiters: HashMap>>, + // A oneshot channel is used to allow the interval to be notified when + // the Pool completely drops. That way, the interval can cancel immediately. + #[cfg(feature = "runtime")] + idle_interval_ref: Option>, + #[cfg(feature = "runtime")] + exec: Exec, + timeout: Option, } -type Key = (String, u16, Scheme); +// This is because `Weak::new()` *allocates* space for `T`, even if it +// doesn't need it! +struct WeakOpt(Option>); -fn key>(host: &str, port: u16, scheme: T) -> Key { - (host.to_owned(), port, scheme.into()) -} - -impl Pool { - /// Creates a `Pool` with a `DefaultConnector`. - #[inline] - pub fn new(config: Config) -> Pool { - Pool::with_connector(config, DefaultConnector::default()) - } -} - -impl Pool { - /// Creates a `Pool` with a specified `NetworkConnector`. - #[inline] - pub fn with_connector(config: Config, connector: C) -> Pool { +impl Pool { + pub fn new(enabled: bool, timeout: Option, __exec: &Exec) -> Pool { Pool { - connector: connector, - inner: Arc::new(Mutex::new(PoolImpl { - conns: HashMap::new(), - config: Config2 { - idle_timeout: None, - max_idle: config.max_idle, - } - })), - stale_check: None, + inner: Arc::new(PoolInner { + connections: Mutex::new(Connections { + connecting: HashSet::new(), + idle: HashMap::new(), + #[cfg(feature = "runtime")] + idle_interval_ref: None, + waiters: HashMap::new(), + #[cfg(feature = "runtime")] + exec: __exec.clone(), + timeout, + }), + enabled, + }), } } - /// Set a duration for how long an idle connection is still valid. 
- pub fn set_idle_timeout(&mut self, timeout: Option) { - self.inner.lock().unwrap().config.idle_timeout = timeout; - } - - pub fn set_stale_check(&mut self, callback: F) - where F: Fn(StaleCheck) -> Stale + Send + Sync + 'static { - self.stale_check = Some(Box::new(callback)); - } - - /// Clear all idle connections from the Pool, closing them. - #[inline] - pub fn clear_idle(&mut self) { - self.inner.lock().unwrap().conns.clear(); - } - - // private - - fn checkout(&self, key: &Key) -> Option> { - while let Some(mut inner) = self.lookup(key) { - if let Some(ref stale_check) = self.stale_check { - let dur = inner.idle.expect("idle is never missing inside pool").elapsed(); - let arg = stale::check(&mut inner.stream, dur); - if stale_check(arg).is_stale() { - trace!("ejecting stale connection"); - continue; - } - } - return Some(inner); - } - None - } - - - fn lookup(&self, key: &Key) -> Option> { - let mut locked = self.inner.lock().unwrap(); - let mut should_remove = false; - let deadline = locked.config.idle_timeout.map(|dur| Instant::now() - dur); - let inner = locked.conns.get_mut(key).and_then(|vec| { - while let Some(inner) = vec.pop() { - should_remove = vec.is_empty(); - if let Some(deadline) = deadline { - if inner.idle.expect("idle is never missing inside pool") < deadline { - trace!("ejecting expired connection"); - continue; - } - } - return Some(inner); - } - None - }); - if should_remove { - locked.conns.remove(key); - } - inner - } -} - -impl PoolImpl { - fn reuse(&mut self, key: Key, conn: PooledStreamInner) { - trace!("reuse {:?}", key); - let conns = self.conns.entry(key).or_insert(vec![]); - if conns.len() < self.config.max_idle { - conns.push(conn); - } - } -} - -impl, S: NetworkStream + Send> NetworkConnector for Pool { - type Stream = PooledStream; - fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result> { - let key = key(host, port, scheme); - let inner = match self.checkout(&key) { - Some(inner) => { - trace!("Pool had 
connection, using"); - inner - }, - None => PooledStreamInner { - key: key.clone(), - idle: None, - stream: try!(self.connector.connect(host, port, scheme)), - previous_response_expected_no_content: false, - } - - }; - Ok(PooledStream { - has_read: false, - inner: Some(inner), - is_closed: AtomicBool::new(false), - pool: self.inner.clone(), - }) - } -} - -type StaleCallback = Box) -> Stale + Send + Sync + 'static>; - -// private on purpose -// -// Yes, I know! Shame on me! This hurts docs! And it means it only -// works with closures! I know! -// -// The thing is, this is experiemental. I'm not certain about the naming. -// Or other things. So I don't really want it in the docs, yet. -// -// As for only working with closures, that's fine. A closure is probably -// enough, and if it isn't, well you can grab the stream and duration and -// pass those to a function, and then figure out whether to call stale() -// or fresh() based on the return value. -// -// Point is, it's not that bad. And it's not ready to publicize. -mod stale { - use std::time::Duration; - - pub struct StaleCheck<'a, S: 'a> { - stream: &'a mut S, - duration: Duration, - } - - #[inline] - pub fn check<'a, S: 'a>(stream: &'a mut S, dur: Duration) -> StaleCheck<'a, S> { - StaleCheck { - stream: stream, - duration: dur, - } - } - - impl<'a, S: 'a> StaleCheck<'a, S> { - pub fn stream(&mut self) -> &mut S { - self.stream - } - - pub fn idle_duration(&self) -> Duration { - self.duration - } - - pub fn stale(self) -> Stale { - Stale(true) - } - - pub fn fresh(self) -> Stale { - Stale(false) - } - } - - pub struct Stale(bool); - - - impl Stale { - #[inline] - pub fn is_stale(self) -> bool { - self.0 - } - } -} - - -/// A Stream that will try to be returned to the Pool when dropped. 
-pub struct PooledStream { - has_read: bool, - inner: Option>, - // mutated in &self methods - is_closed: AtomicBool, - pool: Arc>>, -} - -// manual impl to add the 'static bound for 1.7 compat -impl fmt::Debug for PooledStream where S: fmt::Debug + 'static { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("PooledStream") - .field("inner", &self.inner) - .field("has_read", &self.has_read) - .field("is_closed", &self.is_closed.load(Ordering::Relaxed)) - .field("pool", &self.pool) - .finish() - } -} - -impl PooledStream { - /// Take the wrapped stream out of the pool completely. - pub fn into_inner(mut self) -> S { - self.inner.take().expect("PooledStream lost its inner stream").stream - } - - /// Gets a borrowed reference to the underlying stream. - pub fn get_ref(&self) -> &S { - &self.inner.as_ref().expect("PooledStream lost its inner stream").stream - } - #[cfg(test)] - fn get_mut(&mut self) -> &mut S { - &mut self.inner.as_mut().expect("PooledStream lost its inner stream").stream + pub(super) fn no_timer(&self) { + // Prevent an actual interval from being created for this pool... + #[cfg(feature = "runtime")] + { + let mut inner = self.inner.connections.lock().unwrap(); + assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); + let (tx, _) = oneshot::channel(); + inner.idle_interval_ref = Some(tx); + } } } -#[derive(Debug)] -struct PooledStreamInner { - key: Key, - idle: Option, - stream: S, - previous_response_expected_no_content: bool, -} +impl Pool { + /// Returns a `Checkout` which is a future that resolves if an idle + /// connection becomes available. 
+ pub fn checkout(&self, key: Key) -> Checkout { + Checkout { + key, + pool: self.clone(), + waiter: None, + } + } -impl Read for PooledStream { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let inner = self.inner.as_mut().unwrap(); - let n = try!(inner.stream.read(buf)); - if n == 0 { - // if the wrapped stream returns EOF (Ok(0)), that means the - // server has closed the stream. we must be sure this stream - // is dropped and not put back into the pool. - self.is_closed.store(true, Ordering::Relaxed); - - // if the stream has never read bytes before, then the pooled - // stream may have been disconnected by the server while - // we checked it back out - if !self.has_read && inner.idle.is_some() { - // idle being some means this is a reused stream - Err(io::Error::new( - io::ErrorKind::ConnectionAborted, - "Pooled stream disconnected" - )) + /// Ensure that there is only ever 1 connecting task for HTTP/2 + /// connections. This does nothing for HTTP/1. + pub(super) fn connecting(&self, key: &Key) -> Option> { + if key.1 == Ver::Http2 && self.inner.enabled { + let mut inner = self.inner.connections.lock().unwrap(); + if inner.connecting.insert(key.clone()) { + let connecting = Connecting { + key: key.clone(), + pool: WeakOpt::downgrade(&self.inner), + }; + Some(connecting) } else { - Ok(0) + trace!("HTTP/2 connecting already in progress for {:?}", key.0); + None } } else { - self.has_read = true; - Ok(n) + Some(Connecting { + key: key.clone(), + // in HTTP/1's case, there is never a lock, so we don't + // need to do anything in Drop. 
+ pool: WeakOpt::none(), + }) } } -} -impl Write for PooledStream { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.as_mut().unwrap().stream.write(buf) + #[cfg(feature = "runtime")] + #[cfg(test)] + pub(super) fn h1_key(&self, s: &str) -> Key { + (Arc::new(s.to_string()), Ver::Http1) } - #[inline] - fn flush(&mut self) -> io::Result<()> { - self.inner.as_mut().unwrap().stream.flush() + #[cfg(feature = "runtime")] + #[cfg(test)] + pub(super) fn idle_count(&self, key: &Key) -> usize { + self + .inner + .connections + .lock() + .unwrap() + .idle + .get(key) + .map(|list| list.len()) + .unwrap_or(0) + } + + fn take(&self, key: &Key) -> Option> { + let entry = { + let mut inner = self.inner.connections.lock().unwrap(); + let expiration = Expiration::new(inner.timeout); + let maybe_entry = inner.idle.get_mut(key) + .and_then(|list| { + trace!("take? {:?}: expiration = {:?}", key, expiration.0); + // A block to end the mutable borrow on list, + // so the map below can check is_empty() + { + let popper = IdlePopper { + key, + list, + }; + popper.pop(&expiration) + } + .map(|e| (e, list.is_empty())) + }); + + let (entry, empty) = if let Some((e, empty)) = maybe_entry { + (Some(e), empty) + } else { + // No entry found means nuke the list for sure. + (None, true) + }; + if empty { + //TODO: This could be done with the HashMap::entry API instead. 
+ inner.idle.remove(key); + } + entry + }; + + entry.map(|e| self.reuse(key, e.value)) + } + + pub(super) fn pooled(&self, mut connecting: Connecting, value: T) -> Pooled { + let (value, pool_ref) = if self.inner.enabled { + match value.reserve() { + Reservation::Shared(to_insert, to_return) => { + debug_assert_eq!( + connecting.key.1, + Ver::Http2, + "shared reservation without Http2" + ); + let mut inner = self.inner.connections.lock().unwrap(); + inner.put(connecting.key.clone(), to_insert, &self.inner); + // Do this here instead of Drop for Connecting because we + // already have a lock, no need to lock the mutex twice. + inner.connected(&connecting.key); + // prevent the Drop of Connecting from repeating inner.connected() + connecting.pool = WeakOpt::none(); + + // Shared reservations don't need a reference to the pool, + // since the pool always keeps a copy. + (to_return, WeakOpt::none()) + }, + Reservation::Unique(value) => { + // Unique reservations must take a reference to the pool + // since they hope to reinsert once the reservation is + // completed + (value, WeakOpt::downgrade(&self.inner)) + }, + } + } else { + // If pool is not enabled, skip all the things... + + // The Connecting should have had no pool ref + debug_assert!(connecting.pool.upgrade().is_none()); + + (value, WeakOpt::none()) + }; + Pooled { + key: connecting.key.clone(), + is_reused: false, + pool: pool_ref, + value: Some(value) + } + } + + fn reuse(&self, key: &Key, value: T) -> Pooled { + debug!("reuse idle connection for {:?}", key); + // TODO: unhack this + // In Pool::pooled(), which is used for inserting brand new connections, + // there's some code that adjusts the pool reference taken depending + // on if the Reservation can be shared or is unique. By the time + // reuse() is called, the reservation has already been made, and + // we just have the final value, without knowledge of if this is + // unique or shared. So, the hack is to just assume Ver::Http2 means + // shared... 
:( + let pool_ref = if key.1 == Ver::Http2 { + WeakOpt::none() + } else { + WeakOpt::downgrade(&self.inner) + }; + + Pooled { + is_reused: true, + key: key.clone(), + pool: pool_ref, + value: Some(value), + } + } + + fn waiter(&mut self, key: Key, tx: oneshot::Sender) { + trace!("checkout waiting for idle connection: {:?}", key); + self.inner.connections.lock().unwrap() + .waiters.entry(key) + .or_insert(VecDeque::new()) + .push_back(tx); } } -impl NetworkStream for PooledStream { - #[inline] - fn peer_addr(&mut self) -> io::Result { - self.inner.as_mut().unwrap().stream.peer_addr() - .map_err(|e| { - self.is_closed.store(true, Ordering::Relaxed); - e - }) - } - - #[inline] - fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - self.inner.as_ref().unwrap().stream.set_read_timeout(dur) - .map_err(|e| { - self.is_closed.store(true, Ordering::Relaxed); - e - }) - } - - #[inline] - fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - self.inner.as_ref().unwrap().stream.set_write_timeout(dur) - .map_err(|e| { - self.is_closed.store(true, Ordering::Relaxed); - e - }) - } - - #[inline] - fn close(&mut self, how: Shutdown) -> io::Result<()> { - self.is_closed.store(true, Ordering::Relaxed); - self.inner.as_mut().unwrap().stream.close(how) - } - - #[inline] - fn set_previous_response_expected_no_content(&mut self, expected: bool) { - trace!("set_previous_response_expected_no_content {}", expected); - self.inner.as_mut().unwrap().previous_response_expected_no_content = expected; - } - - #[inline] - fn previous_response_expected_no_content(&self) -> bool { - let answer = self.inner.as_ref().unwrap().previous_response_expected_no_content; - trace!("previous_response_expected_no_content {}", answer); - answer - } +/// Pop off this list, looking for a usable connection that hasn't expired. 
+struct IdlePopper<'a, T: 'a> { + key: &'a Key, + list: &'a mut Vec>, } -impl Drop for PooledStream { - fn drop(&mut self) { - let is_closed = self.is_closed.load(Ordering::Relaxed); - trace!("PooledStream.drop, is_closed={}", is_closed); - if !is_closed { - self.inner.take().map(|mut inner| { - let now = Instant::now(); - inner.idle = Some(now); - if let Ok(mut pool) = self.pool.lock() { - pool.reuse(inner.key.clone(), inner); +impl<'a, T: Poolable + 'a> IdlePopper<'a, T> { + fn pop(self, expiration: &Expiration) -> Option> { + while let Some(entry) = self.list.pop() { + // If the connection has been closed, or is older than our idle + // timeout, simply drop it and keep looking... + if !entry.value.is_open() { + trace!("removing closed connection for {:?}", self.key); + continue; + } + // TODO: Actually, since the `idle` list is pushed to the end always, + // that would imply that if *this* entry is expired, then anything + // "earlier" in the list would *have* to be expired also... Right? + // + // In that case, we could just break out of the loop and drop the + // whole list... 
+ if expiration.expires(entry.idle_at) { + trace!("removing expired connection for {:?}", self.key); + continue; + } + + let value = match entry.value.reserve() { + Reservation::Shared(to_reinsert, to_checkout) => { + self.list.push(Idle { + idle_at: Instant::now(), + value: to_reinsert, + }); + to_checkout + }, + Reservation::Unique(unique) => { + unique } - // else poisoned, give up + }; + + return Some(Idle { + idle_at: entry.idle_at, + value, }); } + + None + } +} + +impl Connections { + fn put(&mut self, key: Key, value: T, __pool_ref: &Arc>) { + if key.1 == Ver::Http2 && self.idle.contains_key(&key) { + trace!("put; existing idle HTTP/2 connection for {:?}", key); + return; + } + trace!("put; add idle connection for {:?}", key); + let mut remove_waiters = false; + let mut value = Some(value); + if let Some(waiters) = self.waiters.get_mut(&key) { + while let Some(tx) = waiters.pop_front() { + if !tx.is_canceled() { + let reserved = value.take().expect("value already sent"); + let reserved = match reserved.reserve() { + Reservation::Shared(to_keep, to_send) => { + value = Some(to_keep); + to_send + }, + Reservation::Unique(uniq) => uniq, + }; + match tx.send(reserved) { + Ok(()) => { + if value.is_none() { + break; + } else { + continue; + } + }, + Err(e) => { + value = Some(e); + } + } + } + + trace!("put; removing canceled waiter for {:?}", key); + } + remove_waiters = waiters.is_empty(); + } + if remove_waiters { + self.waiters.remove(&key); + } + + match value { + Some(value) => { + debug!("pooling idle connection for {:?}", key); + self.idle.entry(key) + .or_insert(Vec::new()) + .push(Idle { + value: value, + idle_at: Instant::now(), + }); + + #[cfg(feature = "runtime")] + { + self.spawn_idle_interval(__pool_ref); + } + } + None => trace!("put; found waiter for {:?}", key), + } + } + + /// A `Connecting` task is complete. Not necessarily successfully, + /// but the lock is going away, so clean up. 
+ fn connected(&mut self, key: &Key) { + let existed = self.connecting.remove(key); + debug_assert!( + existed, + "Connecting dropped, key not in pool.connecting" + ); + // cancel any waiters. if there are any, it's because + // this Connecting task didn't complete successfully. + // those waiters would never receive a connection. + self.waiters.remove(key); + } + + #[cfg(feature = "runtime")] + fn spawn_idle_interval(&mut self, pool_ref: &Arc>) { + let (dur, rx) = { + debug_assert!(pool_ref.enabled); + + if self.idle_interval_ref.is_some() { + return; + } + + if let Some(dur) = self.timeout { + let (tx, rx) = oneshot::channel(); + self.idle_interval_ref = Some(tx); + (dur, rx) + } else { + return + } + }; + + let start = Instant::now() + dur; + + let interval = IdleInterval { + interval: Interval::new(start, dur), + pool: WeakOpt::downgrade(pool_ref), + pool_drop_notifier: rx, + }; + + if let Err(err) = self.exec.execute(interval) { + // This task isn't critical, so simply log and ignore. + warn!("error spawning connection pool idle interval: {}", err); + } + } +} + +impl Connections { + /// Any `FutureResponse`s that were created will have made a `Checkout`, + /// and possibly inserted into the pool that it is waiting for an idle + /// connection. If a user ever dropped that future, we need to clean out + /// those parked senders. + fn clean_waiters(&mut self, key: &Key) { + let mut remove_waiters = false; + if let Some(waiters) = self.waiters.get_mut(key) { + waiters.retain(|tx| { + !tx.is_canceled() + }); + remove_waiters = waiters.is_empty(); + } + if remove_waiters { + self.waiters.remove(key); + } + } +} + +#[cfg(feature = "runtime")] +impl Connections { + /// This should *only* be called by the IdleInterval. 
+ fn clear_expired(&mut self) { + let dur = self.timeout.expect("interval assumes timeout"); + + let now = Instant::now(); + //self.last_idle_check_at = now; + + self.idle.retain(|key, values| { + values.retain(|entry| { + if !entry.value.is_open() { + trace!("idle interval evicting closed for {:?}", key); + return false; + } + if now - entry.idle_at > dur { + trace!("idle interval evicting expired for {:?}", key); + return false; + } + + // Otherwise, keep this value... + true + }); + + // returning false evicts this key/val + !values.is_empty() + }); + } +} + +impl Clone for Pool { + fn clone(&self) -> Pool { + Pool { + inner: self.inner.clone(), + } + } +} + +/// A wrapped poolable value that tries to reinsert to the Pool on Drop. +// Note: The bounds `T: Poolable` is needed for the Drop impl. +pub(super) struct Pooled { + value: Option, + is_reused: bool, + key: Key, + pool: WeakOpt>, +} + +impl Pooled { + pub fn is_reused(&self) -> bool { + self.is_reused + } + + pub fn is_pool_enabled(&self) -> bool { + self.pool.0.is_some() + } + + fn as_ref(&self) -> &T { + self.value.as_ref().expect("not dropped") + } + + fn as_mut(&mut self) -> &mut T { + self.value.as_mut().expect("not dropped") + } +} + +impl Deref for Pooled { + type Target = T; + fn deref(&self) -> &T { + self.as_ref() + } +} + +impl DerefMut for Pooled { + fn deref_mut(&mut self) -> &mut T { + self.as_mut() + } +} + +impl Drop for Pooled { + fn drop(&mut self) { + if let Some(value) = self.value.take() { + if !value.is_open() { + // If we *already* know the connection is done here, + // it shouldn't be re-inserted back into the pool. + return; + } + + if let Some(pool) = self.pool.upgrade() { + // Pooled should not have had a real reference if pool is + // not enabled! 
+ debug_assert!(pool.enabled); + + if let Ok(mut inner) = pool.connections.lock() { + inner.put(self.key.clone(), value, &pool); + } + } else if self.key.1 == Ver::Http1 { + trace!("pool dropped, dropping pooled ({:?})", self.key); + } + // Ver::Http2 is already in the Pool (or dead), so we wouldn't + // have an actual reference to the Pool. + } + } +} + +impl fmt::Debug for Pooled { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Pooled") + .field("key", &self.key) + .finish() + } +} + +struct Idle { + idle_at: Instant, + value: T, +} + +pub(super) struct Checkout { + key: Key, + pool: Pool, + waiter: Option>, +} + +impl Checkout { + fn poll_waiter(&mut self) -> Poll>, ::Error> { + static CANCELED: &str = "pool checkout failed"; + if let Some(mut rx) = self.waiter.take() { + match rx.poll() { + Ok(Async::Ready(value)) => { + if value.is_open() { + Ok(Async::Ready(Some(self.pool.reuse(&self.key, value)))) + } else { + Err(::Error::new_canceled(Some(CANCELED))) + } + }, + Ok(Async::NotReady) => { + self.waiter = Some(rx); + Ok(Async::NotReady) + }, + Err(_canceled) => Err(::Error::new_canceled(Some(CANCELED))), + } + } else { + Ok(Async::Ready(None)) + } + } + + fn add_waiter(&mut self) { + if self.waiter.is_none() { + let (tx, mut rx) = oneshot::channel(); + let _ = rx.poll(); // park this task + self.pool.waiter(self.key.clone(), tx); + self.waiter = Some(rx); + } + } +} + +impl Future for Checkout { + type Item = Pooled; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + if let Some(pooled) = try_ready!(self.poll_waiter()) { + return Ok(Async::Ready(pooled)); + } + + let entry = self.pool.take(&self.key); + + if let Some(pooled) = entry { + Ok(Async::Ready(pooled)) + } else { + self.add_waiter(); + Ok(Async::NotReady) + } + } +} + +impl Drop for Checkout { + fn drop(&mut self) { + if self.waiter.take().is_some() { + if let Ok(mut inner) = self.pool.inner.connections.lock() { + inner.clean_waiters(&self.key); + } + } + } +} + 
+pub(super) struct Connecting { + key: Key, + pool: WeakOpt>, +} + +impl Drop for Connecting { + fn drop(&mut self) { + if let Some(pool) = self.pool.upgrade() { + // No need to panic on drop, that could abort! + if let Ok(mut inner) = pool.connections.lock() { + debug_assert_eq!( + self.key.1, + Ver::Http2, + "Connecting constructed without Http2" + ); + inner.connected(&self.key); + } + } + } +} + +struct Expiration(Option); + +impl Expiration { + fn new(dur: Option) -> Expiration { + Expiration(dur) + } + + fn expires(&self, instant: Instant) -> bool { + match self.0 { + Some(timeout) => instant.elapsed() > timeout, + None => false, + } + } +} + +#[cfg(feature = "runtime")] +struct IdleInterval { + interval: Interval, + pool: WeakOpt>, + // This allows the IdleInterval to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. + pool_drop_notifier: oneshot::Receiver<::common::Never>, +} + +#[cfg(feature = "runtime")] +impl Future for IdleInterval { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll { + // Interval is a Stream + use futures::Stream; + + loop { + match self.pool_drop_notifier.poll() { + Ok(Async::Ready(n)) => match n {}, + Ok(Async::NotReady) => (), + Err(_canceled) => { + trace!("pool closed, canceling idle interval"); + return Ok(Async::Ready(())); + } + } + + try_ready!(self.interval.poll().map_err(|err| { + error!("idle interval timer error: {}", err); + })); + + if let Some(inner) = self.pool.upgrade() { + if let Ok(mut inner) = inner.connections.lock() { + trace!("idle interval checking for expired"); + inner.clear_expired(); + continue; + } + } + return Ok(Async::Ready(())); + } + } +} + +impl WeakOpt { + fn none() -> Self { + WeakOpt(None) + } + + fn downgrade(arc: &Arc) -> Self { + WeakOpt(Some(Arc::downgrade(arc))) + } + + fn upgrade(&self) -> Option> { + self.0 + .as_ref() + .and_then(Weak::upgrade) } } 
#[cfg(test)] mod tests { - use std::net::Shutdown; - use std::io::Read; + use std::sync::Arc; use std::time::Duration; - use mock::{MockConnector}; - use net::{NetworkConnector, NetworkStream}; + use futures::{Async, Future}; + use futures::future; + use common::Exec; + use super::{Connecting, Key, Poolable, Pool, Reservation, Ver, WeakOpt}; - use super::{Pool, key}; + /// Test unique reservations. + #[derive(Debug, PartialEq, Eq)] + struct Uniq(T); - macro_rules! mocked { - () => ({ - Pool::with_connector(Default::default(), MockConnector) - }) + impl Poolable for Uniq { + fn is_open(&self) -> bool { + true + } + + fn reserve(self) -> Reservation { + Reservation::Unique(self) + } + } + + fn c(key: Key) -> Connecting { + Connecting { + key, + pool: WeakOpt::none(), + } + } + + fn pool_no_timer() -> Pool { + let pool = Pool::new(true, Some(Duration::from_millis(100)), &Exec::Default); + pool.no_timer(); + pool } #[test] - fn test_connect_and_drop() { - let mut pool = mocked!(); - pool.set_idle_timeout(Some(Duration::from_millis(100))); - let key = key("127.0.0.1", 3000, "http"); - let mut stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); - assert_eq!(stream.get_ref().id, 0); - stream.get_mut().id = 9; - drop(stream); - { - let locked = pool.inner.lock().unwrap(); - assert_eq!(locked.conns.len(), 1); - assert_eq!(locked.conns.get(&key).unwrap().len(), 1); - } - let stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); //reused - assert_eq!(stream.get_ref().id, 9); - drop(stream); - { - let locked = pool.inner.lock().unwrap(); - assert_eq!(locked.conns.len(), 1); - assert_eq!(locked.conns.get(&key).unwrap().len(), 1); + fn test_pool_checkout_smoke() { + let pool = pool_no_timer(); + let key = (Arc::new("foo".to_string()), Ver::Http1); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + drop(pooled); + + match pool.checkout(key).poll().unwrap() { + Async::Ready(pooled) => assert_eq!(*pooled, Uniq(41)), + _ => panic!("not ready"), } } #[test] - fn 
test_double_connect_reuse() { - let mut pool = mocked!(); - pool.set_idle_timeout(Some(Duration::from_millis(100))); - let key = key("127.0.0.1", 3000, "http"); - let stream1 = pool.connect("127.0.0.1", 3000, "http").unwrap(); - let stream2 = pool.connect("127.0.0.1", 3000, "http").unwrap(); - drop(stream1); - drop(stream2); - let stream1 = pool.connect("127.0.0.1", 3000, "http").unwrap(); - { - let locked = pool.inner.lock().unwrap(); - assert_eq!(locked.conns.len(), 1); - assert_eq!(locked.conns.get(&key).unwrap().len(), 1); + fn test_pool_checkout_returns_none_if_expired() { + future::lazy(|| { + let pool = pool_no_timer(); + let key = (Arc::new("foo".to_string()), Ver::Http1); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + drop(pooled); + ::std::thread::sleep(pool.inner.connections.lock().unwrap().timeout.unwrap()); + assert!(pool.checkout(key).poll().unwrap().is_not_ready()); + ::futures::future::ok::<(), ()>(()) + }).wait().unwrap(); + } + + #[test] + fn test_pool_checkout_removes_expired() { + future::lazy(|| { + let pool = pool_no_timer(); + let key = (Arc::new("foo".to_string()), Ver::Http1); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + assert_eq!(pool.inner.connections.lock().unwrap().idle.get(&key).map(|entries| entries.len()), Some(3)); + ::std::thread::sleep(pool.inner.connections.lock().unwrap().timeout.unwrap()); + + // checkout.poll() should clean out the expired + pool.checkout(key.clone()).poll().unwrap(); + assert!(pool.inner.connections.lock().unwrap().idle.get(&key).is_none()); + + Ok::<(), ()>(()) + }).wait().unwrap(); + } + + #[cfg(feature = "runtime")] + #[test] + fn test_pool_timer_removes_expired() { + use std::time::Instant; + use tokio_timer::Delay; + let mut rt = ::tokio::runtime::current_thread::Runtime::new().unwrap(); + let pool = Pool::new(true, Some(Duration::from_millis(100)), &Exec::Default); + + let key = (Arc::new("foo".to_string()), 
Ver::Http1); + + // Since pool.pooled() will be calling spawn on drop, need to be sure + // those drops are called while `rt` is the current executor. To do so, + // call those inside a future. + rt.block_on(::futures::future::lazy(|| { + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + Ok::<_, ()>(()) + })).unwrap(); + + assert_eq!(pool.inner.connections.lock().unwrap().idle.get(&key).map(|entries| entries.len()), Some(3)); + + // Let the timer tick passed the expiration... + rt + .block_on(Delay::new(Instant::now() + Duration::from_millis(200))) + .expect("rt block_on 200ms"); + + assert!(pool.inner.connections.lock().unwrap().idle.get(&key).is_none()); + } + + #[test] + fn test_pool_checkout_task_unparked() { + let pool = pool_no_timer(); + let key = (Arc::new("foo".to_string()), Ver::Http1); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + let checkout = pool.checkout(key).join(future::lazy(move || { + // the checkout future will park first, + // and then this lazy future will be polled, which will insert + // the pooled back into the pool + // + // this test makes sure that doing so will unpark the checkout + drop(pooled); + Ok(()) + })).map(|(entry, _)| entry); + assert_eq!(*checkout.wait().unwrap(), Uniq(41)); + } + + #[test] + fn test_pool_checkout_drop_cleans_up_waiters() { + future::lazy(|| { + let pool = pool_no_timer::>(); + let key = (Arc::new("localhost:12345".to_string()), Ver::Http1); + + let mut checkout1 = pool.checkout(key.clone()); + let mut checkout2 = pool.checkout(key.clone()); + + // first poll needed to get into Pool's parked + checkout1.poll().unwrap(); + assert_eq!(pool.inner.connections.lock().unwrap().waiters.get(&key).unwrap().len(), 1); + checkout2.poll().unwrap(); + assert_eq!(pool.inner.connections.lock().unwrap().waiters.get(&key).unwrap().len(), 2); + + // on drop, clean up Pool + drop(checkout1); + 
assert_eq!(pool.inner.connections.lock().unwrap().waiters.get(&key).unwrap().len(), 1); + + drop(checkout2); + assert!(pool.inner.connections.lock().unwrap().waiters.get(&key).is_none()); + + ::futures::future::ok::<(), ()>(()) + }).wait().unwrap(); + } + + #[derive(Debug)] + struct CanClose { + val: i32, + closed: bool, + } + + impl Poolable for CanClose { + fn is_open(&self) -> bool { + !self.closed + } + + fn reserve(self) -> Reservation { + Reservation::Unique(self) } - let _ = stream1; } #[test] - fn test_closed() { - let pool = mocked!(); - let mut stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); - stream.close(Shutdown::Both).unwrap(); - drop(stream); - let locked = pool.inner.lock().unwrap(); - assert_eq!(locked.conns.len(), 0); - } + fn pooled_drop_if_closed_doesnt_reinsert() { + let pool = pool_no_timer(); + let key = (Arc::new("localhost:12345".to_string()), Ver::Http1); + pool.pooled(c(key.clone()), CanClose { + val: 57, + closed: true, + }); - #[test] - fn test_eof_closes() { - let pool = mocked!(); - - let mut stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); - assert_eq!(stream.read(&mut [0]).unwrap(), 0); - drop(stream); - let locked = pool.inner.lock().unwrap(); - assert_eq!(locked.conns.len(), 0); - } - - #[test] - fn test_read_conn_aborted() { - let pool = mocked!(); - - pool.connect("127.0.0.1", 3000, "http").unwrap(); - let mut stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); - let err = stream.read(&mut [0]).unwrap_err(); - assert_eq!(err.kind(), ::std::io::ErrorKind::ConnectionAborted); - drop(stream); - let locked = pool.inner.lock().unwrap(); - assert_eq!(locked.conns.len(), 0); - } - - #[test] - fn test_idle_timeout() { - let mut pool = mocked!(); - pool.set_idle_timeout(Some(Duration::from_millis(10))); - let mut stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); - assert_eq!(stream.get_ref().id, 0); - stream.get_mut().id = 1337; - drop(stream); - ::std::thread::sleep(Duration::from_millis(100)); - let 
stream = pool.connect("127.0.0.1", 3000, "http").unwrap(); - assert_eq!(stream.get_ref().id, 0); + assert!(!pool.inner.connections.lock().unwrap().idle.contains_key(&key)); } } diff --git a/third_party/rust/hyper/src/client/proxy.rs b/third_party/rust/hyper/src/client/proxy.rs deleted file mode 100644 index db7bc1eaf023..000000000000 --- a/third_party/rust/hyper/src/client/proxy.rs +++ /dev/null @@ -1,221 +0,0 @@ -use std::borrow::Cow; -use std::io; -use std::net::{SocketAddr, Shutdown}; -use std::time::Duration; - -use client::scheme::Scheme; -use method::Method; -use net::{NetworkConnector, HttpConnector, NetworkStream, SslClient}; - -pub fn tunnel(proxy: (Scheme, Cow<'static, str>, u16)) -> Proxy { - Proxy { - connector: HttpConnector, - proxy: proxy, - ssl: self::no_ssl::Plaintext, - } - -} - -pub struct Proxy -where C: NetworkConnector + Send + Sync + 'static, - C::Stream: NetworkStream + Send + Clone, - S: SslClient { - pub connector: C, - pub proxy: (Scheme, Cow<'static, str>, u16), - pub ssl: S, -} - -impl NetworkConnector for Proxy -where C: NetworkConnector + Send + Sync + 'static, - C::Stream: NetworkStream + Send + Clone, - S: SslClient { - type Stream = Proxied; - - fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result { - use httparse; - use std::io::{Read, Write}; - use ::version::HttpVersion::Http11; - trace!("{:?} proxy for '{}://{}:{}'", self.proxy, scheme, host, port); - match scheme { - "http" => { - self.connector.connect(self.proxy.1.as_ref(), self.proxy.2, self.proxy.0.as_ref()) - .map(Proxied::Normal) - }, - "https" => { - let mut stream = try!(self.connector.connect(self.proxy.1.as_ref(), self.proxy.2, self.proxy.0.as_ref())); - trace!("{:?} CONNECT {}:{}", self.proxy, host, port); - try!(write!(&mut stream, "{method} {host}:{port} {version}\r\nHost: {host}:{port}\r\n\r\n", - method=Method::Connect, host=host, port=port, version=Http11)); - try!(stream.flush()); - let mut buf = [0; 1024]; - let mut n = 0; - while n < buf.len() 
{ - n += try!(stream.read(&mut buf[n..])); - let mut headers = [httparse::EMPTY_HEADER; 10]; - let mut res = httparse::Response::new(&mut headers); - if try!(res.parse(&buf[..n])).is_complete() { - let code = res.code.expect("complete parsing lost code"); - if code >= 200 && code < 300 { - trace!("CONNECT success = {:?}", code); - return self.ssl.wrap_client(stream, host) - .map(Proxied::Tunneled) - } else { - trace!("CONNECT response = {:?}", code); - return Err(::Error::Status); - } - } - } - Err(::Error::TooLarge) - }, - _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid scheme").into()) - } - } -} - -#[derive(Debug)] -pub enum Proxied { - Normal(T1), - Tunneled(T2) -} - -#[cfg(test)] -impl Proxied { - pub fn into_normal(self) -> Result { - match self { - Proxied::Normal(t1) => Ok(t1), - _ => Err(self) - } - } - - pub fn into_tunneled(self) -> Result { - match self { - Proxied::Tunneled(t2) => Ok(t2), - _ => Err(self) - } - } -} - -impl io::Read for Proxied { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match *self { - Proxied::Normal(ref mut t) => io::Read::read(t, buf), - Proxied::Tunneled(ref mut t) => io::Read::read(t, buf), - } - } -} - -impl io::Write for Proxied { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - match *self { - Proxied::Normal(ref mut t) => io::Write::write(t, buf), - Proxied::Tunneled(ref mut t) => io::Write::write(t, buf), - } - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - match *self { - Proxied::Normal(ref mut t) => io::Write::flush(t), - Proxied::Tunneled(ref mut t) => io::Write::flush(t), - } - } -} - -impl NetworkStream for Proxied { - #[inline] - fn peer_addr(&mut self) -> io::Result { - match *self { - Proxied::Normal(ref mut s) => s.peer_addr(), - Proxied::Tunneled(ref mut s) => s.peer_addr() - } - } - - #[inline] - fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - match *self { - Proxied::Normal(ref inner) => inner.set_read_timeout(dur), - 
Proxied::Tunneled(ref inner) => inner.set_read_timeout(dur) - } - } - - #[inline] - fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - match *self { - Proxied::Normal(ref inner) => inner.set_write_timeout(dur), - Proxied::Tunneled(ref inner) => inner.set_write_timeout(dur) - } - } - - #[inline] - fn close(&mut self, how: Shutdown) -> io::Result<()> { - match *self { - Proxied::Normal(ref mut s) => s.close(how), - Proxied::Tunneled(ref mut s) => s.close(how) - } - } -} - -#[cfg(not(any(feature = "openssl", feature = "security-framework")))] -mod no_ssl { - use std::io; - use std::net::{Shutdown, SocketAddr}; - use std::time::Duration; - - use net::{SslClient, NetworkStream}; - - pub struct Plaintext; - - #[derive(Clone)] - pub enum Void {} - - impl io::Read for Void { - #[inline] - fn read(&mut self, _buf: &mut [u8]) -> io::Result { - match *self {} - } - } - - impl io::Write for Void { - #[inline] - fn write(&mut self, _buf: &[u8]) -> io::Result { - match *self {} - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - match *self {} - } - } - - impl NetworkStream for Void { - #[inline] - fn peer_addr(&mut self) -> io::Result { - match *self {} - } - - #[inline] - fn set_read_timeout(&self, _dur: Option) -> io::Result<()> { - match *self {} - } - - #[inline] - fn set_write_timeout(&self, _dur: Option) -> io::Result<()> { - match *self {} - } - - #[inline] - fn close(&mut self, _how: Shutdown) -> io::Result<()> { - match *self {} - } - } - - impl SslClient for Plaintext { - type Stream = Void; - - fn wrap_client(&self, _stream: T, _host: &str) -> ::Result { - Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid scheme").into()) - } - } -} diff --git a/third_party/rust/hyper/src/client/request.rs b/third_party/rust/hyper/src/client/request.rs deleted file mode 100644 index 8743373c5b47..000000000000 --- a/third_party/rust/hyper/src/client/request.rs +++ /dev/null @@ -1,314 +0,0 @@ -//! 
Client Requests -use std::marker::PhantomData; -use std::io::{self, Write}; - -use std::time::Duration; - -use url::Url; - -use method::Method; -use header::Headers; -use header::Host; -use net::{NetworkStream, NetworkConnector, DefaultConnector, Fresh, Streaming}; -use version; -use client::{Response, get_host_and_port}; - -use http::{HttpMessage, RequestHead}; -use http::h1::Http11Message; - - -/// A client request to a remote server. -/// The W type tracks the state of the request, Fresh vs Streaming. -pub struct Request { - /// The target URI for this request. - pub url: Url, - - /// The HTTP version of this request. - pub version: version::HttpVersion, - - message: Box, - headers: Headers, - method: Method, - - _marker: PhantomData, -} - -impl Request { - /// Read the Request headers. - #[inline] - pub fn headers(&self) -> &Headers { &self.headers } - - /// Read the Request method. - #[inline] - pub fn method(&self) -> Method { self.method.clone() } - - /// Set the write timeout. - #[inline] - pub fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - self.message.set_write_timeout(dur) - } - - /// Set the read timeout. - #[inline] - pub fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - self.message.set_read_timeout(dur) - } -} - -impl Request { - /// Create a new `Request` that will use the given `HttpMessage` for its communication - /// with the server. This implies that the given `HttpMessage` instance has already been - /// properly initialized by the caller (e.g. a TCP connection's already established). 
- pub fn with_message(method: Method, url: Url, message: Box) - -> ::Result> { - let mut headers = Headers::new(); - { - let (host, port) = try!(get_host_and_port(&url)); - headers.set(Host { - hostname: host.to_owned(), - port: Some(port), - }); - } - - Ok(Request::with_headers_and_message(method, url, headers, message)) - } - - #[doc(hidden)] - pub fn with_headers_and_message(method: Method, url: Url, headers: Headers, message: Box) - -> Request { - Request { - method: method, - headers: headers, - url: url, - version: version::HttpVersion::Http11, - message: message, - _marker: PhantomData, - } - } - - /// Create a new client request. - pub fn new(method: Method, url: Url) -> ::Result> { - let conn = DefaultConnector::default(); - Request::with_connector(method, url, &conn) - } - - /// Create a new client request with a specific underlying NetworkStream. - pub fn with_connector(method: Method, url: Url, connector: &C) - -> ::Result> where - C: NetworkConnector, - S: Into> { - let stream = { - let (host, port) = try!(get_host_and_port(&url)); - try!(connector.connect(host, port, url.scheme())).into() - }; - - Request::with_message(method, url, Box::new(Http11Message::with_stream(stream))) - } - - /// Consume a Fresh Request, writing the headers and method, - /// returning a Streaming Request. - pub fn start(mut self) -> ::Result> { - let head = match self.message.set_outgoing(RequestHead { - headers: self.headers, - method: self.method, - url: self.url, - }) { - Ok(head) => head, - Err(e) => { - let _ = self.message.close_connection(); - return Err(From::from(e)); - } - }; - - Ok(Request { - method: head.method, - headers: head.headers, - url: head.url, - version: self.version, - message: self.message, - _marker: PhantomData, - }) - } - - /// Get a mutable reference to the Request headers. 
- #[inline] - pub fn headers_mut(&mut self) -> &mut Headers { &mut self.headers } -} - - - -impl Request { - /// Completes writing the request, and returns a response to read from. - /// - /// Consumes the Request. - pub fn send(self) -> ::Result { - Response::with_message(self.url, self.message) - } -} - -impl Write for Request { - #[inline] - fn write(&mut self, msg: &[u8]) -> io::Result { - match self.message.write(msg) { - Ok(n) => Ok(n), - Err(e) => { - let _ = self.message.close_connection(); - Err(e) - } - } - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - match self.message.flush() { - Ok(r) => Ok(r), - Err(e) => { - let _ = self.message.close_connection(); - Err(e) - } - } - } -} - -#[cfg(test)] -mod tests { - use std::io::Write; - use std::str::from_utf8; - use url::Url; - use method::Method::{Get, Head, Post}; - use mock::{MockStream, MockConnector}; - use net::Fresh; - use header::{ContentLength,TransferEncoding,Encoding}; - use url::form_urlencoded; - use super::Request; - use http::h1::Http11Message; - - fn run_request(req: Request) -> Vec { - let req = req.start().unwrap(); - let message = req.message; - let mut message = message.downcast::().ok().unwrap(); - message.flush_outgoing().unwrap(); - let stream = *message - .into_inner().downcast::().ok().unwrap(); - stream.write - } - - fn assert_no_body(s: &str) { - assert!(!s.contains("Content-Length:")); - assert!(!s.contains("Transfer-Encoding:")); - } - - #[test] - fn test_get_empty_body() { - let req = Request::with_connector( - Get, Url::parse("http://example.dom").unwrap(), &mut MockConnector - ).unwrap(); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert_no_body(s); - } - - #[test] - fn test_head_empty_body() { - let req = Request::with_connector( - Head, Url::parse("http://example.dom").unwrap(), &mut MockConnector - ).unwrap(); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert_no_body(s); - } - - #[test] - fn 
test_url_query() { - let url = Url::parse("http://example.dom?q=value").unwrap(); - let req = Request::with_connector( - Get, url, &mut MockConnector - ).unwrap(); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert!(s.contains("?q=value")); - } - - #[test] - fn test_post_content_length() { - let url = Url::parse("http://example.dom").unwrap(); - let mut req = Request::with_connector( - Post, url, &mut MockConnector - ).unwrap(); - let mut body = String::new(); - form_urlencoded::Serializer::new(&mut body).append_pair("q", "value"); - req.headers_mut().set(ContentLength(body.len() as u64)); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert!(s.contains("Content-Length:")); - } - - #[test] - fn test_post_chunked() { - let url = Url::parse("http://example.dom").unwrap(); - let req = Request::with_connector( - Post, url, &mut MockConnector - ).unwrap(); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert!(!s.contains("Content-Length:")); - } - - #[test] - fn test_host_header() { - let url = Url::parse("http://example.dom").unwrap(); - let req = Request::with_connector( - Get, url, &mut MockConnector - ).unwrap(); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert!(s.contains("Host: example.dom")); - } - - #[test] - fn test_proxy() { - let url = Url::parse("http://example.dom").unwrap(); - let mut req = Request::with_connector( - Get, url, &mut MockConnector - ).unwrap(); - req.message.set_proxied(true); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - let request_line = "GET http://example.dom/ HTTP/1.1"; - assert_eq!(&s[..request_line.len()], request_line); - assert!(s.contains("Host: example.dom")); - } - - #[test] - fn test_post_chunked_with_encoding() { - let url = Url::parse("http://example.dom").unwrap(); - let mut req = Request::with_connector( - Post, url, &mut MockConnector - ).unwrap(); - 
req.headers_mut().set(TransferEncoding(vec![Encoding::Chunked])); - let bytes = run_request(req); - let s = from_utf8(&bytes[..]).unwrap(); - assert!(!s.contains("Content-Length:")); - assert!(s.contains("Transfer-Encoding:")); - } - - #[test] - fn test_write_error_closes() { - let url = Url::parse("http://hyper.rs").unwrap(); - let req = Request::with_connector( - Get, url, &mut MockConnector - ).unwrap(); - let mut req = req.start().unwrap(); - - req.message.downcast_mut::().unwrap() - .get_mut().downcast_mut::().unwrap() - .error_on_write = true; - - req.write(b"foo").unwrap(); - assert!(req.flush().is_err()); - - assert!(req.message.downcast_ref::().unwrap() - .get_ref().downcast_ref::().unwrap() - .is_closed); - } -} diff --git a/third_party/rust/hyper/src/client/response.rs b/third_party/rust/hyper/src/client/response.rs deleted file mode 100644 index 261bcd96880e..000000000000 --- a/third_party/rust/hyper/src/client/response.rs +++ /dev/null @@ -1,239 +0,0 @@ -//! Client Responses -use std::io::{self, Read}; - -use url::Url; - -use header; -use net::NetworkStream; -use http::{self, RawStatus, ResponseHead, HttpMessage}; -use http::h1::Http11Message; -use status; -use version; - -/// A response for a client request to a remote server. -#[derive(Debug)] -pub struct Response { - /// The status from the server. - pub status: status::StatusCode, - /// The headers from the server. - pub headers: header::Headers, - /// The HTTP version of this response from the server. - pub version: version::HttpVersion, - /// The final URL of this response. - pub url: Url, - status_raw: RawStatus, - message: Box, -} - -impl Response { - /// Creates a new response from a server. - pub fn new(url: Url, stream: Box) -> ::Result { - trace!("Response::new"); - Response::with_message(url, Box::new(Http11Message::with_stream(stream))) - } - - /// Creates a new response received from the server on the given `HttpMessage`. 
- pub fn with_message(url: Url, mut message: Box) -> ::Result { - trace!("Response::with_message"); - let ResponseHead { headers, raw_status, version } = match message.get_incoming() { - Ok(head) => head, - Err(e) => { - let _ = message.close_connection(); - return Err(From::from(e)); - } - }; - let status = status::StatusCode::from_u16(raw_status.0); - debug!("version={:?}, status={:?}", version, status); - debug!("headers={:?}", headers); - - Ok(Response { - status: status, - version: version, - headers: headers, - url: url, - status_raw: raw_status, - message: message, - }) - } - - /// Get the raw status code and reason. - #[inline] - pub fn status_raw(&self) -> &RawStatus { - &self.status_raw - } - - /// Gets a borrowed reference to the underlying `HttpMessage`. - #[inline] - pub fn get_ref(&self) -> &HttpMessage { - &*self.message - } -} - -/// Read the response body. -impl Read for Response { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match self.message.read(buf) { - Err(e) => { - let _ = self.message.close_connection(); - Err(e) - } - r => r - } - } -} - -impl Drop for Response { - fn drop(&mut self) { - // if not drained, theres old bits in the Reader. we can't reuse this, - // since those old bits would end up in new Responses - // - // otherwise, the response has been drained. 
we should check that the - // server has agreed to keep the connection open - let is_drained = !self.message.has_body(); - trace!("Response.drop is_drained={}", is_drained); - if !(is_drained && http::should_keep_alive(self.version, &self.headers)) { - trace!("Response.drop closing connection"); - if let Err(e) = self.message.close_connection() { - info!("Response.drop error closing connection: {}", e); - } - } - } -} - -#[cfg(test)] -mod tests { - use std::io::{self, Read}; - - use url::Url; - - use header::TransferEncoding; - use header::Encoding; - use http::HttpMessage; - use mock::MockStream; - use status; - use version; - use http::h1::Http11Message; - - use super::Response; - - fn read_to_string(mut r: Response) -> io::Result { - let mut s = String::new(); - try!(r.read_to_string(&mut s)); - Ok(s) - } - - - #[test] - fn test_into_inner() { - let message: Box = Box::new( - Http11Message::with_stream(Box::new(MockStream::new()))); - let message = message.downcast::().ok().unwrap(); - let b = message.into_inner().downcast::().ok().unwrap(); - assert_eq!(b, Box::new(MockStream::new())); - } - - #[test] - fn test_parse_chunked_response() { - let stream = MockStream::with_input(b"\ - HTTP/1.1 200 OK\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - 1\r\n\ - q\r\n\ - 2\r\n\ - we\r\n\ - 2\r\n\ - rt\r\n\ - 0\r\n\ - \r\n" - ); - - let url = Url::parse("http://hyper.rs").unwrap(); - let res = Response::new(url, Box::new(stream)).unwrap(); - - // The status line is correct? - assert_eq!(res.status, status::StatusCode::Ok); - assert_eq!(res.version, version::HttpVersion::Http11); - // The header is correct? - match res.headers.get::() { - Some(encodings) => { - assert_eq!(1, encodings.len()); - assert_eq!(Encoding::Chunked, encodings[0]); - }, - None => panic!("Transfer-Encoding: chunked expected!"), - }; - // The body is correct? 
- assert_eq!(read_to_string(res).unwrap(), "qwert".to_owned()); - } - - /// Tests that when a chunk size is not a valid radix-16 number, an error - /// is returned. - #[test] - fn test_invalid_chunk_size_not_hex_digit() { - let stream = MockStream::with_input(b"\ - HTTP/1.1 200 OK\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - X\r\n\ - 1\r\n\ - 0\r\n\ - \r\n" - ); - - let url = Url::parse("http://hyper.rs").unwrap(); - let res = Response::new(url, Box::new(stream)).unwrap(); - - assert!(read_to_string(res).is_err()); - } - - /// Tests that when a chunk size contains an invalid extension, an error is - /// returned. - #[test] - fn test_invalid_chunk_size_extension() { - let stream = MockStream::with_input(b"\ - HTTP/1.1 200 OK\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - 1 this is an invalid extension\r\n\ - 1\r\n\ - 0\r\n\ - \r\n" - ); - - let url = Url::parse("http://hyper.rs").unwrap(); - let res = Response::new(url, Box::new(stream)).unwrap(); - - assert!(read_to_string(res).is_err()); - } - - /// Tests that when a valid extension that contains a digit is appended to - /// the chunk size, the chunk is correctly read. 
- #[test] - fn test_chunk_size_with_extension() { - let stream = MockStream::with_input(b"\ - HTTP/1.1 200 OK\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - 1;this is an extension with a digit 1\r\n\ - 1\r\n\ - 0\r\n\ - \r\n" - ); - - let url = Url::parse("http://hyper.rs").unwrap(); - let res = Response::new(url, Box::new(stream)).unwrap(); - - assert_eq!(read_to_string(res).unwrap(), "1".to_owned()); - } - - #[test] - fn test_parse_error_closes() { - let url = Url::parse("http://hyper.rs").unwrap(); - let stream = MockStream::with_input(b"\ - definitely not http - "); - - assert!(Response::new(url, Box::new(stream)).is_err()); - } -} diff --git a/third_party/rust/hyper/src/client/tests.rs b/third_party/rust/hyper/src/client/tests.rs new file mode 100644 index 000000000000..7df6a7efe5a0 --- /dev/null +++ b/third_party/rust/hyper/src/client/tests.rs @@ -0,0 +1,210 @@ +#![cfg(feature = "runtime")] +extern crate pretty_env_logger; + +use futures::{Async, Future, Stream}; +use futures::future::poll_fn; +use futures::sync::oneshot; +use tokio::runtime::current_thread::Runtime; + +use mock::MockConnector; +use super::*; + +#[test] +fn retryable_request() { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + let sock1 = connector.mock("http://mock.local"); + let sock2 = connector.mock("http://mock.local"); + + let client = Client::builder() + .build::<_, ::Body>(connector); + + client.pool.no_timer(); + + { + + let req = Request::builder() + .uri("http://mock.local/a") + .body(Default::default()) + .unwrap(); + let res1 = client.request(req); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv1 poll_fn error: {}", e)); + rt.block_on(res1.join(srv1)).expect("res1"); + } + drop(sock1); + + let req = Request::builder() + 
.uri("http://mock.local/b") + .body(Default::default()) + .unwrap(); + let res2 = client.request(req) + .map(|res| { + assert_eq!(res.status().as_u16(), 222); + }); + let srv2 = poll_fn(|| { + try_ready!(sock2.read(&mut [0u8; 512])); + try_ready!(sock2.write(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv2 poll_fn error: {}", e)); + + rt.block_on(res2.join(srv2)).expect("res2"); +} + +#[test] +fn conn_reset_after_write() { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + let sock1 = connector.mock("http://mock.local"); + + let client = Client::builder() + .build::<_, ::Body>(connector); + + client.pool.no_timer(); + + { + let req = Request::builder() + .uri("http://mock.local/a") + .body(Default::default()) + .unwrap(); + let res1 = client.request(req); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv1 poll_fn error: {}", e)); + rt.block_on(res1.join(srv1)).expect("res1"); + } + + let req = Request::builder() + .uri("http://mock.local/a") + .body(Default::default()) + .unwrap(); + let res2 = client.request(req); + let mut sock1 = Some(sock1); + let srv2 = poll_fn(|| { + // We purposefully keep the socket open until the client + // has written the second request, and THEN disconnect. + // + // Not because we expect servers to be jerks, but to trigger + // state where we write on an assumedly good connetion, and + // only reset the close AFTER we wrote bytes. 
+ try_ready!(sock1.as_mut().unwrap().read(&mut [0u8; 512])); + sock1.take(); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv2 poll_fn error: {}", e)); + let err = rt.block_on(res2.join(srv2)).expect_err("res2"); + match err.kind() { + &::error::Kind::Incomplete => (), + other => panic!("expected Incomplete, found {:?}", other) + } +} + +#[test] +fn checkout_win_allows_connect_future_to_be_pooled() { + let _ = pretty_env_logger::try_init(); + + let mut rt = Runtime::new().expect("new rt"); + let mut connector = MockConnector::new(); + + + let (tx, rx) = oneshot::channel::<()>(); + let sock1 = connector.mock("http://mock.local"); + let sock2 = connector.mock_fut("http://mock.local", rx); + + let client = Client::builder() + .build::<_, ::Body>(connector); + + client.pool.no_timer(); + + let uri = "http://mock.local/a".parse::<::Uri>().expect("uri parse"); + + // First request just sets us up to have a connection able to be put + // back in the pool. *However*, it doesn't insert immediately. The + // body has 1 pending byte, and we will only drain in request 2, once + // the connect future has been started. + let mut body = { + let res1 = client.get(uri.clone()) + .map(|res| res.into_body().concat2()); + let srv1 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + // Chunked is used so as to force 2 body reads. + try_ready!(sock1.write(b"\ + HTTP/1.1 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + 1\r\nx\r\n\ + 0\r\n\r\n\ + ")); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv1 poll_fn error: {}", e)); + + rt.block_on(res1.join(srv1)).expect("res1").0 + }; + + + // The second request triggers the only mocked connect future, but then + // the drained body allows the first socket to go back to the pool, + // "winning" the checkout race. 
+ { + let res2 = client.get(uri.clone()); + let drain = poll_fn(move || { + body.poll() + }); + let srv2 = poll_fn(|| { + try_ready!(sock1.read(&mut [0u8; 512])); + try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nx")); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv2 poll_fn error: {}", e)); + + rt.block_on(res2.join(drain).join(srv2)).expect("res2"); + } + + // "Release" the mocked connect future, and let the runtime spin once so + // it's all setup... + { + let mut tx = Some(tx); + let client = &client; + let key = client.pool.h1_key("http://mock.local"); + let mut tick_cnt = 0; + let fut = poll_fn(move || { + tx.take(); + + if client.pool.idle_count(&key) == 0 { + tick_cnt += 1; + assert!(tick_cnt < 10, "ticked too many times waiting for idle"); + trace!("no idle yet; tick count: {}", tick_cnt); + ::futures::task::current().notify(); + Ok(Async::NotReady) + } else { + Ok::<_, ()>(Async::Ready(())) + } + }); + rt.block_on(fut).unwrap(); + } + + // Third request just tests out that the "loser" connection was pooled. If + // it isn't, this will panic since the MockConnector doesn't have any more + // mocks to give out. + { + let res3 = client.get(uri); + let srv3 = poll_fn(|| { + try_ready!(sock2.read(&mut [0u8; 512])); + try_ready!(sock2.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); + Ok(Async::Ready(())) + }).map_err(|e: ::std::io::Error| panic!("srv3 poll_fn error: {}", e)); + + rt.block_on(res3.join(srv3)).expect("res3"); + } +} + + diff --git a/third_party/rust/hyper/src/common/buf.rs b/third_party/rust/hyper/src/common/buf.rs new file mode 100644 index 000000000000..2adc6b34ce6a --- /dev/null +++ b/third_party/rust/hyper/src/common/buf.rs @@ -0,0 +1,34 @@ +use bytes::Buf; +use iovec::IoVec; + +/// A `Buf` wrapping a static byte slice. 
+#[derive(Debug)] +pub(crate) struct StaticBuf(pub(crate) &'static [u8]); + +impl Buf for StaticBuf { + #[inline] + fn remaining(&self) -> usize { + self.0.len() + } + + #[inline] + fn bytes(&self) -> &[u8] { + self.0 + } + + #[inline] + fn advance(&mut self, cnt: usize) { + self.0 = &self.0[cnt..]; + } + + #[inline] + fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize { + if dst.is_empty() || self.0.is_empty() { + 0 + } else { + dst[0] = self.0.into(); + 1 + } + } +} + diff --git a/third_party/rust/hyper/src/common/exec.rs b/third_party/rust/hyper/src/common/exec.rs new file mode 100644 index 000000000000..231939b4c78f --- /dev/null +++ b/third_party/rust/hyper/src/common/exec.rs @@ -0,0 +1,54 @@ +use std::fmt; +use std::sync::Arc; + +use futures::future::{Executor, Future}; + +/// Either the user provides an executor for background tasks, or we use +/// `tokio::spawn`. +#[derive(Clone)] +pub(crate) enum Exec { + Default, + Executor(Arc + Send>> + Send + Sync>), +} + + +impl Exec { + pub(crate) fn execute(&self, fut: F) -> ::Result<()> + where + F: Future + Send + 'static, + { + match *self { + Exec::Default => { + #[cfg(feature = "runtime")] + { + use ::tokio_executor::Executor; + ::tokio_executor::DefaultExecutor::current() + .spawn(Box::new(fut)) + .map_err(|err| { + warn!("executor error: {:?}", err); + ::Error::new_execute() + }) + } + #[cfg(not(feature = "runtime"))] + { + // If no runtime, we need an executor! 
+ panic!("executor must be set") + } + }, + Exec::Executor(ref e) => { + e.execute(Box::new(fut)) + .map_err(|err| { + warn!("executor error: {:?}", err.kind()); + ::Error::new_execute() + }) + }, + } + } +} + +impl fmt::Debug for Exec { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Exec") + .finish() + } +} diff --git a/third_party/rust/hyper/src/common/io/mod.rs b/third_party/rust/hyper/src/common/io/mod.rs new file mode 100644 index 000000000000..2e6d506153e2 --- /dev/null +++ b/third_party/rust/hyper/src/common/io/mod.rs @@ -0,0 +1,3 @@ +mod rewind; + +pub(crate) use self::rewind::Rewind; diff --git a/third_party/rust/hyper/src/common/io/rewind.rs b/third_party/rust/hyper/src/common/io/rewind.rs new file mode 100644 index 000000000000..797dad9a7415 --- /dev/null +++ b/third_party/rust/hyper/src/common/io/rewind.rs @@ -0,0 +1,222 @@ +use std::cmp; +use std::io::{self, Read, Write}; + +use bytes::{Buf, BufMut, Bytes, IntoBuf}; +use futures::{Async, Poll}; +use tokio_io::{AsyncRead, AsyncWrite}; + +/// Combine a buffer with an IO, rewinding reads to use the buffer. +#[derive(Debug)] +pub(crate) struct Rewind { + pre: Option, + inner: T, +} + +impl Rewind { + pub(crate) fn new(io: T) -> Self { + Rewind { + pre: None, + inner: io, + } + } + + pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { + Rewind { + pre: Some(buf), + inner: io, + } + } + + pub(crate) fn rewind(&mut self, bs: Bytes) { + debug_assert!(self.pre.is_none()); + self.pre = Some(bs); + } + + pub(crate) fn into_inner(self) -> (T, Bytes) { + (self.inner, self.pre.unwrap_or_else(Bytes::new)) + } +} + +impl Read for Rewind +where + T: Read, +{ + #[inline] + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if let Some(pre_bs) = self.pre.take() { + // If there are no remaining bytes, let the bytes get dropped. 
+ if pre_bs.len() > 0 { + let mut pre_reader = pre_bs.into_buf().reader(); + let read_cnt = pre_reader.read(buf)?; + + let mut new_pre = pre_reader.into_inner().into_inner(); + new_pre.advance(read_cnt); + + // Put back whats left + if new_pre.len() > 0 { + self.pre = Some(new_pre); + } + + return Ok(read_cnt); + } + } + self.inner.read(buf) + } +} + +impl Write for Rewind +where + T: Write, +{ + #[inline] + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + #[inline] + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +impl AsyncRead for Rewind +where + T: AsyncRead, +{ + #[inline] + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } + + #[inline] + fn read_buf(&mut self, buf: &mut B) -> Poll { + if let Some(bs) = self.pre.take() { + let pre_len = bs.len(); + // If there are no remaining bytes, let the bytes get dropped. + if pre_len > 0 { + let cnt = cmp::min(buf.remaining_mut(), pre_len); + let pre_buf = bs.into_buf(); + let mut xfer = Buf::take(pre_buf, cnt); + buf.put(&mut xfer); + + let mut new_pre = xfer.into_inner().into_inner(); + new_pre.advance(cnt); + + // Put back whats left + if new_pre.len() > 0 { + self.pre = Some(new_pre); + } + + return Ok(Async::Ready(cnt)); + } + } + self.inner.read_buf(buf) + } +} + +impl AsyncWrite for Rewind +where + T: AsyncWrite, +{ + #[inline] + fn shutdown(&mut self) -> Poll<(), io::Error> { + AsyncWrite::shutdown(&mut self.inner) + } + + #[inline] + fn write_buf(&mut self, buf: &mut B) -> Poll { + self.inner.write_buf(buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + extern crate tokio_mockstream; + use self::tokio_mockstream::MockStream; + use std::io::Cursor; + + // Test a partial rewind + #[test] + fn async_partial_rewind() { + let bs = &mut [104, 101, 108, 108, 111]; + let o1 = &mut [0, 0]; + let o2 = &mut [0, 0, 0, 0, 0]; + + let mut stream = Rewind::new(MockStream::new(bs)); + let mut 
o1_cursor = Cursor::new(o1); + // Read off some bytes, ensure we filled o1 + match stream.read_buf(&mut o1_cursor).unwrap() { + Async::NotReady => panic!("should be ready"), + Async::Ready(cnt) => assert_eq!(2, cnt), + } + + // Rewind the stream so that it is as if we never read in the first place. + let read_buf = Bytes::from(&o1_cursor.into_inner()[..]); + stream.rewind(read_buf); + + // We poll 2x here since the first time we'll only get what is in the + // prefix (the rewinded part) of the Rewind.\ + let mut o2_cursor = Cursor::new(o2); + stream.read_buf(&mut o2_cursor).unwrap(); + stream.read_buf(&mut o2_cursor).unwrap(); + let o2_final = o2_cursor.into_inner(); + + // At this point we should have read everything that was in the MockStream + assert_eq!(&o2_final, &bs); + } + // Test a full rewind + #[test] + fn async_full_rewind() { + let bs = &mut [104, 101, 108, 108, 111]; + let o1 = &mut [0, 0, 0, 0, 0]; + let o2 = &mut [0, 0, 0, 0, 0]; + + let mut stream = Rewind::new(MockStream::new(bs)); + let mut o1_cursor = Cursor::new(o1); + match stream.read_buf(&mut o1_cursor).unwrap() { + Async::NotReady => panic!("should be ready"), + Async::Ready(cnt) => assert_eq!(5, cnt), + } + + let read_buf = Bytes::from(&o1_cursor.into_inner()[..]); + stream.rewind(read_buf); + + let mut o2_cursor = Cursor::new(o2); + stream.read_buf(&mut o2_cursor).unwrap(); + stream.read_buf(&mut o2_cursor).unwrap(); + let o2_final = o2_cursor.into_inner(); + + assert_eq!(&o2_final, &bs); + } + #[test] + fn partial_rewind() { + let bs = &mut [104, 101, 108, 108, 111]; + let o1 = &mut [0, 0]; + let o2 = &mut [0, 0, 0, 0, 0]; + + let mut stream = Rewind::new(MockStream::new(bs)); + stream.read(o1).unwrap(); + + let read_buf = Bytes::from(&o1[..]); + stream.rewind(read_buf); + let cnt = stream.read(o2).unwrap(); + stream.read(&mut o2[cnt..]).unwrap(); + assert_eq!(&o2, &bs); + } + #[test] + fn full_rewind() { + let bs = &mut [104, 101, 108, 108, 111]; + let o1 = &mut [0, 0, 0, 0, 0]; + let o2 
= &mut [0, 0, 0, 0, 0]; + + let mut stream = Rewind::new(MockStream::new(bs)); + stream.read(o1).unwrap(); + + let read_buf = Bytes::from(&o1[..]); + stream.rewind(read_buf); + let cnt = stream.read(o2).unwrap(); + stream.read(&mut o2[cnt..]).unwrap(); + assert_eq!(&o2, &bs); + } +} diff --git a/third_party/rust/hyper/src/common/lazy.rs b/third_party/rust/hyper/src/common/lazy.rs new file mode 100644 index 000000000000..10dfe17992cc --- /dev/null +++ b/third_party/rust/hyper/src/common/lazy.rs @@ -0,0 +1,63 @@ +use std::mem; + +use futures::{Future, IntoFuture, Poll}; + +pub(crate) fn lazy(func: F) -> Lazy +where + F: FnOnce() -> R, + R: IntoFuture, +{ + Lazy { + inner: Inner::Init(func), + } +} + +pub struct Lazy { + inner: Inner +} + +enum Inner { + Init(F), + Fut(R), + Empty, +} + +impl Lazy +where + F: FnOnce() -> R, + R: IntoFuture, +{ + pub fn started(&self) -> bool { + match self.inner { + Inner::Init(_) => false, + Inner::Fut(_) | + Inner::Empty => true, + } + } +} + +impl Future for Lazy +where + F: FnOnce() -> R, + R: IntoFuture, +{ + type Item = R::Item; + type Error = R::Error; + + fn poll(&mut self) -> Poll { + match self.inner { + Inner::Fut(ref mut f) => return f.poll(), + _ => (), + } + + match mem::replace(&mut self.inner, Inner::Empty) { + Inner::Init(func) => { + let mut fut = func().into_future(); + let ret = fut.poll(); + self.inner = Inner::Fut(fut); + ret + }, + _ => unreachable!("lazy state wrong"), + } + } +} diff --git a/third_party/rust/hyper/src/common/mod.rs b/third_party/rust/hyper/src/common/mod.rs new file mode 100644 index 000000000000..3cf1e04c1df4 --- /dev/null +++ b/third_party/rust/hyper/src/common/mod.rs @@ -0,0 +1,10 @@ +mod buf; +mod exec; +pub(crate) mod io; +mod lazy; +mod never; + +pub(crate) use self::buf::StaticBuf; +pub(crate) use self::exec::Exec; +pub(crate) use self::lazy::lazy; +pub use self::never::Never; diff --git a/third_party/rust/hyper/src/common/never.rs b/third_party/rust/hyper/src/common/never.rs new file 
mode 100644 index 000000000000..dfb763b2a478 --- /dev/null +++ b/third_party/rust/hyper/src/common/never.rs @@ -0,0 +1,22 @@ +//! An uninhabitable type meaning it can never happen. +//! +//! To be replaced with `!` once it is stable. + +use std::error::Error; +use std::fmt; + +#[derive(Debug)] +pub enum Never {} + +impl fmt::Display for Never { + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } +} + +impl Error for Never { + fn description(&self) -> &str { + match *self {} + } +} + diff --git a/third_party/rust/hyper/src/error.rs b/third_party/rust/hyper/src/error.rs index 81c77cd9cec1..3478f30b7dbd 100644 --- a/third_party/rust/hyper/src/error.rs +++ b/third_party/rust/hyper/src/error.rs @@ -1,219 +1,387 @@ //! Error and Result module. use std::error::Error as StdError; use std::fmt; -use std::io::Error as IoError; -use std::str::Utf8Error; -use std::string::FromUtf8Error; +use std::io; use httparse; -use url; - -#[cfg(feature = "openssl")] -use openssl::ssl::error::SslError; - -use self::Error::{ - Method, - Uri, - Version, - Header, - Status, - Io, - Ssl, - TooLarge, - Utf8 -}; - -pub use url::ParseError; +use http; /// Result type often returned from methods that can have hyper `Error`s. pub type Result = ::std::result::Result; -/// A set of errors that can occur parsing HTTP streams. -#[derive(Debug)] -pub enum Error { - /// An invalid `Method`, such as `GE,T`. - Method, - /// An invalid `RequestUri`, such as `exam ple.domain`. - Uri(url::ParseError), - /// An invalid `HttpVersion`, such as `HTP/1.1` - Version, - /// An invalid `Header`. - Header, - /// A message head is too large to be reasonable. - TooLarge, - /// An invalid `Status`, such as `1337 ELITE`. - Status, - /// An `io::Error` that occurred while trying to read or write to a network stream. - Io(IoError), - /// An error from a SSL library. 
- Ssl(Box), - /// Parsing a field as string failed - Utf8(Utf8Error), +type Cause = Box; - #[doc(hidden)] - __Nonexhaustive(Void) +/// Represents errors that can occur handling HTTP streams. +pub struct Error { + inner: Box, } -#[doc(hidden)] -pub struct Void(()); +struct ErrorImpl { + kind: Kind, + cause: Option, +} -impl fmt::Debug for Void { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - unreachable!() +#[derive(Debug, PartialEq)] +pub(crate) enum Kind { + Parse(Parse), + /// A message reached EOF, but is not complete. + Incomplete, + /// A client connection received a response when not waiting for one. + MismatchedResponse, + /// A pending item was dropped before ever being processed. + Canceled, + /// Indicates a connection is closed. + Closed, + /// An `io::Error` that occurred while trying to read or write to a network stream. + Io, + /// Error occurred while connecting. + Connect, + /// Error creating a TcpListener. + #[cfg(feature = "runtime")] + Listen, + /// Error accepting on an Incoming stream. + Accept, + /// Error calling user's NewService::new_service(). + NewService, + /// Error from future of user's Service::call(). + Service, + /// Error while reading a body from connection. + Body, + /// Error while writing a body to connection. + BodyWrite, + /// Error calling user's Payload::poll_data(). + BodyUser, + /// Error calling AsyncWrite::shutdown() + Shutdown, + + /// A general error from h2. + Http2, + + /// User tried to create a Request with bad version. + UnsupportedVersion, + /// User tried to create a CONNECT Request with the Client. + UnsupportedRequestMethod, + /// User tried to respond with a 1xx (not 101) response code. + UnsupportedStatusCode, + /// User tried to send a Request with Client with non-absolute URI. + AbsoluteUriRequired, + + /// User tried polling for an upgrade that doesn't exist. + NoUpgrade, + + /// User polled for an upgrade, but low-level API is not using upgrades. 
+ ManualUpgrade, + + /// Error trying to call `Executor::execute`. + Execute, +} + +#[derive(Debug, PartialEq)] +pub(crate) enum Parse { + Method, + Version, + VersionH2, + Uri, + Header, + TooLarge, + Status, +} + +/* +#[derive(Debug)] +pub(crate) enum User { + VersionNotSupported, + MethodNotSupported, + InvalidRequestUri, +} +*/ + +impl Error { + //TODO(error): should there be these kinds of inspection methods? + // + // - is_io() + // - is_connect() + // - is_closed() + // - etc? + + /// Returns true if this was an HTTP parse error. + pub fn is_parse(&self) -> bool { + match self.inner.kind { + Kind::Parse(_) => true, + _ => false, + } + } + + /// Returns true if this error was caused by user code. + pub fn is_user(&self) -> bool { + match self.inner.kind { + Kind::BodyUser | + Kind::NewService | + Kind::Service | + Kind::Closed | + Kind::UnsupportedVersion | + Kind::UnsupportedRequestMethod | + Kind::UnsupportedStatusCode | + Kind::AbsoluteUriRequired | + Kind::NoUpgrade | + Kind::Execute => true, + _ => false, + } + } + + /// Returns true if this was about a `Request` that was canceled. + pub fn is_canceled(&self) -> bool { + self.inner.kind == Kind::Canceled + } + + /// Returns true if a sender's channel is closed. + pub fn is_closed(&self) -> bool { + self.inner.kind == Kind::Closed + } + + /// Returns the error's cause. + /// + /// This is identical to `Error::cause` except that it provides extra + /// bounds required to be able to downcast the error. + pub fn cause2(&self) -> Option<&(StdError + 'static + Sync + Send)> { + self.inner.cause.as_ref().map(|e| &**e) + } + + /// Consumes the error, returning its cause. 
+ pub fn into_cause(self) -> Option> { + self.inner.cause + } + + pub(crate) fn new(kind: Kind, cause: Option) -> Error { + Error { + inner: Box::new(ErrorImpl { + kind, + cause, + }), + } + } + + pub(crate) fn kind(&self) -> &Kind { + &self.inner.kind + } + + pub(crate) fn new_canceled>(cause: Option) -> Error { + Error::new(Kind::Canceled, cause.map(Into::into)) + } + + pub(crate) fn new_incomplete() -> Error { + Error::new(Kind::Incomplete, None) + } + + pub(crate) fn new_too_large() -> Error { + Error::new(Kind::Parse(Parse::TooLarge), None) + } + + pub(crate) fn new_header() -> Error { + Error::new(Kind::Parse(Parse::Header), None) + } + + pub(crate) fn new_version_h2() -> Error { + Error::new(Kind::Parse(Parse::VersionH2), None) + } + + pub(crate) fn new_mismatched_response() -> Error { + Error::new(Kind::MismatchedResponse, None) + } + + pub(crate) fn new_io(cause: io::Error) -> Error { + Error::new(Kind::Io, Some(cause.into())) + } + + #[cfg(feature = "runtime")] + pub(crate) fn new_listen>(cause: E) -> Error { + Error::new(Kind::Listen, Some(cause.into())) + } + + pub(crate) fn new_accept>(cause: E) -> Error { + Error::new(Kind::Accept, Some(cause.into())) + } + + pub(crate) fn new_connect>(cause: E) -> Error { + Error::new(Kind::Connect, Some(cause.into())) + } + + pub(crate) fn new_closed() -> Error { + Error::new(Kind::Closed, None) + } + + pub(crate) fn new_body>(cause: E) -> Error { + Error::new(Kind::Body, Some(cause.into())) + } + + pub(crate) fn new_body_write>(cause: E) -> Error { + Error::new(Kind::BodyWrite, Some(cause.into())) + } + + pub(crate) fn new_user_unsupported_version() -> Error { + Error::new(Kind::UnsupportedVersion, None) + } + + pub(crate) fn new_user_unsupported_request_method() -> Error { + Error::new(Kind::UnsupportedRequestMethod, None) + } + + pub(crate) fn new_user_unsupported_status_code() -> Error { + Error::new(Kind::UnsupportedStatusCode, None) + } + + pub(crate) fn new_user_absolute_uri_required() -> Error { + 
Error::new(Kind::AbsoluteUriRequired, None) + } + + pub(crate) fn new_user_no_upgrade() -> Error { + Error::new(Kind::NoUpgrade, None) + } + + pub(crate) fn new_user_manual_upgrade() -> Error { + Error::new(Kind::ManualUpgrade, None) + } + + pub(crate) fn new_user_new_service>(cause: E) -> Error { + Error::new(Kind::NewService, Some(cause.into())) + } + + pub(crate) fn new_user_service>(cause: E) -> Error { + Error::new(Kind::Service, Some(cause.into())) + } + + pub(crate) fn new_user_body>(cause: E) -> Error { + Error::new(Kind::BodyUser, Some(cause.into())) + } + + pub(crate) fn new_shutdown(cause: io::Error) -> Error { + Error::new(Kind::Shutdown, Some(Box::new(cause))) + } + + pub(crate) fn new_execute() -> Error { + Error::new(Kind::Execute, None) + } + + pub(crate) fn new_h2(cause: ::h2::Error) -> Error { + Error::new(Kind::Http2, Some(Box::new(cause))) + } +} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Error") + .field("kind", &self.inner.kind) + .field("cause", &self.inner.cause) + .finish() } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Uri(ref e) => fmt::Display::fmt(e, f), - Io(ref e) => fmt::Display::fmt(e, f), - Ssl(ref e) => fmt::Display::fmt(e, f), - Utf8(ref e) => fmt::Display::fmt(e, f), - ref e => f.write_str(e.description()), + if let Some(ref cause) = self.inner.cause { + write!(f, "{}: {}", self.description(), cause) + } else { + f.write_str(self.description()) } } } impl StdError for Error { fn description(&self) -> &str { - match *self { - Method => "Invalid Method specified", - Version => "Invalid HTTP version specified", - Header => "Invalid Header provided", - TooLarge => "Message head is too large", - Status => "Invalid Status provided", - Uri(ref e) => e.description(), - Io(ref e) => e.description(), - Ssl(ref e) => e.description(), - Utf8(ref e) => e.description(), - Error::__Nonexhaustive(..) 
=> unreachable!(), + match self.inner.kind { + Kind::Parse(Parse::Method) => "invalid Method specified", + Kind::Parse(Parse::Version) => "invalid HTTP version specified", + Kind::Parse(Parse::VersionH2) => "invalid HTTP version specified (Http2)", + Kind::Parse(Parse::Uri) => "invalid URI", + Kind::Parse(Parse::Header) => "invalid Header provided", + Kind::Parse(Parse::TooLarge) => "message head is too large", + Kind::Parse(Parse::Status) => "invalid Status provided", + Kind::Incomplete => "parsed HTTP message from remote is incomplete", + Kind::MismatchedResponse => "response received without matching request", + Kind::Closed => "connection closed", + Kind::Connect => "an error occurred trying to connect", + Kind::Canceled => "an operation was canceled internally before starting", + #[cfg(feature = "runtime")] + Kind::Listen => "error creating server listener", + Kind::Accept => "error accepting connection", + Kind::NewService => "calling user's new_service failed", + Kind::Service => "error from user's server service", + Kind::Body => "error reading a body from connection", + Kind::BodyWrite => "error writing a body to connection", + Kind::BodyUser => "error from user's Payload stream", + Kind::Shutdown => "error shutting down connection", + Kind::Http2 => "http2 general error", + Kind::UnsupportedVersion => "request has unsupported HTTP version", + Kind::UnsupportedRequestMethod => "request has unsupported HTTP method", + Kind::UnsupportedStatusCode => "response has 1xx status code, not supported by server", + Kind::AbsoluteUriRequired => "client requires absolute-form URIs", + Kind::NoUpgrade => "no upgrade available", + Kind::ManualUpgrade => "upgrade expected but low level API in use", + Kind::Execute => "executor failed to spawn task", + + Kind::Io => "an IO error occurred", } } fn cause(&self) -> Option<&StdError> { - match *self { - Io(ref error) => Some(error), - Ssl(ref error) => Some(&**error), - Uri(ref error) => Some(error), - Utf8(ref error) => 
Some(error), - _ => None, - } + self + .inner + .cause + .as_ref() + .map(|cause| &**cause as &StdError) } } -impl From for Error { - fn from(err: IoError) -> Error { - Io(err) +#[doc(hidden)] +impl From for Error { + fn from(err: Parse) -> Error { + Error::new(Kind::Parse(err), None) } } -impl From for Error { - fn from(err: url::ParseError) -> Error { - Uri(err) - } -} - -#[cfg(feature = "openssl")] -impl From for Error { - fn from(err: SslError) -> Error { +impl From for Parse { + fn from(err: httparse::Error) -> Parse { match err { - SslError::StreamError(err) => Io(err), - err => Ssl(Box::new(err)), + httparse::Error::HeaderName | + httparse::Error::HeaderValue | + httparse::Error::NewLine | + httparse::Error::Token => Parse::Header, + httparse::Error::Status => Parse::Status, + httparse::Error::TooManyHeaders => Parse::TooLarge, + httparse::Error::Version => Parse::Version, } } } -impl From for Error { - fn from(err: Utf8Error) -> Error { - Utf8(err) +impl From for Parse { + fn from(_: http::method::InvalidMethod) -> Parse { + Parse::Method } } -impl From for Error { - fn from(err: FromUtf8Error) -> Error { - Utf8(err.utf8_error()) +impl From for Parse { + fn from(_: http::status::InvalidStatusCode) -> Parse { + Parse::Status } } -impl From for Error { - fn from(err: httparse::Error) -> Error { - match err { - httparse::Error::HeaderName => Header, - httparse::Error::HeaderValue => Header, - httparse::Error::NewLine => Header, - httparse::Error::Status => Status, - httparse::Error::Token => Header, - httparse::Error::TooManyHeaders => TooLarge, - httparse::Error::Version => Version, - } +impl From for Parse { + fn from(_: http::uri::InvalidUri) -> Parse { + Parse::Uri } } +impl From for Parse { + fn from(_: http::uri::InvalidUriParts) -> Parse { + Parse::Uri + } +} + +#[doc(hidden)] +trait AssertSendSync: Send + Sync + 'static {} +#[doc(hidden)] +impl AssertSendSync for Error {} + #[cfg(test)] mod tests { - use std::error::Error as StdError; - use std::io; - 
use httparse; - use url; - use super::Error; - use super::Error::*; - - #[test] - fn test_cause() { - let orig = io::Error::new(io::ErrorKind::Other, "other"); - let desc = orig.description().to_owned(); - let e = Io(orig); - assert_eq!(e.cause().unwrap().description(), desc); - } - - macro_rules! from { - ($from:expr => $error:pat) => { - match Error::from($from) { - e @ $error => { - assert!(e.description().len() > 5); - } , - _ => panic!("{:?}", $from) - } - } - } - - macro_rules! from_and_cause { - ($from:expr => $error:pat) => { - match Error::from($from) { - e @ $error => { - let desc = e.cause().unwrap().description(); - assert_eq!(desc, $from.description().to_owned()); - assert_eq!(desc, e.description()); - }, - _ => panic!("{:?}", $from) - } - } - } - - #[test] - fn test_from() { - - from_and_cause!(io::Error::new(io::ErrorKind::Other, "other") => Io(..)); - from_and_cause!(url::ParseError::EmptyHost => Uri(..)); - - from!(httparse::Error::HeaderName => Header); - from!(httparse::Error::HeaderName => Header); - from!(httparse::Error::HeaderValue => Header); - from!(httparse::Error::NewLine => Header); - from!(httparse::Error::Status => Status); - from!(httparse::Error::Token => Header); - from!(httparse::Error::TooManyHeaders => TooLarge); - from!(httparse::Error::Version => Version); - } - - #[cfg(feature = "openssl")] - #[test] - fn test_from_ssl() { - use openssl::ssl::error::SslError; - - from!(SslError::StreamError( - io::Error::new(io::ErrorKind::Other, "ssl negotiation")) => Io(..)); - from_and_cause!(SslError::SslSessionClosed => Ssl(..)); - } } diff --git a/third_party/rust/hyper/src/header/common/accept.rs b/third_party/rust/hyper/src/header/common/accept.rs deleted file mode 100644 index f58a143855e0..000000000000 --- a/third_party/rust/hyper/src/header/common/accept.rs +++ /dev/null @@ -1,143 +0,0 @@ -use mime::Mime; - -use header::{QualityItem, qitem}; - -header! 
{ - /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2) - /// - /// The `Accept` header field can be used by user agents to specify - /// response media types that are acceptable. Accept header fields can - /// be used to indicate that the request is specifically limited to a - /// small set of desired types, as in the case of a request for an - /// in-line image - /// - /// # ABNF - /// ```plain - /// Accept = #( media-range [ accept-params ] ) - /// - /// media-range = ( "*/*" - /// / ( type "/" "*" ) - /// / ( type "/" subtype ) - /// ) *( OWS ";" OWS parameter ) - /// accept-params = weight *( accept-ext ) - /// accept-ext = OWS ";" OWS token [ "=" ( token / quoted-string ) ] - /// ``` - /// - /// # Example values - /// * `audio/*; q=0.2, audio/basic` (`*` value won't parse correctly) - /// * `text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, Accept, qitem}; - /// use hyper::mime::{Mime, TopLevel, SubLevel}; - /// - /// let mut headers = Headers::new(); - /// - /// headers.set( - /// Accept(vec![ - /// qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])), - /// ]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, Accept, qitem}; - /// use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// Accept(vec![ - /// qitem(Mime(TopLevel::Application, SubLevel::Json, - /// vec![(Attr::Charset, Value::Utf8)])), - /// ]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, Accept, QualityItem, Quality, qitem}; - /// use hyper::mime::{Mime, TopLevel, SubLevel}; - /// - /// let mut headers = Headers::new(); - /// - /// headers.set( - /// Accept(vec![ - /// qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])), - /// qitem(Mime(TopLevel::Application, - /// SubLevel::Ext("xhtml+xml".to_owned()), vec![])), - /// QualityItem::new(Mime(TopLevel::Application, 
SubLevel::Xml, vec![]), - /// Quality(900)), - /// qitem(Mime(TopLevel::Image, - /// SubLevel::Ext("webp".to_owned()), vec![])), - /// QualityItem::new(Mime(TopLevel::Star, SubLevel::Star, vec![]), - /// Quality(800)) - /// ]) - /// ); - /// ``` - /// - /// # Notes - /// * Using always Mime types to represent `media-range` differs from the ABNF. - /// * **FIXME**: `accept-ext` is not supported. - (Accept, "Accept") => (QualityItem)+ - - test_accept { - // Tests from the RFC - // FIXME: Test fails, first value containing a "*" fails to parse - // test_header!( - // test1, - // vec![b"audio/*; q=0.2, audio/basic"], - // Some(HeaderField(vec![ - // QualityItem::new(Mime(TopLevel::Audio, SubLevel::Star, vec![]), Quality(200)), - // qitem(Mime(TopLevel::Audio, SubLevel::Ext("basic".to_owned()), vec![])), - // ]))); - test_header!( - test2, - vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"], - Some(HeaderField(vec![ - QualityItem::new(Mime(TopLevel::Text, SubLevel::Plain, vec![]), Quality(500)), - qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])), - QualityItem::new( - Mime(TopLevel::Text, SubLevel::Ext("x-dvi".to_owned()), vec![]), - Quality(800)), - qitem(Mime(TopLevel::Text, SubLevel::Ext("x-c".to_owned()), vec![])), - ]))); - // Custom tests - test_header!( - test3, - vec![b"text/plain; charset=utf-8"], - Some(Accept(vec![ - qitem(Mime(TopLevel::Text, SubLevel::Plain, vec![(Attr::Charset, Value::Utf8)])), - ]))); - test_header!( - test4, - vec![b"text/plain; charset=utf-8; q=0.5"], - Some(Accept(vec![ - QualityItem::new(Mime(TopLevel::Text, - SubLevel::Plain, vec![(Attr::Charset, Value::Utf8)]), - Quality(500)), - ]))); - } -} - -impl Accept { - /// A constructor to easily create `Accept: */*`. - pub fn star() -> Accept { - Accept(vec![qitem(mime!(Star/Star))]) - } - - /// A constructor to easily create `Accept: application/json`. 
- pub fn json() -> Accept { - Accept(vec![qitem(mime!(Application/Json))]) - } - - /// A constructor to easily create `Accept: text/*`. - pub fn text() -> Accept { - Accept(vec![qitem(mime!(Text/Star))]) - } - - /// A constructor to easily create `Accept: image/*`. - pub fn image() -> Accept { - Accept(vec![qitem(mime!(Image/Star))]) - } -} - - -bench_header!(bench, Accept, { vec![b"text/plain; q=0.5, text/html".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/accept_charset.rs b/third_party/rust/hyper/src/header/common/accept_charset.rs deleted file mode 100644 index 7e4d1247fae7..000000000000 --- a/third_party/rust/hyper/src/header/common/accept_charset.rs +++ /dev/null @@ -1,56 +0,0 @@ -use header::{Charset, QualityItem}; - -header! { - /// `Accept-Charset` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.3) - /// - /// The `Accept-Charset` header field can be sent by a user agent to - /// indicate what charsets are acceptable in textual response content. - /// This field allows user agents capable of understanding more - /// comprehensive or special-purpose charsets to signal that capability - /// to an origin server that is capable of representing information in - /// those charsets. 
- /// - /// # ABNF - /// ```plain - /// Accept-Charset = 1#( ( charset / "*" ) [ weight ] ) - /// ``` - /// - /// # Example values - /// * `iso-8859-5, unicode-1-1;q=0.8` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, AcceptCharset, Charset, qitem}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptCharset(vec![qitem(Charset::Us_Ascii)]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, AcceptCharset, Charset, Quality, QualityItem}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptCharset(vec![ - /// QualityItem::new(Charset::Us_Ascii, Quality(900)), - /// QualityItem::new(Charset::Iso_8859_10, Quality(200)), - /// ]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, AcceptCharset, Charset, qitem}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptCharset(vec![qitem(Charset::Ext("utf-8".to_owned()))]) - /// ); - /// ``` - (AcceptCharset, "Accept-Charset") => (QualityItem)+ - - test_accept_charset { - /// Testcase from RFC - test_header!(test1, vec![b"iso-8859-5, unicode-1-1;q=0.8"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/accept_encoding.rs b/third_party/rust/hyper/src/header/common/accept_encoding.rs deleted file mode 100644 index cbc0de627dfa..000000000000 --- a/third_party/rust/hyper/src/header/common/accept_encoding.rs +++ /dev/null @@ -1,71 +0,0 @@ -use header::{Encoding, QualityItem}; - -header! { - /// `Accept-Encoding` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.4) - /// - /// The `Accept-Encoding` header field can be used by user agents to - /// indicate what response content-codings are - /// acceptable in the response. An `identity` token is used as a synonym - /// for "no encoding" in order to communicate when no encoding is - /// preferred. 
- /// - /// # ABNF - /// ```plain - /// Accept-Encoding = #( codings [ weight ] ) - /// codings = content-coding / "identity" / "*" - /// ``` - /// - /// # Example values - /// * `compress, gzip` - /// * `` - /// * `*` - /// * `compress;q=0.5, gzip;q=1` - /// * `gzip;q=1.0, identity; q=0.5, *;q=0` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, AcceptEncoding, Encoding, qitem}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptEncoding(vec![qitem(Encoding::Chunked)]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, AcceptEncoding, Encoding, qitem}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptEncoding(vec![ - /// qitem(Encoding::Chunked), - /// qitem(Encoding::Gzip), - /// qitem(Encoding::Deflate), - /// ]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, AcceptEncoding, Encoding, QualityItem, Quality, qitem}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptEncoding(vec![ - /// qitem(Encoding::Chunked), - /// QualityItem::new(Encoding::Gzip, Quality(600)), - /// QualityItem::new(Encoding::EncodingExt("*".to_owned()), Quality(0)), - /// ]) - /// ); - /// ``` - (AcceptEncoding, "Accept-Encoding") => (QualityItem)* - - test_accept_encoding { - // From the RFC - test_header!(test1, vec![b"compress, gzip"]); - test_header!(test2, vec![b""], Some(AcceptEncoding(vec![]))); - test_header!(test3, vec![b"*"]); - // Note: Removed quality 1 from gzip - test_header!(test4, vec![b"compress;q=0.5, gzip"]); - // Note: Removed quality 1 from gzip - test_header!(test5, vec![b"gzip, identity; q=0.5, *;q=0"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/accept_language.rs b/third_party/rust/hyper/src/header/common/accept_language.rs deleted file mode 100644 index 4b7f1ac00168..000000000000 --- a/third_party/rust/hyper/src/header/common/accept_language.rs +++ /dev/null @@ -1,70 +0,0 @@ -use language_tags::LanguageTag; -use 
header::QualityItem; - -header! { - /// `Accept-Language` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.5) - /// - /// The `Accept-Language` header field can be used by user agents to - /// indicate the set of natural languages that are preferred in the - /// response. - /// - /// # ABNF - /// ```plain - /// Accept-Language = 1#( language-range [ weight ] ) - /// language-range = - /// ``` - /// - /// # Example values - /// * `da, en-gb;q=0.8, en;q=0.7` - /// * `en-us;q=1.0, en;q=0.5, fr` - /// - /// # Examples - /// ``` - /// use hyper::LanguageTag; - /// use hyper::header::{Headers, AcceptLanguage, qitem}; - /// - /// let mut headers = Headers::new(); - /// let mut langtag: LanguageTag = Default::default(); - /// langtag.language = Some("en".to_owned()); - /// langtag.region = Some("US".to_owned()); - /// headers.set( - /// AcceptLanguage(vec![ - /// qitem(langtag), - /// ]) - /// ); - /// ``` - /// ``` - /// # extern crate hyper; - /// # #[macro_use] extern crate language_tags; - /// # use hyper::header::{Headers, AcceptLanguage, QualityItem, Quality, qitem}; - /// # - /// # fn main() { - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptLanguage(vec![ - /// qitem(langtag!(da)), - /// QualityItem::new(langtag!(en;;;GB), Quality(800)), - /// QualityItem::new(langtag!(en), Quality(700)), - /// ]) - /// ); - /// # } - /// ``` - (AcceptLanguage, "Accept-Language") => (QualityItem)+ - - test_accept_language { - // From the RFC - test_header!(test1, vec![b"da, en-gb;q=0.8, en;q=0.7"]); - // Own test - test_header!( - test2, vec![b"en-US, en; q=0.5, fr"], - Some(AcceptLanguage(vec![ - qitem(langtag!(en;;;US)), - QualityItem::new(langtag!(en), Quality(500)), - qitem(langtag!(fr)), - ]))); - } -} - -bench_header!(bench, AcceptLanguage, - { vec![b"en-us;q=1.0, en;q=0.5, fr".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/accept_ranges.rs b/third_party/rust/hyper/src/header/common/accept_ranges.rs 
deleted file mode 100644 index 86e847dbd429..000000000000 --- a/third_party/rust/hyper/src/header/common/accept_ranges.rs +++ /dev/null @@ -1,101 +0,0 @@ -use std::fmt::{self, Display}; -use std::str::FromStr; - -header! { - /// `Accept-Ranges` header, defined in - /// [RFC7233](http://tools.ietf.org/html/rfc7233#section-2.3) - /// - /// The `Accept-Ranges` header field allows a server to indicate that it - /// supports range requests for the target resource. - /// - /// # ABNF - /// ```plain - /// Accept-Ranges = acceptable-ranges - /// acceptable-ranges = 1#range-unit / \"none\" - /// - /// # Example values - /// * `bytes` - /// * `none` - /// * `unknown-unit` - /// ``` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, AcceptRanges, RangeUnit}; - /// - /// let mut headers = Headers::new(); - /// headers.set(AcceptRanges(vec![RangeUnit::Bytes])); - /// ``` - /// ``` - /// use hyper::header::{Headers, AcceptRanges, RangeUnit}; - /// - /// let mut headers = Headers::new(); - /// headers.set(AcceptRanges(vec![RangeUnit::None])); - /// ``` - /// ``` - /// use hyper::header::{Headers, AcceptRanges, RangeUnit}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AcceptRanges(vec![ - /// RangeUnit::Unregistered("nibbles".to_owned()), - /// RangeUnit::Bytes, - /// RangeUnit::Unregistered("doublets".to_owned()), - /// RangeUnit::Unregistered("quadlets".to_owned()), - /// ]) - /// ); - /// ``` - (AcceptRanges, "Accept-Ranges") => (RangeUnit)+ - - test_acccept_ranges { - test_header!(test1, vec![b"bytes"]); - test_header!(test2, vec![b"none"]); - test_header!(test3, vec![b"unknown-unit"]); - test_header!(test4, vec![b"bytes, unknown-unit"]); - } -} - -/// Range Units, described in [RFC7233](http://tools.ietf.org/html/rfc7233#section-2) -/// -/// A representation can be partitioned into subranges according to -/// various structural units, depending on the structure inherent in the -/// representation's media type. 
-/// -/// # ABNF -/// ```plain -/// range-unit = bytes-unit / other-range-unit -/// bytes-unit = "bytes" -/// other-range-unit = token -/// ``` -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum RangeUnit { - /// Indicating byte-range requests are supported. - Bytes, - /// Reserved as keyword, indicating no ranges are supported. - None, - /// The given range unit is not registered at IANA. - Unregistered(String), -} - - -impl FromStr for RangeUnit { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - match s { - "bytes" => Ok(RangeUnit::Bytes), - "none" => Ok(RangeUnit::None), - // FIXME: Check if s is really a Token - _ => Ok(RangeUnit::Unregistered(s.to_owned())), - } - } -} - -impl Display for RangeUnit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - RangeUnit::Bytes => f.write_str("bytes"), - RangeUnit::None => f.write_str("none"), - RangeUnit::Unregistered(ref x) => f.write_str(&x), - } - } -} diff --git a/third_party/rust/hyper/src/header/common/access_control_allow_credentials.rs b/third_party/rust/hyper/src/header/common/access_control_allow_credentials.rs deleted file mode 100644 index 03ef89302431..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_allow_credentials.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::fmt::{self, Display}; -use std::str; -use unicase::UniCase; -use header::{Header, HeaderFormat}; - -/// `Access-Control-Allow-Credentials` header, part of -/// [CORS](http://www.w3.org/TR/cors/#access-control-allow-headers-response-header) -/// -/// > The Access-Control-Allow-Credentials HTTP response header indicates whether the -/// > response to request can be exposed when the credentials flag is true. When part -/// > of the response to an preflight request it indicates that the actual request can -/// > be made with credentials. 
The Access-Control-Allow-Credentials HTTP header must -/// > match the following ABNF: -/// -/// # ABNF -/// ```plain -/// Access-Control-Allow-Credentials: "Access-Control-Allow-Credentials" ":" "true" -/// ``` -/// -/// Since there is only one acceptable field value, the header struct does not accept -/// any values at all. Setting an empty `AccessControlAllowCredentials` header is -/// sufficient. See the examples below. -/// -/// # Example values -/// * "true" -/// -/// # Examples -/// ``` -/// # extern crate hyper; -/// # fn main() { -/// -/// use hyper::header::{Headers, AccessControlAllowCredentials}; -/// -/// let mut headers = Headers::new(); -/// headers.set(AccessControlAllowCredentials); -/// # } -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct AccessControlAllowCredentials; - -const ACCESS_CONTROL_ALLOW_CREDENTIALS_TRUE: UniCase<&'static str> = UniCase("true"); - -impl Header for AccessControlAllowCredentials { - fn header_name() -> &'static str { - "Access-Control-Allow-Credentials" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - if raw.len() == 1 { - let text = unsafe { - // safe because: - // 1. we just checked raw.len == 1 - // 2. we don't actually care if it's utf8, we just want to - // compare the bytes with the "case" normalized. If it's not - // utf8, then the byte comparison will fail, and we'll return - // None. No big deal. 
- str::from_utf8_unchecked(raw.get_unchecked(0)) - }; - if UniCase(text) == ACCESS_CONTROL_ALLOW_CREDENTIALS_TRUE { - return Ok(AccessControlAllowCredentials); - } - } - Err(::Error::Header) - } -} - -impl HeaderFormat for AccessControlAllowCredentials { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("true") - } -} - -impl Display for AccessControlAllowCredentials { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - self.fmt_header(f) - } -} - -#[cfg(test)] -mod test_access_control_allow_credentials { - use std::str; - use header::*; - use super::AccessControlAllowCredentials as HeaderField; - test_header!(works, vec![b"true"], Some(HeaderField)); - test_header!(ignores_case, vec![b"True"]); - test_header!(not_bool, vec![b"false"], None); - test_header!(only_single, vec![b"true", b"true"], None); - test_header!(no_gibberish, vec!["\u{645}\u{631}\u{62d}\u{628}\u{627}".as_bytes()], None); -} \ No newline at end of file diff --git a/third_party/rust/hyper/src/header/common/access_control_allow_headers.rs b/third_party/rust/hyper/src/header/common/access_control_allow_headers.rs deleted file mode 100644 index e753bde1ec4c..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_allow_headers.rs +++ /dev/null @@ -1,58 +0,0 @@ -use unicase::UniCase; - -header! { - /// `Access-Control-Allow-Headers` header, part of - /// [CORS](http://www.w3.org/TR/cors/#access-control-allow-headers-response-header) - /// - /// The `Access-Control-Allow-Headers` header indicates, as part of the - /// response to a preflight request, which header field names can be used - /// during the actual request. 
- /// - /// # ABNF - /// ```plain - /// Access-Control-Allow-Headers: "Access-Control-Allow-Headers" ":" #field-name - /// ``` - /// - /// # Example values - /// * `accept-language, date` - /// - /// # Examples - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, AccessControlAllowHeaders}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlAllowHeaders(vec![UniCase("date".to_owned())]) - /// ); - /// # } - /// ``` - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, AccessControlAllowHeaders}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlAllowHeaders(vec![ - /// UniCase("accept-language".to_owned()), - /// UniCase("date".to_owned()), - /// ]) - /// ); - /// # } - /// ``` - (AccessControlAllowHeaders, "Access-Control-Allow-Headers") => (UniCase)* - - test_access_control_allow_headers { - test_header!(test1, vec![b"accept-language, date"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/access_control_allow_methods.rs b/third_party/rust/hyper/src/header/common/access_control_allow_methods.rs deleted file mode 100644 index 7917e1992d53..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_allow_methods.rs +++ /dev/null @@ -1,48 +0,0 @@ -use method::Method; - -header! { - /// `Access-Control-Allow-Methods` header, part of - /// [CORS](http://www.w3.org/TR/cors/#access-control-allow-methods-response-header) - /// - /// The `Access-Control-Allow-Methods` header indicates, as part of the - /// response to a preflight request, which methods can be used during the - /// actual request. 
- /// - /// # ABNF - /// ```plain - /// Access-Control-Allow-Methods: "Access-Control-Allow-Methods" ":" #Method - /// ``` - /// - /// # Example values - /// * `PUT, DELETE, XMODIFY` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, AccessControlAllowMethods}; - /// use hyper::method::Method; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlAllowMethods(vec![Method::Get]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, AccessControlAllowMethods}; - /// use hyper::method::Method; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlAllowMethods(vec![ - /// Method::Get, - /// Method::Post, - /// Method::Patch, - /// Method::Extension("COPY".to_owned()), - /// ]) - /// ); - /// ``` - (AccessControlAllowMethods, "Access-Control-Allow-Methods") => (Method)* - - test_access_control_allow_methods { - test_header!(test1, vec![b"PUT, DELETE, XMODIFY"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/access_control_allow_origin.rs b/third_party/rust/hyper/src/header/common/access_control_allow_origin.rs deleted file mode 100644 index 306966e867f5..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_allow_origin.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::fmt::{self, Display}; - -use header::{Header, HeaderFormat}; - -/// The `Access-Control-Allow-Origin` response header, -/// part of [CORS](http://www.w3.org/TR/cors/#access-control-allow-origin-response-header) -/// -/// The `Access-Control-Allow-Origin` header indicates whether a resource -/// can be shared based by returning the value of the Origin request header, -/// "*", or "null" in the response. 
-/// -/// # ABNF -/// ```plain -/// Access-Control-Allow-Origin = "Access-Control-Allow-Origin" ":" origin-list-or-null | "*" -/// ``` -/// -/// # Example values -/// * `null` -/// * `*` -/// * `http://google.com/` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, AccessControlAllowOrigin}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// AccessControlAllowOrigin::Any -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, AccessControlAllowOrigin}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// AccessControlAllowOrigin::Null, -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, AccessControlAllowOrigin}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// AccessControlAllowOrigin::Value("http://hyper.rs".to_owned()) -/// ); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub enum AccessControlAllowOrigin { - /// Allow all origins - Any, - /// A hidden origin - Null, - /// Allow one particular origin - Value(String), -} - -impl Header for AccessControlAllowOrigin { - fn header_name() -> &'static str { - "Access-Control-Allow-Origin" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - if raw.len() != 1 { - return Err(::Error::Header) - } - let value = unsafe { raw.get_unchecked(0) }; - Ok(match &value[..] 
{ - b"*" => AccessControlAllowOrigin::Any, - b"null" => AccessControlAllowOrigin::Null, - _ => AccessControlAllowOrigin::Value(try!(String::from_utf8(value.clone()))) - }) - } -} - -impl HeaderFormat for AccessControlAllowOrigin { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - AccessControlAllowOrigin::Any => f.write_str("*"), - AccessControlAllowOrigin::Null => f.write_str("null"), - AccessControlAllowOrigin::Value(ref url) => Display::fmt(url, f), - } - } -} - -impl Display for AccessControlAllowOrigin { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - self.fmt_header(f) - } -} - -#[cfg(test)] -mod test_access_control_allow_orgin { - use header::*; - use super::AccessControlAllowOrigin as HeaderField; - test_header!(test1, vec![b"null"]); - test_header!(test2, vec![b"*"]); - test_header!(test3, vec![b"http://google.com/"]); -} diff --git a/third_party/rust/hyper/src/header/common/access_control_expose_headers.rs b/third_party/rust/hyper/src/header/common/access_control_expose_headers.rs deleted file mode 100644 index ac187446c4a0..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_expose_headers.rs +++ /dev/null @@ -1,60 +0,0 @@ -use unicase::UniCase; - -header! { - /// `Access-Control-Expose-Headers` header, part of - /// [CORS](http://www.w3.org/TR/cors/#access-control-expose-headers-response-header) - /// - /// The Access-Control-Expose-Headers header indicates which headers are safe to expose to the - /// API of a CORS API specification. 
- /// - /// # ABNF - /// ```plain - /// Access-Control-Expose-Headers = "Access-Control-Expose-Headers" ":" #field-name - /// ``` - /// - /// # Example values - /// * `ETag, Content-Length` - /// - /// # Examples - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, AccessControlExposeHeaders}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlExposeHeaders(vec![ - /// UniCase("etag".to_owned()), - /// UniCase("content-length".to_owned()) - /// ]) - /// ); - /// # } - /// ``` - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, AccessControlExposeHeaders}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlExposeHeaders(vec![ - /// UniCase("etag".to_owned()), - /// UniCase("content-length".to_owned()) - /// ]) - /// ); - /// # } - /// ``` - (AccessControlExposeHeaders, "Access-Control-Expose-Headers") => (UniCase)* - - test_access_control_expose_headers { - test_header!(test1, vec![b"etag, content-length"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/access_control_max_age.rs b/third_party/rust/hyper/src/header/common/access_control_max_age.rs deleted file mode 100644 index d487dc742895..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_max_age.rs +++ /dev/null @@ -1,28 +0,0 @@ -header! { - /// `Access-Control-Max-Age` header, part of - /// [CORS](http://www.w3.org/TR/cors/#access-control-max-age-response-header) - /// - /// The `Access-Control-Max-Age` header indicates how long the results of a - /// preflight request can be cached in a preflight result cache. 
- /// - /// # ABNF - /// ```plain - /// Access-Control-Max-Age = \"Access-Control-Max-Age\" \":\" delta-seconds - /// ``` - /// - /// # Example values - /// * `531` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, AccessControlMaxAge}; - /// - /// let mut headers = Headers::new(); - /// headers.set(AccessControlMaxAge(1728000u32)); - /// ``` - (AccessControlMaxAge, "Access-Control-Max-Age") => [u32] - - test_access_control_max_age { - test_header!(test1, vec![b"531"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/access_control_request_headers.rs b/third_party/rust/hyper/src/header/common/access_control_request_headers.rs deleted file mode 100644 index b08cb33cd819..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_request_headers.rs +++ /dev/null @@ -1,58 +0,0 @@ -use unicase::UniCase; - -header! { - /// `Access-Control-Request-Headers` header, part of - /// [CORS](http://www.w3.org/TR/cors/#access-control-request-headers-request-header) - /// - /// The `Access-Control-Request-Headers` header indicates which headers will - /// be used in the actual request as part of the preflight request. - /// during the actual request. 
- /// - /// # ABNF - /// ```plain - /// Access-Control-Allow-Headers: "Access-Control-Allow-Headers" ":" #field-name - /// ``` - /// - /// # Example values - /// * `accept-language, date` - /// - /// # Examples - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, AccessControlRequestHeaders}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlRequestHeaders(vec![UniCase("date".to_owned())]) - /// ); - /// # } - /// ``` - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, AccessControlRequestHeaders}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// AccessControlRequestHeaders(vec![ - /// UniCase("accept-language".to_owned()), - /// UniCase("date".to_owned()), - /// ]) - /// ); - /// # } - /// ``` - (AccessControlRequestHeaders, "Access-Control-Request-Headers") => (UniCase)* - - test_access_control_request_headers { - test_header!(test1, vec![b"accept-language, date"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/access_control_request_method.rs b/third_party/rust/hyper/src/header/common/access_control_request_method.rs deleted file mode 100644 index afeb4e296f62..000000000000 --- a/third_party/rust/hyper/src/header/common/access_control_request_method.rs +++ /dev/null @@ -1,30 +0,0 @@ -use method::Method; - -header! { - /// `Access-Control-Request-Method` header, part of - /// [CORS](http://www.w3.org/TR/cors/#access-control-request-method-request-header) - /// - /// The `Access-Control-Request-Method` header indicates which method will be - /// used in the actual request as part of the preflight request. 
- /// # ABNF - /// ```plain - /// Access-Control-Request-Method: \"Access-Control-Request-Method\" \":\" Method - /// ``` - /// - /// # Example values - /// * `GET` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, AccessControlRequestMethod}; - /// use hyper::method::Method; - /// - /// let mut headers = Headers::new(); - /// headers.set(AccessControlRequestMethod(Method::Get)); - /// ``` - (AccessControlRequestMethod, "Access-Control-Request-Method") => [Method] - - test_access_control_request_method { - test_header!(test1, vec![b"GET"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/allow.rs b/third_party/rust/hyper/src/header/common/allow.rs deleted file mode 100644 index a43220a6e8b4..000000000000 --- a/third_party/rust/hyper/src/header/common/allow.rs +++ /dev/null @@ -1,76 +0,0 @@ -use method::Method; - -header! { - /// `Allow` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.1) - /// - /// The `Allow` header field lists the set of methods advertised as - /// supported by the target resource. The purpose of this field is - /// strictly to inform the recipient of valid request methods associated - /// with the resource. 
- /// - /// # ABNF - /// ```plain - /// Allow = #method - /// ``` - /// - /// # Example values - /// * `GET, HEAD, PUT` - /// * `OPTIONS, GET, PUT, POST, DELETE, HEAD, TRACE, CONNECT, PATCH, fOObAr` - /// * `` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, Allow}; - /// use hyper::method::Method; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// Allow(vec![Method::Get]) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, Allow}; - /// use hyper::method::Method; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// Allow(vec![ - /// Method::Get, - /// Method::Post, - /// Method::Patch, - /// Method::Extension("COPY".to_owned()), - /// ]) - /// ); - /// ``` - (Allow, "Allow") => (Method)* - - test_allow { - // From the RFC - test_header!( - test1, - vec![b"GET, HEAD, PUT"], - Some(HeaderField(vec![Method::Get, Method::Head, Method::Put]))); - // Own tests - test_header!( - test2, - vec![b"OPTIONS, GET, PUT, POST, DELETE, HEAD, TRACE, CONNECT, PATCH, fOObAr"], - Some(HeaderField(vec![ - Method::Options, - Method::Get, - Method::Put, - Method::Post, - Method::Delete, - Method::Head, - Method::Trace, - Method::Connect, - Method::Patch, - Method::Extension("fOObAr".to_owned())]))); - test_header!( - test3, - vec![b""], - Some(HeaderField(Vec::::new()))); - } -} - -bench_header!(bench, - Allow, { vec![b"OPTIONS,GET,PUT,POST,DELETE,HEAD,TRACE,CONNECT,PATCH,fOObAr".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/authorization.rs b/third_party/rust/hyper/src/header/common/authorization.rs deleted file mode 100644 index 681851060bbd..000000000000 --- a/third_party/rust/hyper/src/header/common/authorization.rs +++ /dev/null @@ -1,289 +0,0 @@ -use std::any::Any; -use std::fmt::{self, Display}; -use std::str::{FromStr, from_utf8}; -use std::ops::{Deref, DerefMut}; -use base64::{encode, decode}; -use header::{Header, HeaderFormat}; - -/// `Authorization` header, defined in 
[RFC7235](https://tools.ietf.org/html/rfc7235#section-4.2) -/// -/// The `Authorization` header field allows a user agent to authenticate -/// itself with an origin server -- usually, but not necessarily, after -/// receiving a 401 (Unauthorized) response. Its value consists of -/// credentials containing the authentication information of the user -/// agent for the realm of the resource being requested. -/// -/// # ABNF -/// ```plain -/// Authorization = credentials -/// ``` -/// -/// # Example values -/// * `Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==` -/// * `Bearer fpKL54jvWmEGVoRdCNjG` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Authorization}; -/// -/// let mut headers = Headers::new(); -/// headers.set(Authorization("let me in".to_owned())); -/// ``` -/// ``` -/// use hyper::header::{Headers, Authorization, Basic}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Authorization( -/// Basic { -/// username: "Aladdin".to_owned(), -/// password: Some("open sesame".to_owned()) -/// } -/// ) -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, Authorization, Bearer}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Authorization( -/// Bearer { -/// token: "QWxhZGRpbjpvcGVuIHNlc2FtZQ".to_owned() -/// } -/// ) -/// ); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct Authorization(pub S); - -impl Deref for Authorization { - type Target = S; - - fn deref(&self) -> &S { - &self.0 - } -} - -impl DerefMut for Authorization { - fn deref_mut(&mut self) -> &mut S { - &mut self.0 - } -} - -impl Header for Authorization where ::Err: 'static { - fn header_name() -> &'static str { - "Authorization" - } - - fn parse_header(raw: &[Vec]) -> ::Result> { - if raw.len() != 1 { - return Err(::Error::Header); - } - let header = try!(from_utf8(unsafe { &raw.get_unchecked(0)[..] 
})); - if let Some(scheme) = ::scheme() { - if header.starts_with(scheme) && header.len() > scheme.len() + 1 { - match header[scheme.len() + 1..].parse::().map(Authorization) { - Ok(h) => Ok(h), - Err(_) => Err(::Error::Header) - } - } else { - Err(::Error::Header) - } - } else { - match header.parse::().map(Authorization) { - Ok(h) => Ok(h), - Err(_) => Err(::Error::Header) - } - } - } -} - -impl HeaderFormat for Authorization where ::Err: 'static { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(scheme) = ::scheme() { - try!(write!(f, "{} ", scheme)) - }; - self.0.fmt_scheme(f) - } -} - -/// An Authorization scheme to be used in the header. -pub trait Scheme: FromStr + fmt::Debug + Clone + Send + Sync { - /// An optional Scheme name. - /// - /// Will be replaced with an associated constant once available. - fn scheme() -> Option<&'static str>; - /// Format the Scheme data into a header value. - fn fmt_scheme(&self, &mut fmt::Formatter) -> fmt::Result; -} - -impl Scheme for String { - fn scheme() -> Option<&'static str> { - None - } - - fn fmt_scheme(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(self, f) - } -} - -/// Credential holder for Basic Authentication -#[derive(Clone, PartialEq, Debug)] -pub struct Basic { - /// The username as a possibly empty string - pub username: String, - /// The password. `None` if the `:` delimiter character was not - /// part of the parsed input. - pub password: Option -} - -impl Scheme for Basic { - fn scheme() -> Option<&'static str> { - Some("Basic") - } - - fn fmt_scheme(&self, f: &mut fmt::Formatter) -> fmt::Result { - //FIXME: serialize::base64 could use some Debug implementation, so - //that we don't have to allocate a new string here just to write it - //to the formatter. 
- let mut text = self.username.clone(); - text.push(':'); - if let Some(ref pass) = self.password { - text.push_str(&pass[..]); - } - - f.write_str(&encode(text.as_bytes())) - } -} - -impl FromStr for Basic { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - match decode(s) { - Ok(decoded) => match String::from_utf8(decoded) { - Ok(text) => { - let parts = &mut text.split(':'); - let user = match parts.next() { - Some(part) => part.to_owned(), - None => return Err(::Error::Header) - }; - let password = match parts.next() { - Some(part) => Some(part.to_owned()), - None => None - }; - Ok(Basic { - username: user, - password: password - }) - }, - Err(e) => { - debug!("Basic::from_utf8 error={:?}", e); - Err(::Error::Header) - } - }, - Err(e) => { - debug!("Basic::from_base64 error={:?}", e); - Err(::Error::Header) - } - } - } -} - -#[derive(Clone, PartialEq, Debug)] -///Token holder for Bearer Authentication, most often seen with oauth -pub struct Bearer { - ///Actual bearer token as a string - pub token: String -} - -impl Scheme for Bearer { - fn scheme() -> Option<&'static str> { - Some("Bearer") - } - - fn fmt_scheme(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.token) - } -} - -impl FromStr for Bearer { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - Ok(Bearer { token: s.to_owned()}) - } -} - -#[cfg(test)] -mod tests { - use super::{Authorization, Basic, Bearer}; - use super::super::super::{Headers, Header}; - - #[test] - fn test_raw_auth() { - let mut headers = Headers::new(); - headers.set(Authorization("foo bar baz".to_owned())); - assert_eq!(headers.to_string(), "Authorization: foo bar baz\r\n".to_owned()); - } - - #[test] - fn test_raw_auth_parse() { - let header: Authorization = Header::parse_header( - &[b"foo bar baz".to_vec()]).unwrap(); - assert_eq!(header.0, "foo bar baz"); - } - - #[test] - fn test_basic_auth() { - let mut headers = Headers::new(); - headers.set(Authorization( - Basic { username: 
"Aladdin".to_owned(), password: Some("open sesame".to_owned()) })); - assert_eq!( - headers.to_string(), - "Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\r\n".to_owned()); - } - - #[test] - fn test_basic_auth_no_password() { - let mut headers = Headers::new(); - headers.set(Authorization(Basic { username: "Aladdin".to_owned(), password: None })); - assert_eq!(headers.to_string(), "Authorization: Basic QWxhZGRpbjo=\r\n".to_owned()); - } - - #[test] - fn test_basic_auth_parse() { - let auth: Authorization = Header::parse_header( - &[b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==".to_vec()]).unwrap(); - assert_eq!(auth.0.username, "Aladdin"); - assert_eq!(auth.0.password, Some("open sesame".to_owned())); - } - - #[test] - fn test_basic_auth_parse_no_password() { - let auth: Authorization = Header::parse_header( - &[b"Basic QWxhZGRpbjo=".to_vec()]).unwrap(); - assert_eq!(auth.0.username, "Aladdin"); - assert_eq!(auth.0.password, Some("".to_owned())); - } - - #[test] - fn test_bearer_auth() { - let mut headers = Headers::new(); - headers.set(Authorization( - Bearer { token: "fpKL54jvWmEGVoRdCNjG".to_owned() })); - assert_eq!( - headers.to_string(), - "Authorization: Bearer fpKL54jvWmEGVoRdCNjG\r\n".to_owned()); - } - - #[test] - fn test_bearer_auth_parse() { - let auth: Authorization = Header::parse_header( - &[b"Bearer fpKL54jvWmEGVoRdCNjG".to_vec()]).unwrap(); - assert_eq!(auth.0.token, "fpKL54jvWmEGVoRdCNjG"); - } -} - -bench_header!(raw, Authorization, { vec![b"foo bar baz".to_vec()] }); -bench_header!(basic, Authorization, { vec![b"Basic QWxhZGRpbjpuIHNlc2FtZQ==".to_vec()] }); -bench_header!(bearer, Authorization, { vec![b"Bearer fpKL54jvWmEGVoRdCNjG".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/cache_control.rs b/third_party/rust/hyper/src/header/common/cache_control.rs deleted file mode 100644 index c9a19e17b90a..000000000000 --- a/third_party/rust/hyper/src/header/common/cache_control.rs +++ /dev/null @@ -1,211 +0,0 @@ -use std::fmt; -use 
std::str::FromStr; -use header::{Header, HeaderFormat}; -use header::parsing::{from_comma_delimited, fmt_comma_delimited}; - -/// `Cache-Control` header, defined in [RFC7234](https://tools.ietf.org/html/rfc7234#section-5.2) -/// -/// The `Cache-Control` header field is used to specify directives for -/// caches along the request/response chain. Such cache directives are -/// unidirectional in that the presence of a directive in a request does -/// not imply that the same directive is to be given in the response. -/// -/// # ABNF -/// ```plain -/// Cache-Control = 1#cache-directive -/// cache-directive = token [ "=" ( token / quoted-string ) ] -/// ``` -/// -/// # Example values -/// * `no-cache` -/// * `private, community="UCI"` -/// * `max-age=30` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, CacheControl, CacheDirective}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// CacheControl(vec![CacheDirective::MaxAge(86400u32)]) -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, CacheControl, CacheDirective}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// CacheControl(vec![ -/// CacheDirective::NoCache, -/// CacheDirective::Private, -/// CacheDirective::MaxAge(360u32), -/// CacheDirective::Extension("foo".to_owned(), -/// Some("bar".to_owned())), -/// ]) -/// ); -/// ``` -#[derive(PartialEq, Clone, Debug)] -pub struct CacheControl(pub Vec); - -__hyper__deref!(CacheControl => Vec); - -impl Header for CacheControl { - fn header_name() -> &'static str { - "Cache-Control" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - let directives = try!(from_comma_delimited(raw)); - if !directives.is_empty() { - Ok(CacheControl(directives)) - } else { - Err(::Error::Header) - } - } -} - -impl HeaderFormat for CacheControl { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for CacheControl { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 
- fmt_comma_delimited(f, &self[..]) - } -} - -/// `CacheControl` contains a list of these directives. -#[derive(PartialEq, Clone, Debug)] -pub enum CacheDirective { - /// "no-cache" - NoCache, - /// "no-store" - NoStore, - /// "no-transform" - NoTransform, - /// "only-if-cached" - OnlyIfCached, - - // request directives - /// "max-age=delta" - MaxAge(u32), - /// "max-stale=delta" - MaxStale(u32), - /// "min-fresh=delta" - MinFresh(u32), - - // response directives - /// "must-revalidate" - MustRevalidate, - /// "public" - Public, - /// "private" - Private, - /// "proxy-revalidate" - ProxyRevalidate, - /// "s-maxage=delta" - SMaxAge(u32), - - /// Extension directives. Optionally include an argument. - Extension(String, Option) -} - -impl fmt::Display for CacheDirective { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::CacheDirective::*; - fmt::Display::fmt(match *self { - NoCache => "no-cache", - NoStore => "no-store", - NoTransform => "no-transform", - OnlyIfCached => "only-if-cached", - - MaxAge(secs) => return write!(f, "max-age={}", secs), - MaxStale(secs) => return write!(f, "max-stale={}", secs), - MinFresh(secs) => return write!(f, "min-fresh={}", secs), - - MustRevalidate => "must-revalidate", - Public => "public", - Private => "private", - ProxyRevalidate => "proxy-revalidate", - SMaxAge(secs) => return write!(f, "s-maxage={}", secs), - - Extension(ref name, None) => &name[..], - Extension(ref name, Some(ref arg)) => return write!(f, "{}={}", name, arg), - - }, f) - } -} - -impl FromStr for CacheDirective { - type Err = Option<::Err>; - fn from_str(s: &str) -> Result::Err>> { - use self::CacheDirective::*; - match s { - "no-cache" => Ok(NoCache), - "no-store" => Ok(NoStore), - "no-transform" => Ok(NoTransform), - "only-if-cached" => Ok(OnlyIfCached), - "must-revalidate" => Ok(MustRevalidate), - "public" => Ok(Public), - "private" => Ok(Private), - "proxy-revalidate" => Ok(ProxyRevalidate), - "" => Err(None), - _ => match s.find('=') { - 
Some(idx) if idx+1 < s.len() => match (&s[..idx], (&s[idx+1..]).trim_matches('"')) { - ("max-age" , secs) => secs.parse().map(MaxAge).map_err(Some), - ("max-stale", secs) => secs.parse().map(MaxStale).map_err(Some), - ("min-fresh", secs) => secs.parse().map(MinFresh).map_err(Some), - ("s-maxage", secs) => secs.parse().map(SMaxAge).map_err(Some), - (left, right) => Ok(Extension(left.to_owned(), Some(right.to_owned()))) - }, - Some(_) => Err(None), - None => Ok(Extension(s.to_owned(), None)) - } - } - } -} - -#[cfg(test)] -mod tests { - use header::Header; - use super::*; - - #[test] - fn test_parse_multiple_headers() { - let cache = Header::parse_header(&[b"no-cache".to_vec(), b"private".to_vec()]); - assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::NoCache, - CacheDirective::Private]))) - } - - #[test] - fn test_parse_argument() { - let cache = Header::parse_header(&[b"max-age=100, private".to_vec()]); - assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::MaxAge(100), - CacheDirective::Private]))) - } - - #[test] - fn test_parse_quote_form() { - let cache = Header::parse_header(&[b"max-age=\"200\"".to_vec()]); - assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::MaxAge(200)]))) - } - - #[test] - fn test_parse_extension() { - let cache = Header::parse_header(&[b"foo, bar=baz".to_vec()]); - assert_eq!(cache.ok(), Some(CacheControl(vec![ - CacheDirective::Extension("foo".to_owned(), None), - CacheDirective::Extension("bar".to_owned(), Some("baz".to_owned()))]))) - } - - #[test] - fn test_parse_bad_syntax() { - let cache: ::Result = Header::parse_header(&[b"foo=".to_vec()]); - assert_eq!(cache.ok(), None) - } -} - -bench_header!(normal, - CacheControl, { vec![b"no-cache, private".to_vec(), b"max-age=100".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/connection.rs b/third_party/rust/hyper/src/header/common/connection.rs deleted file mode 100644 index 5e868333e074..000000000000 --- 
a/third_party/rust/hyper/src/header/common/connection.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::fmt::{self, Display}; -use std::str::FromStr; -use unicase::UniCase; - -pub use self::ConnectionOption::{KeepAlive, Close, ConnectionHeader}; - -const KEEP_ALIVE: UniCase<&'static str> = UniCase("keep-alive"); -const CLOSE: UniCase<&'static str> = UniCase("close"); - -/// Values that can be in the `Connection` header. -#[derive(Clone, PartialEq, Debug)] -pub enum ConnectionOption { - /// The `keep-alive` connection value. - KeepAlive, - /// The `close` connection value. - Close, - /// Values in the Connection header that are supposed to be names of other Headers. - /// - /// > When a header field aside from Connection is used to supply control - /// > information for or about the current connection, the sender MUST list - /// > the corresponding field-name within the Connection header field. - // TODO: it would be nice if these "Strings" could be stronger types, since - // they are supposed to relate to other Header fields (which we have strong - // types for). - ConnectionHeader(UniCase), -} - -impl FromStr for ConnectionOption { - type Err = (); - fn from_str(s: &str) -> Result { - if UniCase(s) == KEEP_ALIVE { - Ok(KeepAlive) - } else if UniCase(s) == CLOSE { - Ok(Close) - } else { - Ok(ConnectionHeader(UniCase(s.to_owned()))) - } - } -} - -impl Display for ConnectionOption { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - KeepAlive => "keep-alive", - Close => "close", - ConnectionHeader(UniCase(ref s)) => s.as_ref() - }) - } -} - -header! { - /// `Connection` header, defined in - /// [RFC7230](http://tools.ietf.org/html/rfc7230#section-6.1) - /// - /// The `Connection` header field allows the sender to indicate desired - /// control options for the current connection. 
In order to avoid - /// confusing downstream recipients, a proxy or gateway MUST remove or - /// replace any received connection options before forwarding the - /// message. - /// - /// # ABNF - /// ```plain - /// Connection = 1#connection-option - /// connection-option = token - /// - /// # Example values - /// * `close` - /// * `keep-alive` - /// * `upgrade` - /// ``` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, Connection}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Connection::keep_alive()); - /// ``` - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, Connection, ConnectionOption}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// Connection(vec![ - /// ConnectionOption::ConnectionHeader(UniCase("upgrade".to_owned())), - /// ]) - /// ); - /// # } - /// ``` - (Connection, "Connection") => (ConnectionOption)+ - - test_connection { - test_header!(test1, vec![b"close"]); - test_header!(test2, vec![b"keep-alive"]); - test_header!(test3, vec![b"upgrade"]); - } -} - -impl Connection { - /// A constructor to easily create a `Connection: close` header. - #[inline] - pub fn close() -> Connection { - Connection(vec![ConnectionOption::Close]) - } - - /// A constructor to easily create a `Connection: keep-alive` header. 
- #[inline] - pub fn keep_alive() -> Connection { - Connection(vec![ConnectionOption::KeepAlive]) - } -} - -bench_header!(close, Connection, { vec![b"close".to_vec()] }); -bench_header!(keep_alive, Connection, { vec![b"keep-alive".to_vec()] }); -bench_header!(header, Connection, { vec![b"authorization".to_vec()] }); - -#[cfg(test)] -mod tests { - use super::{Connection,ConnectionHeader}; - use header::Header; - use unicase::UniCase; - - fn parse_option(header: Vec) -> Connection { - let val = vec![header]; - let connection: Connection = Header::parse_header(&val[..]).unwrap(); - connection - } - - #[test] - fn test_parse() { - assert_eq!(Connection::close(),parse_option(b"close".to_vec())); - assert_eq!(Connection::keep_alive(),parse_option(b"keep-alive".to_vec())); - assert_eq!(Connection::keep_alive(),parse_option(b"Keep-Alive".to_vec())); - assert_eq!(Connection(vec![ConnectionHeader(UniCase("upgrade".to_owned()))]), - parse_option(b"upgrade".to_vec())); - } -} diff --git a/third_party/rust/hyper/src/header/common/content_disposition.rs b/third_party/rust/hyper/src/header/common/content_disposition.rs deleted file mode 100644 index e6d38743d23d..000000000000 --- a/third_party/rust/hyper/src/header/common/content_disposition.rs +++ /dev/null @@ -1,265 +0,0 @@ -// # References -// -// "The Content-Disposition Header Field" https://www.ietf.org/rfc/rfc2183.txt -// "The Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)" https://www.ietf.org/rfc/rfc6266.txt -// "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc2388.txt -// Browser conformance tests at: http://greenbytes.de/tech/tc2231/ -// IANA assignment: http://www.iana.org/assignments/cont-disp/cont-disp.xhtml - -use language_tags::LanguageTag; -use std::fmt; -use unicase::UniCase; -use url::percent_encoding; - -use header::{Header, HeaderFormat, parsing}; -use header::parsing::{parse_extended_value, HTTP_VALUE}; -use header::shared::Charset; - -/// The implied 
disposition of the content of the HTTP body -#[derive(Clone, Debug, PartialEq)] -pub enum DispositionType { - /// Inline implies default processing - Inline, - /// Attachment implies that the recipient should prompt the user to save the response locally, - /// rather than process it normally (as per its media type). - Attachment, - /// Extension type. Should be handled by recipients the same way as Attachment - Ext(String) -} - -/// A parameter to the disposition type -#[derive(Clone, Debug, PartialEq)] -pub enum DispositionParam { - /// A Filename consisting of a Charset, an optional LanguageTag, and finally a sequence of - /// bytes representing the filename - Filename(Charset, Option, Vec), - /// Extension type consisting of token and value. Recipients should ignore unrecognized - /// parameters. - Ext(String, String) -} - -/// A `Content-Disposition` header, (re)defined in [RFC6266](https://tools.ietf.org/html/rfc6266) -/// -/// The Content-Disposition response header field is used to convey -/// additional information about how to process the response payload, and -/// also can be used to attach additional metadata, such as the filename -/// to use when saving the response payload locally. 
-/// -/// # ABNF -/// ```plain -/// content-disposition = "Content-Disposition" ":" -/// disposition-type *( ";" disposition-parm ) -/// -/// disposition-type = "inline" | "attachment" | disp-ext-type -/// ; case-insensitive -/// -/// disp-ext-type = token -/// -/// disposition-parm = filename-parm | disp-ext-parm -/// -/// filename-parm = "filename" "=" value -/// | "filename*" "=" ext-value -/// -/// disp-ext-parm = token "=" value -/// | ext-token "=" ext-value -/// -/// ext-token = -/// ``` -/// -/// # Example -/// ``` -/// use hyper::header::{Headers, ContentDisposition, DispositionType, DispositionParam, Charset}; -/// -/// let mut headers = Headers::new(); -/// headers.set(ContentDisposition { -/// disposition: DispositionType::Attachment, -/// parameters: vec![DispositionParam::Filename( -/// Charset::Iso_8859_1, // The character set for the bytes of the filename -/// None, // The optional language tag (see `language-tag` crate) -/// b"\xa9 Copyright 1989.txt".to_vec() // the actual bytes of the filename -/// )] -/// }); -/// ``` -#[derive(Clone, Debug, PartialEq)] -pub struct ContentDisposition { - /// The disposition - pub disposition: DispositionType, - /// Disposition parameters - pub parameters: Vec, -} - -impl Header for ContentDisposition { - fn header_name() -> &'static str { - "Content-Disposition" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - parsing::from_one_raw_str(raw).and_then(|s: String| { - let mut sections = s.split(';'); - let disposition = match sections.next() { - Some(s) => s.trim(), - None => return Err(::Error::Header), - }; - - let mut cd = ContentDisposition { - disposition: if UniCase(&*disposition) == UniCase("inline") { - DispositionType::Inline - } else if UniCase(&*disposition) == UniCase("attachment") { - DispositionType::Attachment - } else { - DispositionType::Ext(disposition.to_owned()) - }, - parameters: Vec::new(), - }; - - for section in sections { - let mut parts = section.splitn(2, '='); - - let key = if let 
Some(key) = parts.next() { - key.trim() - } else { - return Err(::Error::Header); - }; - - let val = if let Some(val) = parts.next() { - val.trim() - } else { - return Err(::Error::Header); - }; - - cd.parameters.push( - if UniCase(&*key) == UniCase("filename") { - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), None, - val.trim_matches('"').as_bytes().to_owned()) - } else if UniCase(&*key) == UniCase("filename*") { - let extended_value = try!(parse_extended_value(val)); - DispositionParam::Filename(extended_value.charset, extended_value.language_tag, extended_value.value) - } else { - DispositionParam::Ext(key.to_owned(), val.trim_matches('"').to_owned()) - } - ); - } - - Ok(cd) - }) - } -} - -impl HeaderFormat for ContentDisposition { - #[inline] - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self, f) - } -} - -impl fmt::Display for ContentDisposition { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.disposition { - DispositionType::Inline => try!(write!(f, "inline")), - DispositionType::Attachment => try!(write!(f, "attachment")), - DispositionType::Ext(ref s) => try!(write!(f, "{}", s)), - } - for param in &self.parameters { - match *param { - DispositionParam::Filename(ref charset, ref opt_lang, ref bytes) => { - let mut use_simple_format: bool = false; - if opt_lang.is_none() { - if let Charset::Ext(ref ext) = *charset { - if UniCase(&**ext) == UniCase("utf-8") { - use_simple_format = true; - } - } - } - if use_simple_format { - try!(write!(f, "; filename=\"{}\"", - match String::from_utf8(bytes.clone()) { - Ok(s) => s, - Err(_) => return Err(fmt::Error), - })); - } else { - try!(write!(f, "; filename*={}'", charset)); - if let Some(ref lang) = *opt_lang { - try!(write!(f, "{}", lang)); - }; - try!(write!(f, "'")); - try!(f.write_str( - &percent_encoding::percent_encode(bytes, HTTP_VALUE).to_string())) - } - }, - DispositionParam::Ext(ref k, ref v) => try!(write!(f, "; {}=\"{}\"", k, 
v)), - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::{ContentDisposition,DispositionType,DispositionParam}; - use ::header::Header; - use ::header::shared::Charset; - - #[test] - fn test_parse_header() { - assert!(ContentDisposition::parse_header([b"".to_vec()].as_ref()).is_err()); - - let a = [b"form-data; dummy=3; name=upload;\r\n filename=\"sample.png\"".to_vec()]; - let a: ContentDisposition = ContentDisposition::parse_header(a.as_ref()).unwrap(); - let b = ContentDisposition { - disposition: DispositionType::Ext("form-data".to_owned()), - parameters: vec![ - DispositionParam::Ext("dummy".to_owned(), "3".to_owned()), - DispositionParam::Ext("name".to_owned(), "upload".to_owned()), - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), - None, - "sample.png".bytes().collect()) ] - }; - assert_eq!(a, b); - - let a = [b"attachment; filename=\"image.jpg\"".to_vec()]; - let a: ContentDisposition = ContentDisposition::parse_header(a.as_ref()).unwrap(); - let b = ContentDisposition { - disposition: DispositionType::Attachment, - parameters: vec![ - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), - None, - "image.jpg".bytes().collect()) ] - }; - assert_eq!(a, b); - - let a = [b"attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates".to_vec()]; - let a: ContentDisposition = ContentDisposition::parse_header(a.as_ref()).unwrap(); - let b = ContentDisposition { - disposition: DispositionType::Attachment, - parameters: vec![ - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), - None, - vec![0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, - 0xe2, 0x82, 0xac, 0x20, b'r', b'a', b't', b'e', b's']) ] - }; - assert_eq!(a, b); - } - - #[test] - fn test_display() { - let a = [b"attachment; filename*=UTF-8'en'%C2%A3%20and%20%E2%82%AC%20rates".to_vec()]; - let as_string = ::std::str::from_utf8(&(a[0])).unwrap(); - let a: ContentDisposition = ContentDisposition::parse_header(a.as_ref()).unwrap(); - let display_rendered = 
format!("{}",a); - assert_eq!(as_string, display_rendered); - - let a = [b"attachment; filename*=UTF-8''black%20and%20white.csv".to_vec()]; - let a: ContentDisposition = ContentDisposition::parse_header(a.as_ref()).unwrap(); - let display_rendered = format!("{}",a); - assert_eq!("attachment; filename=\"black and white.csv\"".to_owned(), display_rendered); - - let a = [b"attachment; filename=colourful.csv".to_vec()]; - let a: ContentDisposition = ContentDisposition::parse_header(a.as_ref()).unwrap(); - let display_rendered = format!("{}",a); - assert_eq!("attachment; filename=\"colourful.csv\"".to_owned(), display_rendered); - } -} diff --git a/third_party/rust/hyper/src/header/common/content_encoding.rs b/third_party/rust/hyper/src/header/common/content_encoding.rs deleted file mode 100644 index 1b2f9d96e7aa..000000000000 --- a/third_party/rust/hyper/src/header/common/content_encoding.rs +++ /dev/null @@ -1,50 +0,0 @@ -use header::Encoding; - -header! { - /// `Content-Encoding` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-3.1.2.2) - /// - /// The `Content-Encoding` header field indicates what content codings - /// have been applied to the representation, beyond those inherent in the - /// media type, and thus what decoding mechanisms have to be applied in - /// order to obtain data in the media type referenced by the Content-Type - /// header field. Content-Encoding is primarily used to allow a - /// representation's data to be compressed without losing the identity of - /// its underlying media type. 
- /// - /// # ABNF - /// ```plain - /// Content-Encoding = 1#content-coding - /// ``` - /// - /// # Example values - /// * `gzip` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, ContentEncoding, Encoding}; - /// - /// let mut headers = Headers::new(); - /// headers.set(ContentEncoding(vec![Encoding::Chunked])); - /// ``` - /// ``` - /// use hyper::header::{Headers, ContentEncoding, Encoding}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// ContentEncoding(vec![ - /// Encoding::Gzip, - /// Encoding::Chunked, - /// ]) - /// ); - /// ``` - (ContentEncoding, "Content-Encoding") => (Encoding)+ - - test_content_encoding { - /// Testcase from the RFC - test_header!(test1, vec![b"gzip"], Some(ContentEncoding(vec![Encoding::Gzip]))); - } -} - -bench_header!(single, ContentEncoding, { vec![b"gzip".to_vec()] }); -bench_header!(multiple, ContentEncoding, { vec![b"gzip, deflate".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/content_language.rs b/third_party/rust/hyper/src/header/common/content_language.rs deleted file mode 100644 index a6564c33a847..000000000000 --- a/third_party/rust/hyper/src/header/common/content_language.rs +++ /dev/null @@ -1,59 +0,0 @@ -use language_tags::LanguageTag; -use header::QualityItem; - -header! { - /// `Content-Language` header, defined in - /// [RFC7231](https://tools.ietf.org/html/rfc7231#section-3.1.3.2) - /// - /// The `Content-Language` header field describes the natural language(s) - /// of the intended audience for the representation. Note that this - /// might not be equivalent to all the languages used within the - /// representation. 
- /// - /// # ABNF - /// ```plain - /// Content-Language = 1#language-tag - /// ``` - /// - /// # Example values - /// * `da` - /// * `mi, en` - /// - /// # Examples - /// ``` - /// # extern crate hyper; - /// # #[macro_use] extern crate language_tags; - /// # use hyper::header::{Headers, ContentLanguage, qitem}; - /// # - /// # fn main() { - /// let mut headers = Headers::new(); - /// headers.set( - /// ContentLanguage(vec![ - /// qitem(langtag!(en)), - /// ]) - /// ); - /// # } - /// ``` - /// ``` - /// # extern crate hyper; - /// # #[macro_use] extern crate language_tags; - /// # use hyper::header::{Headers, ContentLanguage, qitem}; - /// # - /// # fn main() { - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// ContentLanguage(vec![ - /// qitem(langtag!(da)), - /// qitem(langtag!(en;;;GB)), - /// ]) - /// ); - /// # } - /// ``` - (ContentLanguage, "Content-Language") => (QualityItem)+ - - test_content_language { - test_header!(test1, vec![b"da"]); - test_header!(test2, vec![b"mi, en"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/content_length.rs b/third_party/rust/hyper/src/header/common/content_length.rs deleted file mode 100644 index f5effe4f7a93..000000000000 --- a/third_party/rust/hyper/src/header/common/content_length.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::fmt; - -use header::{HeaderFormat, Header, parsing}; - -/// `Content-Length` header, defined in -/// [RFC7230](http://tools.ietf.org/html/rfc7230#section-3.3.2) -/// -/// When a message does not have a `Transfer-Encoding` header field, a -/// Content-Length header field can provide the anticipated size, as a -/// decimal number of octets, for a potential payload body. For messages -/// that do include a payload body, the Content-Length field-value -/// provides the framing information necessary for determining where the -/// body (and message) ends. 
For messages that do not include a payload -/// body, the Content-Length indicates the size of the selected -/// representation. -/// -/// # ABNF -/// ```plain -/// Content-Length = 1*DIGIT -/// ``` -/// -/// # Example values -/// * `3495` -/// -/// # Example -/// ``` -/// use hyper::header::{Headers, ContentLength}; -/// -/// let mut headers = Headers::new(); -/// headers.set(ContentLength(1024u64)); -/// ``` -#[derive(Clone, Copy, Debug, PartialEq)] -pub struct ContentLength(pub u64); - -impl Header for ContentLength { - #[inline] - fn header_name() -> &'static str { - "Content-Length" - } - fn parse_header(raw: &[Vec]) -> ::Result { - // If multiple Content-Length headers were sent, everything can still - // be alright if they all contain the same value, and all parse - // correctly. If not, then it's an error. - raw.iter() - .map(::std::ops::Deref::deref) - .map(parsing::from_raw_str) - .fold(None, |prev, x| { - match (prev, x) { - (None, x) => Some(x), - (e@Some(Err(_)), _ ) => e, - (Some(Ok(prev)), Ok(x)) if prev == x => Some(Ok(prev)), - _ => Some(Err(::Error::Header)) - } - }) - .unwrap_or(Err(::Error::Header)) - .map(ContentLength) - } -} - -impl HeaderFormat for ContentLength { - #[inline] - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } -} - -impl fmt::Display for ContentLength { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } -} - -__hyper__deref!(ContentLength => u64); - -__hyper__tm!(ContentLength, tests { - // Testcase from RFC - test_header!(test1, vec![b"3495"], Some(HeaderField(3495))); - - test_header!(test_invalid, vec![b"34v95"], None); - - // Can't use the test_header macro because "5, 5" gets cleaned to "5". 
- #[test] - fn test_duplicates() { - let parsed = HeaderField::parse_header(&[b"5"[..].into(), - b"5"[..].into()]).unwrap(); - assert_eq!(parsed, HeaderField(5)); - assert_eq!(format!("{}", parsed), "5"); - } - - test_header!(test_duplicates_vary, vec![b"5", b"6", b"5"], None); -}); - -bench_header!(bench, ContentLength, { vec![b"42349984".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/content_range.rs b/third_party/rust/hyper/src/header/common/content_range.rs deleted file mode 100644 index 2d9a965b33d6..000000000000 --- a/third_party/rust/hyper/src/header/common/content_range.rs +++ /dev/null @@ -1,189 +0,0 @@ -use std::fmt::{self, Display}; -use std::str::FromStr; - -header! { - /// `Content-Range` header, defined in - /// [RFC7233](http://tools.ietf.org/html/rfc7233#section-4.2) - (ContentRange, "Content-Range") => [ContentRangeSpec] - - test_content_range { - test_header!(test_bytes, - vec![b"bytes 0-499/500"], - Some(ContentRange(ContentRangeSpec::Bytes { - range: Some((0, 499)), - instance_length: Some(500) - }))); - - test_header!(test_bytes_unknown_len, - vec![b"bytes 0-499/*"], - Some(ContentRange(ContentRangeSpec::Bytes { - range: Some((0, 499)), - instance_length: None - }))); - - test_header!(test_bytes_unknown_range, - vec![b"bytes */500"], - Some(ContentRange(ContentRangeSpec::Bytes { - range: None, - instance_length: Some(500) - }))); - - test_header!(test_unregistered, - vec![b"seconds 1-2"], - Some(ContentRange(ContentRangeSpec::Unregistered { - unit: "seconds".to_owned(), - resp: "1-2".to_owned() - }))); - - test_header!(test_no_len, - vec![b"bytes 0-499"], - None::); - - test_header!(test_only_unit, - vec![b"bytes"], - None::); - - test_header!(test_end_less_than_start, - vec![b"bytes 499-0/500"], - None::); - - test_header!(test_blank, - vec![b""], - None::); - - test_header!(test_bytes_many_spaces, - vec![b"bytes 1-2/500 3"], - None::); - - test_header!(test_bytes_many_slashes, - vec![b"bytes 1-2/500/600"], - None::); - - 
test_header!(test_bytes_many_dashes, - vec![b"bytes 1-2-3/500"], - None::); - - } -} - - -/// Content-Range, described in [RFC7233](https://tools.ietf.org/html/rfc7233#section-4.2) -/// -/// # ABNF -/// ```plain -/// Content-Range = byte-content-range -/// / other-content-range -/// -/// byte-content-range = bytes-unit SP -/// ( byte-range-resp / unsatisfied-range ) -/// -/// byte-range-resp = byte-range "/" ( complete-length / "*" ) -/// byte-range = first-byte-pos "-" last-byte-pos -/// unsatisfied-range = "*/" complete-length -/// -/// complete-length = 1*DIGIT -/// -/// other-content-range = other-range-unit SP other-range-resp -/// other-range-resp = *CHAR -/// ``` -#[derive(PartialEq, Clone, Debug)] -pub enum ContentRangeSpec { - /// Byte range - Bytes { - /// First and last bytes of the range, omitted if request could not be - /// satisfied - range: Option<(u64, u64)>, - - /// Total length of the instance, can be omitted if unknown - instance_length: Option - }, - - /// Custom range, with unit not registered at IANA - Unregistered { - /// other-range-unit - unit: String, - - /// other-range-resp - resp: String - } -} - -fn split_in_two(s: &str, separator: char) -> Option<(&str, &str)> { - let mut iter = s.splitn(2, separator); - match (iter.next(), iter.next()) { - (Some(a), Some(b)) => Some((a, b)), - _ => None - } -} - -impl FromStr for ContentRangeSpec { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - let res = match split_in_two(s, ' ') { - Some(("bytes", resp)) => { - let (range, instance_length) = try!(split_in_two(resp, '/').ok_or(::Error::Header)); - - let instance_length = if instance_length == "*" { - None - } else { - Some(try!(instance_length.parse().map_err(|_| ::Error::Header))) - }; - - let range = if range == "*" { - None - } else { - let (first_byte, last_byte) = try!(split_in_two(range, '-').ok_or(::Error::Header)); - let first_byte = try!(first_byte.parse().map_err(|_| ::Error::Header)); - let last_byte = 
try!(last_byte.parse().map_err(|_| ::Error::Header)); - if last_byte < first_byte { - return Err(::Error::Header); - } - Some((first_byte, last_byte)) - }; - - ContentRangeSpec::Bytes { - range: range, - instance_length: instance_length - } - } - Some((unit, resp)) => { - ContentRangeSpec::Unregistered { - unit: unit.to_owned(), - resp: resp.to_owned() - } - } - _ => return Err(::Error::Header) - }; - Ok(res) - } -} - -impl Display for ContentRangeSpec { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ContentRangeSpec::Bytes { range, instance_length } => { - try!(f.write_str("bytes ")); - match range { - Some((first_byte, last_byte)) => { - try!(write!(f, "{}-{}", first_byte, last_byte)); - }, - None => { - try!(f.write_str("*")); - } - }; - try!(f.write_str("/")); - if let Some(v) = instance_length { - write!(f, "{}", v) - } else { - f.write_str("*") - } - } - ContentRangeSpec::Unregistered { ref unit, ref resp } => { - try!(f.write_str(&unit)); - try!(f.write_str(" ")); - f.write_str(&resp) - } - } - } -} diff --git a/third_party/rust/hyper/src/header/common/content_type.rs b/third_party/rust/hyper/src/header/common/content_type.rs deleted file mode 100644 index 73ead5830546..000000000000 --- a/third_party/rust/hyper/src/header/common/content_type.rs +++ /dev/null @@ -1,97 +0,0 @@ -use mime::Mime; - -header! { - /// `Content-Type` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-3.1.1.5) - /// - /// The `Content-Type` header field indicates the media type of the - /// associated representation: either the representation enclosed in the - /// message payload or the selected representation, as determined by the - /// message semantics. The indicated media type defines both the data - /// format and how that data is intended to be processed by a recipient, - /// within the scope of the received message semantics, after any content - /// codings indicated by Content-Encoding are decoded. 
- /// - /// # ABNF - /// ```plain - /// Content-Type = media-type - /// ``` - /// - /// # Example values - /// * `text/html; charset=ISO-8859-4` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, ContentType}; - /// use hyper::mime::{Mime, TopLevel, SubLevel}; - /// - /// let mut headers = Headers::new(); - /// - /// headers.set( - /// ContentType(Mime(TopLevel::Text, SubLevel::Html, vec![])) - /// ); - /// ``` - /// ``` - /// use hyper::header::{Headers, ContentType}; - /// use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; - /// - /// let mut headers = Headers::new(); - /// - /// headers.set( - /// ContentType(Mime(TopLevel::Application, SubLevel::Json, - /// vec![(Attr::Charset, Value::Utf8)])) - /// ); - /// ``` - (ContentType, "Content-Type") => [Mime] - - test_content_type { - test_header!( - test1, - // FIXME: Should be b"text/html; charset=ISO-8859-4" but mime crate lowercases - // the whole value so parsing and formatting the value gives a different result - vec![b"text/html; charset=iso-8859-4"], - Some(HeaderField(Mime( - TopLevel::Text, - SubLevel::Html, - vec![(Attr::Charset, Value::Ext("iso-8859-4".to_owned()))])))); - } -} - -impl ContentType { - /// A constructor to easily create a `Content-Type: application/json` header. - #[inline] - pub fn json() -> ContentType { - ContentType(mime!(Application/Json)) - } - - /// A constructor to easily create a `Content-Type: text/plain; charset=utf-8` header. - #[inline] - pub fn plaintext() -> ContentType { - ContentType(mime!(Text/Plain; Charset=Utf8)) - } - - /// A constructor to easily create a `Content-Type: text/html; charset=utf-8` header. - #[inline] - pub fn html() -> ContentType { - ContentType(mime!(Text/Html; Charset=Utf8)) - } - - /// A constructor to easily create a `Content-Type: application/www-form-url-encoded` header. 
- #[inline] - pub fn form_url_encoded() -> ContentType { - ContentType(mime!(Application/WwwFormUrlEncoded)) - } - /// A constructor to easily create a `Content-Type: image/jpeg` header. - #[inline] - pub fn jpeg() -> ContentType { - ContentType(mime!(Image/Jpeg)) - } - - /// A constructor to easily create a `Content-Type: image/png` header. - #[inline] - pub fn png() -> ContentType { - ContentType(mime!(Image/Png)) - } -} - -bench_header!(bench, ContentType, { vec![b"application/json; charset=utf-8".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/cookie.rs b/third_party/rust/hyper/src/header/common/cookie.rs deleted file mode 100644 index e3c3335db4b8..000000000000 --- a/third_party/rust/hyper/src/header/common/cookie.rs +++ /dev/null @@ -1,70 +0,0 @@ -use header::{Header, HeaderFormat}; -use std::fmt::{self, Display}; -use std::str::from_utf8; - -/// `Cookie` header, defined in [RFC6265](http://tools.ietf.org/html/rfc6265#section-5.4) -/// -/// If the user agent does attach a Cookie header field to an HTTP -/// request, the user agent must send the cookie-string -/// as the value of the header field. -/// -/// When the user agent generates an HTTP request, the user agent MUST NOT -/// attach more than one Cookie header field. 
-/// -/// # Example values -/// * `SID=31d4d96e407aad42` -/// * `SID=31d4d96e407aad42; lang=en-US` -/// -/// # Example -/// ``` -/// use hyper::header::{Headers, Cookie}; -/// -/// let mut headers = Headers::new(); -/// -/// headers.set( -/// Cookie(vec![ -/// String::from("foo=bar") -/// ]) -/// ); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct Cookie(pub Vec); - -__hyper__deref!(Cookie => Vec); - -impl Header for Cookie { - fn header_name() -> &'static str { - "Cookie" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - let mut cookies = Vec::with_capacity(raw.len()); - for cookies_raw in raw.iter() { - let cookies_str = try!(from_utf8(&cookies_raw[..])); - for cookie_str in cookies_str.split(';') { - cookies.push(cookie_str.trim().to_owned()) - } - } - - if !cookies.is_empty() { - Ok(Cookie(cookies)) - } else { - Err(::Error::Header) - } - } -} - -impl HeaderFormat for Cookie { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - let cookies = &self.0; - for (i, cookie) in cookies.iter().enumerate() { - if i != 0 { - try!(f.write_str("; ")); - } - try!(Display::fmt(&cookie, f)); - } - Ok(()) - } -} - -bench_header!(bench, Cookie, { vec![b"foo=bar; baz=quux".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/date.rs b/third_party/rust/hyper/src/header/common/date.rs deleted file mode 100644 index e0a9e58c8104..000000000000 --- a/third_party/rust/hyper/src/header/common/date.rs +++ /dev/null @@ -1,40 +0,0 @@ -use header::HttpDate; - -header! { - /// `Date` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.1.1.2) - /// - /// The `Date` header field represents the date and time at which the - /// message was originated. 
- /// - /// # ABNF - /// ```plain - /// Date = HTTP-date - /// ``` - /// - /// # Example values - /// * `Tue, 15 Nov 1994 08:12:31 GMT` - /// - /// # Example - /// ``` - /// # extern crate time; - /// # extern crate hyper; - /// # fn main() { - /// // extern crate time; - /// - /// use hyper::header::{Headers, Date, HttpDate}; - /// use time; - /// - /// let mut headers = Headers::new(); - /// headers.set(Date(HttpDate(time::now()))); - /// # } - /// ``` - (Date, "Date") => [HttpDate] - - test_date { - test_header!(test1, vec![b"Tue, 15 Nov 1994 08:12:31 GMT"]); - } -} - -bench_header!(imf_fixdate, Date, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); -bench_header!(rfc_850, Date, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); -bench_header!(asctime, Date, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/etag.rs b/third_party/rust/hyper/src/header/common/etag.rs deleted file mode 100644 index 068c8599fdc4..000000000000 --- a/third_party/rust/hyper/src/header/common/etag.rs +++ /dev/null @@ -1,89 +0,0 @@ -use header::EntityTag; - -header! { - /// `ETag` header, defined in [RFC7232](http://tools.ietf.org/html/rfc7232#section-2.3) - /// - /// The `ETag` header field in a response provides the current entity-tag - /// for the selected representation, as determined at the conclusion of - /// handling the request. An entity-tag is an opaque validator for - /// differentiating between multiple representations of the same - /// resource, regardless of whether those multiple representations are - /// due to resource state changes over time, content negotiation - /// resulting in multiple representations being valid at the same time, - /// or both. An entity-tag consists of an opaque quoted string, possibly - /// prefixed by a weakness indicator. 
- /// - /// # ABNF - /// ```plain - /// ETag = entity-tag - /// ``` - /// - /// # Example values - /// * `"xyzzy"` - /// * `W/"xyzzy"` - /// * `""` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, ETag, EntityTag}; - /// - /// let mut headers = Headers::new(); - /// headers.set(ETag(EntityTag::new(false, "xyzzy".to_owned()))); - /// ``` - /// ``` - /// use hyper::header::{Headers, ETag, EntityTag}; - /// - /// let mut headers = Headers::new(); - /// headers.set(ETag(EntityTag::new(true, "xyzzy".to_owned()))); - /// ``` - (ETag, "ETag") => [EntityTag] - - test_etag { - // From the RFC - test_header!(test1, - vec![b"\"xyzzy\""], - Some(ETag(EntityTag::new(false, "xyzzy".to_owned())))); - test_header!(test2, - vec![b"W/\"xyzzy\""], - Some(ETag(EntityTag::new(true, "xyzzy".to_owned())))); - test_header!(test3, - vec![b"\"\""], - Some(ETag(EntityTag::new(false, "".to_owned())))); - // Own tests - test_header!(test4, - vec![b"\"foobar\""], - Some(ETag(EntityTag::new(false, "foobar".to_owned())))); - test_header!(test5, - vec![b"\"\""], - Some(ETag(EntityTag::new(false, "".to_owned())))); - test_header!(test6, - vec![b"W/\"weak-etag\""], - Some(ETag(EntityTag::new(true, "weak-etag".to_owned())))); - test_header!(test7, - vec![b"W/\"\x65\x62\""], - Some(ETag(EntityTag::new(true, "\u{0065}\u{0062}".to_owned())))); - test_header!(test8, - vec![b"W/\"\""], - Some(ETag(EntityTag::new(true, "".to_owned())))); - test_header!(test9, - vec![b"no-dquotes"], - None::); - test_header!(test10, - vec![b"w/\"the-first-w-is-case-sensitive\""], - None::); - test_header!(test11, - vec![b""], - None::); - test_header!(test12, - vec![b"\"unmatched-dquotes1"], - None::); - test_header!(test13, - vec![b"unmatched-dquotes2\""], - None::); - test_header!(test14, - vec![b"matched-\"dquotes\""], - None::); - } -} - -bench_header!(bench, ETag, { vec![b"W/\"nonemptytag\"".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/expect.rs 
b/third_party/rust/hyper/src/header/common/expect.rs deleted file mode 100644 index 2aa9ad5fb750..000000000000 --- a/third_party/rust/hyper/src/header/common/expect.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::fmt; -use std::str; - -use unicase::UniCase; - -use header::{Header, HeaderFormat}; - -/// The `Expect` header. -/// -/// > The "Expect" header field in a request indicates a certain set of -/// > behaviors (expectations) that need to be supported by the server in -/// > order to properly handle this request. The only such expectation -/// > defined by this specification is 100-continue. -/// > -/// > Expect = "100-continue" -/// -/// # Example -/// ``` -/// use hyper::header::{Headers, Expect}; -/// let mut headers = Headers::new(); -/// headers.set(Expect::Continue); -/// ``` -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum Expect { - /// The value `100-continue`. - Continue -} - -const EXPECT_CONTINUE: UniCase<&'static str> = UniCase("100-continue"); - -impl Header for Expect { - fn header_name() -> &'static str { - "Expect" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - if raw.len() == 1 { - let text = unsafe { - // safe because: - // 1. we just checked raw.len == 1 - // 2. we don't actually care if it's utf8, we just want to - // compare the bytes with the "case" normalized. If it's not - // utf8, then the byte comparison will fail, and we'll return - // None. No big deal. 
- str::from_utf8_unchecked(raw.get_unchecked(0)) - }; - if UniCase(text) == EXPECT_CONTINUE { - Ok(Expect::Continue) - } else { - Err(::Error::Header) - } - } else { - Err(::Error::Header) - } - } -} - -impl HeaderFormat for Expect { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for Expect { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("100-continue") - } -} diff --git a/third_party/rust/hyper/src/header/common/expires.rs b/third_party/rust/hyper/src/header/common/expires.rs deleted file mode 100644 index 839798b21db1..000000000000 --- a/third_party/rust/hyper/src/header/common/expires.rs +++ /dev/null @@ -1,45 +0,0 @@ -use header::HttpDate; - -header! { - /// `Expires` header, defined in [RFC7234](http://tools.ietf.org/html/rfc7234#section-5.3) - /// - /// The `Expires` header field gives the date/time after which the - /// response is considered stale. - /// - /// The presence of an Expires field does not imply that the original - /// resource will change or cease to exist at, before, or after that - /// time. 
- /// - /// # ABNF - /// ```plain - /// Expires = HTTP-date - /// ``` - /// - /// # Example values - /// * `Thu, 01 Dec 1994 16:00:00 GMT` - /// - /// # Example - /// ``` - /// # extern crate hyper; - /// # extern crate time; - /// # fn main() { - /// // extern crate time; - /// - /// use hyper::header::{Headers, Expires, HttpDate}; - /// use time::{self, Duration}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Expires(HttpDate(time::now() + Duration::days(1)))); - /// # } - /// ``` - (Expires, "Expires") => [HttpDate] - - test_expires { - // Testcase from RFC - test_header!(test1, vec![b"Thu, 01 Dec 1994 16:00:00 GMT"]); - } -} - -bench_header!(imf_fixdate, Expires, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); -bench_header!(rfc_850, Expires, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); -bench_header!(asctime, Expires, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/from.rs b/third_party/rust/hyper/src/header/common/from.rs deleted file mode 100644 index 69e5174476a4..000000000000 --- a/third_party/rust/hyper/src/header/common/from.rs +++ /dev/null @@ -1,26 +0,0 @@ -header! { - /// `From` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.1) - /// - /// The `From` header field contains an Internet email address for a - /// human user who controls the requesting user agent. The address ought - /// to be machine-usable. - /// # ABNF - /// ```plain - /// From = mailbox - /// mailbox = - /// ``` - /// - /// # Example - /// ``` - /// use hyper::header::{Headers, From}; - /// - /// let mut headers = Headers::new(); - /// headers.set(From("webmaster@example.org".to_owned())); - /// ``` - // FIXME: Maybe use mailbox? 
- (From, "From") => [String] - - test_from { - test_header!(test1, vec![b"webmaster@example.org"]); - } -} diff --git a/third_party/rust/hyper/src/header/common/host.rs b/third_party/rust/hyper/src/header/common/host.rs deleted file mode 100644 index 933d23da460e..000000000000 --- a/third_party/rust/hyper/src/header/common/host.rs +++ /dev/null @@ -1,145 +0,0 @@ -use header::{Header, HeaderFormat}; -use std::fmt; -use std::str::FromStr; -use header::parsing::from_one_raw_str; -use url::idna::domain_to_unicode; - -/// The `Host` header. -/// -/// HTTP/1.1 requires that all requests include a `Host` header, and so hyper -/// client requests add one automatically. -/// -/// Currently is just a String, but it should probably become a better type, -/// like `url::Host` or something. -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Host}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Host{ -/// hostname: "hyper.rs".to_owned(), -/// port: None, -/// } -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, Host}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Host{ -/// hostname: "hyper.rs".to_owned(), -/// port: Some(8080), -/// } -/// ); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct Host { - /// The hostname, such a example.domain. - pub hostname: String, - /// An optional port number. 
- pub port: Option -} - -impl Header for Host { - fn header_name() -> &'static str { - "Host" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - from_one_raw_str(raw) - } -} - -impl HeaderFormat for Host { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.port { - None | Some(80) | Some(443) => f.write_str(&self.hostname[..]), - Some(port) => write!(f, "{}:{}", self.hostname, port) - } - } -} - -impl fmt::Display for Host { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.fmt_header(f) - } -} - -impl FromStr for Host { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - let idx = s.rfind(':'); - let port = idx.and_then( - |idx| s[idx + 1..].parse().ok() - ); - let hostname_encoded = match port { - None => s, - Some(_) => &s[..idx.unwrap()] - }; - - let hostname = if hostname_encoded.starts_with("[") { - if !hostname_encoded.ends_with("]") { - return Err(::Error::Header) - } - hostname_encoded.to_owned() - } else { - let (hostname, res) = domain_to_unicode(hostname_encoded); - if res.is_err() { - return Err(::Error::Header) - } - hostname - }; - - Ok(Host { - hostname: hostname, - port: port - }) - } -} - -#[cfg(test)] -mod tests { - use super::Host; - use header::Header; - - - #[test] - fn test_host() { - let host = Header::parse_header([b"foo.com".to_vec()].as_ref()); - assert_eq!(host.ok(), Some(Host { - hostname: "foo.com".to_owned(), - port: None - })); - - - let host = Header::parse_header([b"foo.com:8080".to_vec()].as_ref()); - assert_eq!(host.ok(), Some(Host { - hostname: "foo.com".to_owned(), - port: Some(8080) - })); - - let host = Header::parse_header([b"foo.com".to_vec()].as_ref()); - assert_eq!(host.ok(), Some(Host { - hostname: "foo.com".to_owned(), - port: None - })); - - let host = Header::parse_header([b"[::1]:8080".to_vec()].as_ref()); - assert_eq!(host.ok(), Some(Host { - hostname: "[::1]".to_owned(), - port: Some(8080) - })); - - let host = Header::parse_header([b"[::1]".to_vec()].as_ref()); 
- assert_eq!(host.ok(), Some(Host { - hostname: "[::1]".to_owned(), - port: None - })); - } -} - -bench_header!(bench, Host, { vec![b"foo.com:3000".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/if_match.rs b/third_party/rust/hyper/src/header/common/if_match.rs deleted file mode 100644 index 4788453ec4c7..000000000000 --- a/third_party/rust/hyper/src/header/common/if_match.rs +++ /dev/null @@ -1,69 +0,0 @@ -use header::EntityTag; - -header! { - /// `If-Match` header, defined in - /// [RFC7232](https://tools.ietf.org/html/rfc7232#section-3.1) - /// - /// The `If-Match` header field makes the request method conditional on - /// the recipient origin server either having at least one current - /// representation of the target resource, when the field-value is "*", - /// or having a current representation of the target resource that has an - /// entity-tag matching a member of the list of entity-tags provided in - /// the field-value. - /// - /// An origin server MUST use the strong comparison function when - /// comparing entity-tags for `If-Match`, since the client - /// intends this precondition to prevent the method from being applied if - /// there have been any changes to the representation data. 
- /// - /// # ABNF - /// ```plain - /// If-Match = "*" / 1#entity-tag - /// ``` - /// - /// # Example values - /// * `"xyzzy"` - /// * "xyzzy", "r2d2xxxx", "c3piozzzz" - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, IfMatch}; - /// - /// let mut headers = Headers::new(); - /// headers.set(IfMatch::Any); - /// ``` - /// ``` - /// use hyper::header::{Headers, IfMatch, EntityTag}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// IfMatch::Items(vec![ - /// EntityTag::new(false, "xyzzy".to_owned()), - /// EntityTag::new(false, "foobar".to_owned()), - /// EntityTag::new(false, "bazquux".to_owned()), - /// ]) - /// ); - /// ``` - (IfMatch, "If-Match") => {Any / (EntityTag)+} - - test_if_match { - test_header!( - test1, - vec![b"\"xyzzy\""], - Some(HeaderField::Items( - vec![EntityTag::new(false, "xyzzy".to_owned())]))); - test_header!( - test2, - vec![b"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\""], - Some(HeaderField::Items( - vec![EntityTag::new(false, "xyzzy".to_owned()), - EntityTag::new(false, "r2d2xxxx".to_owned()), - EntityTag::new(false, "c3piozzzz".to_owned())]))); - test_header!(test3, vec![b"*"], Some(IfMatch::Any)); - } -} - -bench_header!(star, IfMatch, { vec![b"*".to_vec()] }); -bench_header!(single , IfMatch, { vec![b"\"xyzzy\"".to_vec()] }); -bench_header!(multi, IfMatch, - { vec![b"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/if_modified_since.rs b/third_party/rust/hyper/src/header/common/if_modified_since.rs deleted file mode 100644 index 7a0aee2d1b4f..000000000000 --- a/third_party/rust/hyper/src/header/common/if_modified_since.rs +++ /dev/null @@ -1,45 +0,0 @@ -use header::HttpDate; - -header! 
{ - /// `If-Modified-Since` header, defined in - /// [RFC7232](http://tools.ietf.org/html/rfc7232#section-3.3) - /// - /// The `If-Modified-Since` header field makes a GET or HEAD request - /// method conditional on the selected representation's modification date - /// being more recent than the date provided in the field-value. - /// Transfer of the selected representation's data is avoided if that - /// data has not changed. - /// - /// # ABNF - /// ```plain - /// If-Unmodified-Since = HTTP-date - /// ``` - /// - /// # Example values - /// * `Sat, 29 Oct 1994 19:43:31 GMT` - /// - /// # Example - /// ``` - /// # extern crate hyper; - /// # extern crate time; - /// # fn main() { - /// // extern crate time; - /// - /// use hyper::header::{Headers, IfModifiedSince, HttpDate}; - /// use time::{self, Duration}; - /// - /// let mut headers = Headers::new(); - /// headers.set(IfModifiedSince(HttpDate(time::now() - Duration::days(1)))); - /// # } - /// ``` - (IfModifiedSince, "If-Modified-Since") => [HttpDate] - - test_if_modified_since { - // Testcase from RFC - test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]); - } -} - -bench_header!(imf_fixdate, IfModifiedSince, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); -bench_header!(rfc_850, IfModifiedSince, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); -bench_header!(asctime, IfModifiedSince, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/if_none_match.rs b/third_party/rust/hyper/src/header/common/if_none_match.rs deleted file mode 100644 index 734845df483a..000000000000 --- a/third_party/rust/hyper/src/header/common/if_none_match.rs +++ /dev/null @@ -1,83 +0,0 @@ -use header::EntityTag; - -header! 
{ - /// `If-None-Match` header, defined in - /// [RFC7232](https://tools.ietf.org/html/rfc7232#section-3.2) - /// - /// The `If-None-Match` header field makes the request method conditional - /// on a recipient cache or origin server either not having any current - /// representation of the target resource, when the field-value is "*", - /// or having a selected representation with an entity-tag that does not - /// match any of those listed in the field-value. - /// - /// A recipient MUST use the weak comparison function when comparing - /// entity-tags for If-None-Match (Section 2.3.2), since weak entity-tags - /// can be used for cache validation even if there have been changes to - /// the representation data. - /// - /// # ABNF - /// ```plain - /// If-None-Match = "*" / 1#entity-tag - /// ``` - /// - /// # Example values - /// * `"xyzzy"` - /// * `W/"xyzzy"` - /// * `"xyzzy", "r2d2xxxx", "c3piozzzz"` - /// * `W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz"` - /// * `*` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, IfNoneMatch}; - /// - /// let mut headers = Headers::new(); - /// headers.set(IfNoneMatch::Any); - /// ``` - /// ``` - /// use hyper::header::{Headers, IfNoneMatch, EntityTag}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// IfNoneMatch::Items(vec![ - /// EntityTag::new(false, "xyzzy".to_owned()), - /// EntityTag::new(false, "foobar".to_owned()), - /// EntityTag::new(false, "bazquux".to_owned()), - /// ]) - /// ); - /// ``` - (IfNoneMatch, "If-None-Match") => {Any / (EntityTag)+} - - test_if_none_match { - test_header!(test1, vec![b"\"xyzzy\""]); - test_header!(test2, vec![b"W/\"xyzzy\""]); - test_header!(test3, vec![b"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\""]); - test_header!(test4, vec![b"W/\"xyzzy\", W/\"r2d2xxxx\", W/\"c3piozzzz\""]); - test_header!(test5, vec![b"*"]); - } -} - -#[cfg(test)] -mod tests { - use super::IfNoneMatch; - use header::Header; - use header::EntityTag; - - #[test] - fn test_if_none_match() 
{ - let mut if_none_match: ::Result; - - if_none_match = Header::parse_header([b"*".to_vec()].as_ref()); - assert_eq!(if_none_match.ok(), Some(IfNoneMatch::Any)); - - if_none_match = Header::parse_header([b"\"foobar\", W/\"weak-etag\"".to_vec()].as_ref()); - let mut entities: Vec = Vec::new(); - let foobar_etag = EntityTag::new(false, "foobar".to_owned()); - let weak_etag = EntityTag::new(true, "weak-etag".to_owned()); - entities.push(foobar_etag); - entities.push(weak_etag); - assert_eq!(if_none_match.ok(), Some(IfNoneMatch::Items(entities))); - } -} - -bench_header!(bench, IfNoneMatch, { vec![b"W/\"nonemptytag\"".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/if_range.rs b/third_party/rust/hyper/src/header/common/if_range.rs deleted file mode 100644 index 7399b3a40c3b..000000000000 --- a/third_party/rust/hyper/src/header/common/if_range.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::fmt::{self, Display}; -use header::{self, Header, HeaderFormat, EntityTag, HttpDate}; - -/// `If-Range` header, defined in [RFC7233](http://tools.ietf.org/html/rfc7233#section-3.2) -/// -/// If a client has a partial copy of a representation and wishes to have -/// an up-to-date copy of the entire representation, it could use the -/// Range header field with a conditional GET (using either or both of -/// If-Unmodified-Since and If-Match.) However, if the precondition -/// fails because the representation has been modified, the client would -/// then have to make a second request to obtain the entire current -/// representation. -/// -/// The `If-Range` header field allows a client to \"short-circuit\" the -/// second request. Informally, its meaning is as follows: if the -/// representation is unchanged, send me the part(s) that I am requesting -/// in Range; otherwise, send me the entire representation. 
-/// -/// # ABNF -/// ```plain -/// If-Range = entity-tag / HTTP-date -/// ``` -/// -/// # Example values -/// * `Sat, 29 Oct 1994 19:43:31 GMT` -/// * `\"xyzzy\"` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, IfRange, EntityTag}; -/// -/// let mut headers = Headers::new(); -/// headers.set(IfRange::EntityTag(EntityTag::new(false, "xyzzy".to_owned()))); -/// ``` -/// ``` -/// # extern crate hyper; -/// # extern crate time; -/// # fn main() { -/// // extern crate time; -/// -/// use hyper::header::{Headers, IfRange, HttpDate}; -/// use time::{self, Duration}; -/// -/// let mut headers = Headers::new(); -/// headers.set(IfRange::Date(HttpDate(time::now() - Duration::days(1)))); -/// # } -/// ``` -#[derive(Clone, Debug, PartialEq)] -pub enum IfRange { - /// The entity-tag the client has of the resource - EntityTag(EntityTag), - /// The date when the client retrieved the resource - Date(HttpDate), -} - -impl Header for IfRange { - fn header_name() -> &'static str { - "If-Range" - } - fn parse_header(raw: &[Vec]) -> ::Result { - let etag: ::Result = header::parsing::from_one_raw_str(raw); - if etag.is_ok() { - return Ok(IfRange::EntityTag(etag.unwrap())); - } - let date: ::Result = header::parsing::from_one_raw_str(raw); - if date.is_ok() { - return Ok(IfRange::Date(date.unwrap())); - } - Err(::Error::Header) - } -} - -impl HeaderFormat for IfRange { - fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - match *self { - IfRange::EntityTag(ref x) => Display::fmt(x, f), - IfRange::Date(ref x) => Display::fmt(x, f), - } - } -} - -impl Display for IfRange { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.fmt_header(f) - } -} - -#[cfg(test)] -mod test_if_range { - use std::str; - use header::*; - use super::IfRange as HeaderField; - test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]); - test_header!(test2, vec![b"\"xyzzy\""]); - test_header!(test3, vec![b"this-is-invalid"], None::); -} diff --git 
a/third_party/rust/hyper/src/header/common/if_unmodified_since.rs b/third_party/rust/hyper/src/header/common/if_unmodified_since.rs deleted file mode 100644 index 9912416966d0..000000000000 --- a/third_party/rust/hyper/src/header/common/if_unmodified_since.rs +++ /dev/null @@ -1,45 +0,0 @@ -use header::HttpDate; - -header! { - /// `If-Unmodified-Since` header, defined in - /// [RFC7232](http://tools.ietf.org/html/rfc7232#section-3.4) - /// - /// The `If-Unmodified-Since` header field makes the request method - /// conditional on the selected representation's last modification date - /// being earlier than or equal to the date provided in the field-value. - /// This field accomplishes the same purpose as If-Match for cases where - /// the user agent does not have an entity-tag for the representation. - /// - /// # ABNF - /// ```plain - /// If-Unmodified-Since = HTTP-date - /// ``` - /// - /// # Example values - /// * `Sat, 29 Oct 1994 19:43:31 GMT` - /// - /// # Example - /// ``` - /// # extern crate hyper; - /// # extern crate time; - /// # fn main() { - /// // extern crate time; - /// - /// use hyper::header::{Headers, IfUnmodifiedSince, HttpDate}; - /// use time::{self, Duration}; - /// - /// let mut headers = Headers::new(); - /// headers.set(IfUnmodifiedSince(HttpDate(time::now() - Duration::days(1)))); - /// # } - /// ``` - (IfUnmodifiedSince, "If-Unmodified-Since") => [HttpDate] - - test_if_unmodified_since { - // Testcase from RFC - test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]); - } -} - -bench_header!(imf_fixdate, IfUnmodifiedSince, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); -bench_header!(rfc_850, IfUnmodifiedSince, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); -bench_header!(asctime, IfUnmodifiedSince, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/last-event-id.rs b/third_party/rust/hyper/src/header/common/last-event-id.rs deleted file mode 100644 index 
2c82d36342a3..000000000000 --- a/third_party/rust/hyper/src/header/common/last-event-id.rs +++ /dev/null @@ -1,30 +0,0 @@ -header! { - /// `Last-Event-ID` header, defined in - /// [RFC3864](https://html.spec.whatwg.org/multipage/references.html#refsRFC3864) - /// - /// The `Last-Event-ID` header contains information about - /// the last event in an http interaction so that it's easier to - /// track of event state. This is helpful when working - /// with [Server-Sent-Events](http://www.html5rocks.com/en/tutorials/eventsource/basics/). If the connection were to be dropped, for example, it'd - /// be useful to let the server know what the last event you - /// recieved was. - /// - /// The spec is a String with the id of the last event, it can be - /// an empty string which acts a sort of "reset". - /// - /// # Example - /// ``` - /// use hyper::header::{Headers, LastEventID}; - /// - /// let mut headers = Headers::new(); - /// headers.set(LastEventID("1".to_owned())); - /// ``` - (LastEventID, "Last-Event-ID") => [String] - - test_last_event_id { - // Initial state - test_header!(test1, vec![b""]); - // Own testcase - test_header!(test2, vec![b"1"], Some(LastEventID("1".to_owned()))); - } -} diff --git a/third_party/rust/hyper/src/header/common/last_modified.rs b/third_party/rust/hyper/src/header/common/last_modified.rs deleted file mode 100644 index 24fa1c397cc1..000000000000 --- a/third_party/rust/hyper/src/header/common/last_modified.rs +++ /dev/null @@ -1,43 +0,0 @@ -use header::HttpDate; - -header! { - /// `Last-Modified` header, defined in - /// [RFC7232](http://tools.ietf.org/html/rfc7232#section-2.2) - /// - /// The `Last-Modified` header field in a response provides a timestamp - /// indicating the date and time at which the origin server believes the - /// selected representation was last modified, as determined at the - /// conclusion of handling the request. 
- /// - /// # ABNF - /// ```plain - /// Expires = HTTP-date - /// ``` - /// - /// # Example values - /// * `Sat, 29 Oct 1994 19:43:31 GMT` - /// - /// # Example - /// ``` - /// # extern crate hyper; - /// # extern crate time; - /// # fn main() { - /// // extern crate time; - /// - /// use hyper::header::{Headers, LastModified, HttpDate}; - /// use time::{self, Duration}; - /// - /// let mut headers = Headers::new(); - /// headers.set(LastModified(HttpDate(time::now() - Duration::days(1)))); - /// # } - /// ``` - (LastModified, "Last-Modified") => [HttpDate] - - test_last_modified { - // Testcase from RFC - test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]);} -} - -bench_header!(imf_fixdate, LastModified, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); -bench_header!(rfc_850, LastModified, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); -bench_header!(asctime, LastModified, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/link.rs b/third_party/rust/hyper/src/header/common/link.rs deleted file mode 100644 index dec949d6ab5c..000000000000 --- a/third_party/rust/hyper/src/header/common/link.rs +++ /dev/null @@ -1,1110 +0,0 @@ -use std::fmt; -use std::borrow::Cow; -use std::str::FromStr; -use std::ascii::AsciiExt; - -use mime::Mime; -use language_tags::LanguageTag; - -use header::parsing; -use header::{Header, HeaderFormat}; - -/// The `Link` header, defined in -/// [RFC5988](http://tools.ietf.org/html/rfc5988#section-5) -/// -/// # ABNF -/// ```plain -/// Link = "Link" ":" #link-value -/// link-value = "<" URI-Reference ">" *( ";" link-param ) -/// link-param = ( ( "rel" "=" relation-types ) -/// | ( "anchor" "=" <"> URI-Reference <"> ) -/// | ( "rev" "=" relation-types ) -/// | ( "hreflang" "=" Language-Tag ) -/// | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) ) -/// | ( "title" "=" quoted-string ) -/// | ( "title*" "=" ext-value ) -/// | ( "type" "=" ( media-type | quoted-mt ) ) -/// | 
( link-extension ) ) -/// link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] ) -/// | ( ext-name-star "=" ext-value ) -/// ext-name-star = parmname "*" ; reserved for RFC2231-profiled -/// ; extensions. Whitespace NOT -/// ; allowed in between. -/// ptoken = 1*ptokenchar -/// ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "(" -/// | ")" | "*" | "+" | "-" | "." | "/" | DIGIT -/// | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA -/// | "[" | "]" | "^" | "_" | "`" | "{" | "|" -/// | "}" | "~" -/// media-type = type-name "/" subtype-name -/// quoted-mt = <"> media-type <"> -/// relation-types = relation-type -/// | <"> relation-type *( 1*SP relation-type ) <"> -/// relation-type = reg-rel-type | ext-rel-type -/// reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" ) -/// ext-rel-type = URI -/// ``` -/// -/// # Example values -/// -/// `Link: ; rel="previous"; -/// title="previous chapter"` -/// -/// `Link: ; rel="previous"; title*=UTF-8'de'letztes%20Kapitel, -/// ; rel="next"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Link, LinkValue, RelationType}; -/// -/// let link_value = LinkValue::new("http://example.com/TheBook/chapter2") -/// .push_rel(RelationType::Previous) -/// .set_title("previous chapter"); -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Link::new(vec![link_value]) -/// ); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct Link { - /// A list of the `link-value`s of the Link entity-header. - values: Vec -} - -/// A single `link-value` of a `Link` header, based on: -/// [RFC5988](http://tools.ietf.org/html/rfc5988#section-5) -#[derive(Clone, PartialEq, Debug)] -pub struct LinkValue { - /// Target IRI: `link-value`. - link: Cow<'static, str>, - - /// Forward Relation Types: `rel`. - rel: Option>, - - /// Context IRI: `anchor`. - anchor: Option, - - /// Reverse Relation Types: `rev`. 
- rev: Option>, - - /// Hint on the language of the result of dereferencing - /// the link: `hreflang`. - href_lang: Option>, - - /// Destination medium or media: `media`. - media_desc: Option>, - - /// Label of the destination of a Link: `title`. - title: Option, - - /// The `title` encoded in a different charset: `title*`. - title_star: Option, - - /// Hint on the media type of the result of dereferencing - /// the link: `type`. - media_type: Option, -} - -/// A Media Descriptors Enum based on: -/// https://www.w3.org/TR/html401/types.html#h-6.13 -#[derive(Clone, PartialEq, Debug)] -pub enum MediaDesc { - /// screen. - Screen, - /// tty. - Tty, - /// tv. - Tv, - /// projection. - Projection, - /// handheld. - Handheld, - /// print. - Print, - /// braille. - Braille, - /// aural. - Aural, - /// all. - All, - /// Unrecognized media descriptor extension. - Extension(String) -} - -/// A Link Relation Type Enum based on: -/// [RFC5988](https://tools.ietf.org/html/rfc5988#section-6.2.2) -#[derive(Clone, PartialEq, Debug)] -pub enum RelationType { - /// alternate. - Alternate, - /// appendix. - Appendix, - /// bookmark. - Bookmark, - /// chapter. - Chapter, - /// contents. - Contents, - /// copyright. - Copyright, - /// current. - Current, - /// describedby. - DescribedBy, - /// edit. - Edit, - /// edit-media. - EditMedia, - /// enclosure. - Enclosure, - /// first. - First, - /// glossary. - Glossary, - /// help. - Help, - /// hub. - Hub, - /// index. - Index, - /// last. - Last, - /// latest-version. - LatestVersion, - /// license. - License, - /// next. - Next, - /// next-archive. - NextArchive, - /// payment. - Payment, - /// prev. - Prev, - /// predecessor-version. - PredecessorVersion, - /// previous. - Previous, - /// prev-archive. - PrevArchive, - /// related. - Related, - /// replies. - Replies, - /// section. - Section, - /// self. - RelationTypeSelf, - /// service. - Service, - /// start. - Start, - /// stylesheet. - Stylesheet, - /// subsection. 
- Subsection, - /// successor-version. - SuccessorVersion, - /// up. - Up, - /// versionHistory. - VersionHistory, - /// via. - Via, - /// working-copy. - WorkingCopy, - /// working-copy-of. - WorkingCopyOf, - /// ext-rel-type. - ExtRelType(String) -} - -//////////////////////////////////////////////////////////////////////////////// -// Struct methods -//////////////////////////////////////////////////////////////////////////////// - -impl Link { - /// Create `Link` from a `Vec`. - pub fn new(link_values: Vec) -> Link { - Link { values: link_values } - } - - /// Get the `Link` header's `LinkValue`s. - pub fn values(&self) -> &[LinkValue] { - self.values.as_ref() - } - - /// Add a `LinkValue` instance to the `Link` header's values. - pub fn push_value(&mut self, link_value: LinkValue) { - self.values.push(link_value); - } -} - -impl LinkValue { - /// Create `LinkValue` from URI-Reference. - pub fn new(uri: T) -> LinkValue - where T: Into> { - LinkValue { - link: uri.into(), - rel: None, - anchor: None, - rev: None, - href_lang: None, - media_desc: None, - title: None, - title_star: None, - media_type: None, - } - } - - /// Get the `LinkValue`'s value. - pub fn link(&self) -> &str { - self.link.as_ref() - } - - /// Get the `LinkValue`'s `rel` parameter(s). - pub fn rel(&self) -> Option<&[RelationType]> { - self.rel.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `anchor` parameter. - pub fn anchor(&self) -> Option<&str> { - self.anchor.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `rev` parameter(s). - pub fn rev(&self) -> Option<&[RelationType]> { - self.rev.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `hreflang` parameter(s). - pub fn href_lang(&self) -> Option<&[LanguageTag]> { - self.href_lang.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `media` parameter(s). 
- pub fn media_desc(&self) -> Option<&[MediaDesc]> { - self.media_desc.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `title` parameter. - pub fn title(&self) -> Option<&str> { - self.title.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `title*` parameter. - pub fn title_star(&self) -> Option<&str> { - self.title_star.as_ref().map(AsRef::as_ref) - } - - /// Get the `LinkValue`'s `type` parameter. - pub fn media_type(&self) -> Option<&Mime> { - self.media_type.as_ref() - } - - /// Add a `RelationType` to the `LinkValue`'s `rel` parameter. - pub fn push_rel(mut self, rel: RelationType) -> LinkValue { - let mut v = self.rel.take().unwrap_or(Vec::new()); - - v.push(rel); - - self.rel = Some(v); - - self - } - - /// Set `LinkValue`'s `anchor` parameter. - pub fn set_anchor>(mut self, anchor: T) -> LinkValue { - self.anchor = Some(anchor.into()); - - self - } - - /// Add a `RelationType` to the `LinkValue`'s `rev` parameter. - pub fn push_rev(mut self, rev: RelationType) -> LinkValue { - let mut v = self.rev.take().unwrap_or(Vec::new()); - - v.push(rev); - - self.rev = Some(v); - - self - } - - /// Add a `LanguageTag` to the `LinkValue`'s `hreflang` parameter. - pub fn push_href_lang(mut self, language_tag: LanguageTag) -> LinkValue { - let mut v = self.href_lang.take().unwrap_or(Vec::new()); - - v.push(language_tag); - - self.href_lang = Some(v); - - self - } - - /// Add a `MediaDesc` to the `LinkValue`'s `media_desc` parameter. - pub fn push_media_desc(mut self, media_desc: MediaDesc) -> LinkValue { - let mut v = self.media_desc.take().unwrap_or(Vec::new()); - - v.push(media_desc); - - self.media_desc = Some(v); - - self - } - - /// Set `LinkValue`'s `title` parameter. - pub fn set_title>(mut self, title: T) -> LinkValue { - self.title = Some(title.into()); - - self - } - - /// Set `LinkValue`'s `title*` parameter. 
- pub fn set_title_star>(mut self, title_star: T) -> LinkValue { - self.title_star = Some(title_star.into()); - - self - } - - /// Set `LinkValue`'s `type` parameter. - pub fn set_media_type(mut self, media_type: Mime) -> LinkValue { - self.media_type = Some(media_type); - - self - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Trait implementations -//////////////////////////////////////////////////////////////////////////////// - -impl Header for Link { - fn header_name() -> &'static str { - static NAME: &'static str = "Link"; - NAME - } - - fn parse_header(raw: &[Vec]) -> ::Result { - // If more that one `Link` headers are present in a request's - // headers they are combined in a single `Link` header containing - // all the `link-value`s present in each of those `Link` headers. - raw.iter() - .map(|v| parsing::from_raw_str::(&v)) - .fold(None, |p, c| match (p, c) { - (None, c) => Some(c), - (e @ Some(Err(_)), _) => e, - (Some(Ok(mut p)), Ok(c)) => { - p.values.extend(c.values); - - Some(Ok(p)) - } - _ => Some(Err(::Error::Header)), - }) - .unwrap_or(Err(::Error::Header)) - } -} -impl HeaderFormat for Link { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt_delimited(f, self.values.as_slice(), ", ", ("", "")) - } -} - -impl fmt::Display for Link { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.fmt_header(f) - } -} - -impl fmt::Display for LinkValue { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "<{}>", self.link)); - - if let Some(ref rel) = self.rel { - try!(fmt_delimited(f, rel.as_slice(), " ", ("; rel=\"", "\""))); - } - if let Some(ref anchor) = self.anchor { - try!(write!(f, "; anchor=\"{}\"", anchor)); - } - if let Some(ref rev) = self.rev { - try!(fmt_delimited(f, rev.as_slice(), " ", ("; rev=\"", "\""))); - } - if let Some(ref href_lang) = self.href_lang { - for tag in href_lang { - try!(write!(f, "; hreflang={}", tag)); - } - } - if let 
Some(ref media_desc) = self.media_desc { - try!(fmt_delimited(f, media_desc.as_slice(), ", ", ("; media=\"", "\""))); - } - if let Some(ref title) = self.title { - try!(write!(f, "; title=\"{}\"", title)); - } - if let Some(ref title_star) = self.title_star { - try!(write!(f, "; title*={}", title_star)); - } - if let Some(ref media_type) = self.media_type { - try!(write!(f, "; type=\"{}\"", media_type)); - } - - Ok(()) - } -} - -impl FromStr for Link { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - // Create a split iterator with delimiters: `;`, `,` - let link_split = SplitAsciiUnquoted::new(s, ";,"); - - let mut link_values: Vec = Vec::new(); - - // Loop over the splits parsing the Link header into - // a `Vec` - for segment in link_split { - // Parse the `Target IRI` - // https://tools.ietf.org/html/rfc5988#section-5.1 - if segment.trim().starts_with('<') { - link_values.push( - match verify_and_trim(segment.trim(), (b'<', b'>')) { - Err(_) => return Err(::Error::Header), - Ok(s) => { - LinkValue { - link: s.to_owned().into(), - rel: None, - anchor: None, - rev: None, - href_lang: None, - media_desc: None, - title: None, - title_star: None, - media_type: None, - } - }, - } - ); - } else { - // Parse the current link-value's parameters - let mut link_param_split = segment.splitn(2, '='); - - let link_param_name = match link_param_split.next() { - None => return Err(::Error::Header), - Some(p) => p.trim(), - }; - - let link_header = match link_values.last_mut() { - None => return Err(::Error::Header), - Some(l) => l, - }; - - if "rel".eq_ignore_ascii_case(link_param_name) { - // Parse relation type: `rel`. 
- // https://tools.ietf.org/html/rfc5988#section-5.3 - if link_header.rel.is_none() { - link_header.rel = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => { - s.trim_matches(|c: char| c == '"' || c.is_whitespace()) - .split(' ') - .map(|t| t.trim().parse()) - .collect::, _>>() - .or_else(|_| return Err(::Error::Header)) - .ok() - }, - }; - } - } else if "anchor".eq_ignore_ascii_case(link_param_name) { - // Parse the `Context IRI`. - // https://tools.ietf.org/html/rfc5988#section-5.2 - link_header.anchor = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => match verify_and_trim(s.trim(), (b'"', b'"')) { - Err(_) => return Err(::Error::Header), - Ok(a) => Some(String::from(a)), - }, - }; - } else if "rev".eq_ignore_ascii_case(link_param_name) { - // Parse relation type: `rev`. - // https://tools.ietf.org/html/rfc5988#section-5.3 - if link_header.rev.is_none() { - link_header.rev = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => { - s.trim_matches(|c: char| c == '"' || c.is_whitespace()) - .split(' ') - .map(|t| t.trim().parse()) - .collect::, _>>() - .or_else(|_| return Err(::Error::Header)) - .ok() - }, - } - } - } else if "hreflang".eq_ignore_ascii_case(link_param_name) { - // Parse target attribute: `hreflang`. - // https://tools.ietf.org/html/rfc5988#section-5.4 - let mut v = link_header.href_lang.take().unwrap_or(Vec::new()); - - v.push( - match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => match s.trim().parse() { - Err(_) => return Err(::Error::Header), - Ok(t) => t, - }, - } - ); - - link_header.href_lang = Some(v); - } else if "media".eq_ignore_ascii_case(link_param_name) { - // Parse target attribute: `media`. 
- // https://tools.ietf.org/html/rfc5988#section-5.4 - if link_header.media_desc.is_none() { - link_header.media_desc = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => { - s.trim_matches(|c: char| c == '"' || c.is_whitespace()) - .split(',') - .map(|t| t.trim().parse()) - .collect::, _>>() - .or_else(|_| return Err(::Error::Header)) - .ok() - }, - }; - } - } else if "title".eq_ignore_ascii_case(link_param_name) { - // Parse target attribute: `title`. - // https://tools.ietf.org/html/rfc5988#section-5.4 - if link_header.title.is_none() { - link_header.title = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => match verify_and_trim(s.trim(), (b'"', b'"')) { - Err(_) => return Err(::Error::Header), - Ok(t) => Some(String::from(t)), - }, - }; - } - } else if "title*".eq_ignore_ascii_case(link_param_name) { - // Parse target attribute: `title*`. - // https://tools.ietf.org/html/rfc5988#section-5.4 - // - // Definition of `ext-value`: - // https://tools.ietf.org/html/rfc5987#section-3.2.1 - if link_header.title_star.is_none() { - link_header.title_star = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => Some(String::from(s.trim())), - }; - } - } else if "type".eq_ignore_ascii_case(link_param_name) { - // Parse target attribute: `type`. 
- // https://tools.ietf.org/html/rfc5988#section-5.4 - if link_header.media_type.is_none() { - link_header.media_type = match link_param_split.next() { - None => return Err(::Error::Header), - Some("") => return Err(::Error::Header), - Some(s) => match verify_and_trim(s.trim(), (b'"', b'"')) { - Err(_) => return Err(::Error::Header), - Ok(t) => match t.parse() { - Err(_) => return Err(::Error::Header), - Ok(m) => Some(m), - }, - }, - - }; - } - } else { - return Err(::Error::Header); - } - } - } - - Ok(Link::new(link_values)) - } -} - -impl fmt::Display for MediaDesc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - MediaDesc::Screen => write!(f, "screen"), - MediaDesc::Tty => write!(f, "tty"), - MediaDesc::Tv => write!(f, "tv"), - MediaDesc::Projection => write!(f, "projection"), - MediaDesc::Handheld => write!(f, "handheld"), - MediaDesc::Print => write!(f, "print"), - MediaDesc::Braille => write!(f, "braille"), - MediaDesc::Aural => write!(f, "aural"), - MediaDesc::All => write!(f, "all"), - MediaDesc::Extension(ref other) => write!(f, "{}", other), - } - } -} - -impl FromStr for MediaDesc { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - match s { - "screen" => Ok(MediaDesc::Screen), - "tty" => Ok(MediaDesc::Tty), - "tv" => Ok(MediaDesc::Tv), - "projection" => Ok(MediaDesc::Projection), - "handheld" => Ok(MediaDesc::Handheld), - "print" => Ok(MediaDesc::Print), - "braille" => Ok(MediaDesc::Braille), - "aural" => Ok(MediaDesc::Aural), - "all" => Ok(MediaDesc::All), - _ => Ok(MediaDesc::Extension(String::from(s))), - } - } -} - -impl fmt::Display for RelationType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - RelationType::Alternate => write!(f, "alternate"), - RelationType::Appendix => write!(f, "appendix"), - RelationType::Bookmark => write!(f, "bookmark"), - RelationType::Chapter => write!(f, "chapter"), - RelationType::Contents => write!(f, "contents"), - RelationType::Copyright => 
write!(f, "copyright"), - RelationType::Current => write!(f, "current"), - RelationType::DescribedBy => write!(f, "describedby"), - RelationType::Edit => write!(f, "edit"), - RelationType::EditMedia => write!(f, "edit-media"), - RelationType::Enclosure => write!(f, "enclosure"), - RelationType::First => write!(f, "first"), - RelationType::Glossary => write!(f, "glossary"), - RelationType::Help => write!(f, "help"), - RelationType::Hub => write!(f, "hub"), - RelationType::Index => write!(f, "index"), - RelationType::Last => write!(f, "last"), - RelationType::LatestVersion => write!(f, "latest-version"), - RelationType::License => write!(f, "license"), - RelationType::Next => write!(f, "next"), - RelationType::NextArchive => write!(f, "next-archive"), - RelationType::Payment => write!(f, "payment"), - RelationType::Prev => write!(f, "prev"), - RelationType::PredecessorVersion => write!(f, "predecessor-version"), - RelationType::Previous => write!(f, "previous"), - RelationType::PrevArchive => write!(f, "prev-archive"), - RelationType::Related => write!(f, "related"), - RelationType::Replies => write!(f, "replies"), - RelationType::Section => write!(f, "section"), - RelationType::RelationTypeSelf => write!(f, "self"), - RelationType::Service => write!(f, "service"), - RelationType::Start => write!(f, "start"), - RelationType::Stylesheet => write!(f, "stylesheet"), - RelationType::Subsection => write!(f, "subsection"), - RelationType::SuccessorVersion => write!(f, "successor-version"), - RelationType::Up => write!(f, "up"), - RelationType::VersionHistory => write!(f, "version-history"), - RelationType::Via => write!(f, "via"), - RelationType::WorkingCopy => write!(f, "working-copy"), - RelationType::WorkingCopyOf => write!(f, "working-copy-of"), - RelationType::ExtRelType(ref uri) => write!(f, "{}", uri), - } - } -} - -impl FromStr for RelationType { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - if "alternate".eq_ignore_ascii_case(s) { - 
Ok(RelationType::Alternate) - } else if "appendix".eq_ignore_ascii_case(s) { - Ok(RelationType::Appendix) - } else if "bookmark".eq_ignore_ascii_case(s) { - Ok(RelationType::Bookmark) - } else if "chapter".eq_ignore_ascii_case(s) { - Ok(RelationType::Chapter) - } else if "contents".eq_ignore_ascii_case(s) { - Ok(RelationType::Contents) - } else if "copyright".eq_ignore_ascii_case(s) { - Ok(RelationType::Copyright) - } else if "current".eq_ignore_ascii_case(s) { - Ok(RelationType::Current) - } else if "describedby".eq_ignore_ascii_case(s) { - Ok(RelationType::DescribedBy) - } else if "edit".eq_ignore_ascii_case(s) { - Ok(RelationType::Edit) - } else if "edit-media".eq_ignore_ascii_case(s) { - Ok(RelationType::EditMedia) - } else if "enclosure".eq_ignore_ascii_case(s) { - Ok(RelationType::Enclosure) - } else if "first".eq_ignore_ascii_case(s) { - Ok(RelationType::First) - } else if "glossary".eq_ignore_ascii_case(s) { - Ok(RelationType::Glossary) - } else if "help".eq_ignore_ascii_case(s) { - Ok(RelationType::Help) - } else if "hub".eq_ignore_ascii_case(s) { - Ok(RelationType::Hub) - } else if "index".eq_ignore_ascii_case(s) { - Ok(RelationType::Index) - } else if "last".eq_ignore_ascii_case(s) { - Ok(RelationType::Last) - } else if "latest-version".eq_ignore_ascii_case(s) { - Ok(RelationType::LatestVersion) - } else if "license".eq_ignore_ascii_case(s) { - Ok(RelationType::License) - } else if "next".eq_ignore_ascii_case(s) { - Ok(RelationType::Next) - } else if "next-archive".eq_ignore_ascii_case(s) { - Ok(RelationType::NextArchive) - } else if "payment".eq_ignore_ascii_case(s) { - Ok(RelationType::Payment) - } else if "prev".eq_ignore_ascii_case(s) { - Ok(RelationType::Prev) - } else if "predecessor-version".eq_ignore_ascii_case(s) { - Ok(RelationType::PredecessorVersion) - } else if "previous".eq_ignore_ascii_case(s) { - Ok(RelationType::Previous) - } else if "prev-archive".eq_ignore_ascii_case(s) { - Ok(RelationType::PrevArchive) - } else if 
"related".eq_ignore_ascii_case(s) { - Ok(RelationType::Related) - } else if "replies".eq_ignore_ascii_case(s) { - Ok(RelationType::Replies) - } else if "section".eq_ignore_ascii_case(s) { - Ok(RelationType::Section) - } else if "self".eq_ignore_ascii_case(s) { - Ok(RelationType::RelationTypeSelf) - } else if "service".eq_ignore_ascii_case(s) { - Ok(RelationType::Service) - } else if "start".eq_ignore_ascii_case(s) { - Ok(RelationType::Start) - } else if "stylesheet".eq_ignore_ascii_case(s) { - Ok(RelationType::Stylesheet) - } else if "subsection".eq_ignore_ascii_case(s) { - Ok(RelationType::Subsection) - } else if "successor-version".eq_ignore_ascii_case(s) { - Ok(RelationType::SuccessorVersion) - } else if "up".eq_ignore_ascii_case(s) { - Ok(RelationType::Up) - } else if "version-history".eq_ignore_ascii_case(s) { - Ok(RelationType::VersionHistory) - } else if "via".eq_ignore_ascii_case(s) { - Ok(RelationType::Via) - } else if "working-copy".eq_ignore_ascii_case(s) { - Ok(RelationType::WorkingCopy) - } else if "working-copy-of".eq_ignore_ascii_case(s) { - Ok(RelationType::WorkingCopyOf) - } else { - Ok(RelationType::ExtRelType(String::from(s))) - } - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Utilities -//////////////////////////////////////////////////////////////////////////////// - -struct SplitAsciiUnquoted<'a> { - src: &'a str, - pos: usize, - del: &'a str -} - -impl<'a> SplitAsciiUnquoted<'a> { - fn new(s: &'a str, d: &'a str) -> SplitAsciiUnquoted<'a> { - SplitAsciiUnquoted{ - src: s, - pos: 0, - del: d, - } - } -} - -impl<'a> Iterator for SplitAsciiUnquoted<'a> { - type Item = &'a str; - - fn next(&mut self) -> Option<&'a str> { - if self.pos < self.src.len() { - let prev_pos = self.pos; - let mut pos = self.pos; - - let mut in_quotes = false; - - for c in self.src[prev_pos..].as_bytes().iter() { - in_quotes ^= *c == b'"'; - - // Ignore `c` if we're `in_quotes`. 
- if !in_quotes && self.del.as_bytes().contains(c) { - break; - } - - pos += 1; - } - - self.pos = pos + 1; - - Some(&self.src[prev_pos..pos]) - } else { - None - } - } -} - -fn fmt_delimited(f: &mut fmt::Formatter, p: &[T], d: &str, b: (&str, &str)) -> fmt::Result { - if p.len() != 0 { - // Write a starting string `b.0` before the first element - try!(write!(f, "{}{}", b.0, p[0])); - - for i in &p[1..] { - // Write the next element preceded by the delimiter `d` - try!(write!(f, "{}{}", d, i)); - } - - // Write a ending string `b.1` before the first element - try!(write!(f, "{}", b.1)); - } - - Ok(()) -} - -fn verify_and_trim(s: &str, b: (u8, u8)) -> ::Result<&str> { - let length = s.len(); - let byte_array = s.as_bytes(); - - // Verify that `s` starts with `b.0` and ends with `b.1` and return - // the contained substring after triming whitespace. - if length > 1 && b.0 == byte_array[0] && b.1 == byte_array[length - 1] { - Ok(s.trim_matches( - |c: char| c == b.0 as char || c == b.1 as char || c.is_whitespace()) - ) - } else { - Err(::Error::Header) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Tests -//////////////////////////////////////////////////////////////////////////////// - -#[cfg(test)] -mod tests { - use std::fmt; - use std::fmt::Write; - - use super::{Link, LinkValue, MediaDesc, RelationType, SplitAsciiUnquoted}; - use super::{fmt_delimited, verify_and_trim}; - - use header::Header; - - use buffer::BufReader; - use mock::MockStream; - use http::h1::parse_request; - - use mime::Mime; - use mime::TopLevel::Text; - use mime::SubLevel::Plain; - - #[test] - fn test_link() { - let link_value = LinkValue::new("http://example.com/TheBook/chapter2") - .push_rel(RelationType::Previous) - .push_rev(RelationType::Next) - .set_title("previous chapter"); - - let link_header = b"; \ - rel=\"previous\"; rev=next; title=\"previous chapter\""; - - let expected_link = Link::new(vec![link_value]); - - let link = 
Header::parse_header(&vec![link_header.to_vec()]); - assert_eq!(link.ok(), Some(expected_link)); - } - - #[test] - fn test_link_multiple_values() { - let first_link = LinkValue::new("/TheBook/chapter2") - .push_rel(RelationType::Previous) - .set_title_star("UTF-8'de'letztes%20Kapitel"); - - let second_link = LinkValue::new("/TheBook/chapter4") - .push_rel(RelationType::Next) - .set_title_star("UTF-8'de'n%c3%a4chstes%20Kapitel"); - - let link_header = b"; \ - rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel, \ - ; \ - rel=\"next\"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel"; - - let expected_link = Link::new(vec![first_link, second_link]); - - let link = Header::parse_header(&vec![link_header.to_vec()]); - assert_eq!(link.ok(), Some(expected_link)); - } - - #[test] - fn test_link_all_attributes() { - let link_value = LinkValue::new("http://example.com/TheBook/chapter2") - .push_rel(RelationType::Previous) - .set_anchor("../anchor/example/") - .push_rev(RelationType::Next) - .push_href_lang(langtag!(de)) - .push_media_desc(MediaDesc::Screen) - .set_title("previous chapter") - .set_title_star("title* unparsed") - .set_media_type(Mime(Text, Plain, vec![])); - - let link_header = b"; \ - rel=\"previous\"; anchor=\"../anchor/example/\"; \ - rev=\"next\"; hreflang=de; media=\"screen\"; \ - title=\"previous chapter\"; title*=title* unparsed; \ - type=\"text/plain\""; - - let expected_link = Link::new(vec![link_value]); - - let link = Header::parse_header(&vec![link_header.to_vec()]); - assert_eq!(link.ok(), Some(expected_link)); - } - - #[test] - fn test_link_multiple_link_headers() { - let first_link = LinkValue::new("/TheBook/chapter2") - .push_rel(RelationType::Previous) - .set_title_star("UTF-8'de'letztes%20Kapitel"); - - let second_link = LinkValue::new("/TheBook/chapter4") - .push_rel(RelationType::Next) - .set_title_star("UTF-8'de'n%c3%a4chstes%20Kapitel"); - - let third_link = LinkValue::new("http://example.com/TheBook/chapter2") - 
.push_rel(RelationType::Previous) - .push_rev(RelationType::Next) - .set_title("previous chapter"); - - let expected_link = Link::new(vec![first_link, second_link, third_link]); - - let mut raw = MockStream::with_input(b"GET /super_short_uri/and_whatever HTTP/1.1\r\nHost: \ - hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \ - utf8\r\nAccept-Encoding: *\r\nLink: ; \ - rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel, \ - ; rel=\"next\"; title*=\ - UTF-8'de'n%c3%a4chstes%20Kapitel\r\n\ - Access-Control-Allow-Credentials: None\r\nLink: \ - ; \ - rel=\"previous\"; rev=next; title=\"previous chapter\"\ - \r\n\r\n"); - - let mut buf = BufReader::new(&mut raw); - let res = parse_request(&mut buf).unwrap(); - - let link = res.headers.get::().unwrap(); - - assert_eq!(*link, expected_link); - } - - #[test] - fn test_link_display() { - let link_value = LinkValue::new("http://example.com/TheBook/chapter2") - .push_rel(RelationType::Previous) - .set_anchor("/anchor/example/") - .push_rev(RelationType::Next) - .push_href_lang(langtag!(de)) - .push_media_desc(MediaDesc::Screen) - .set_title("previous chapter") - .set_title_star("title* unparsed") - .set_media_type(Mime(Text, Plain, vec![])); - - let link = Link::new(vec![link_value]); - - let mut link_header = String::new(); - write!(&mut link_header, "{}", link).unwrap(); - - let expected_link_header = "; \ - rel=\"previous\"; anchor=\"/anchor/example/\"; \ - rev=\"next\"; hreflang=de; media=\"screen\"; \ - title=\"previous chapter\"; title*=title* unparsed; \ - type=\"text/plain\""; - - assert_eq!(link_header, expected_link_header); - } - - #[test] - fn test_link_parsing_errors() { - let link_a = b"http://example.com/TheBook/chapter2; \ - rel=\"previous\"; rev=next; title=\"previous chapter\""; - - let mut err: Result = Header::parse_header(&vec![link_a.to_vec()]); - assert_eq!(err.is_err(), true); - - let link_b = b"; \ - =\"previous\"; rev=next; title=\"previous chapter\""; - - err = 
Header::parse_header(&vec![link_b.to_vec()]); - assert_eq!(err.is_err(), true); - - let link_c = b"; \ - rel=; rev=next; title=\"previous chapter\""; - - err = Header::parse_header(&vec![link_c.to_vec()]); - assert_eq!(err.is_err(), true); - - let link_d = b"; \ - rel=\"previous\"; rev=next; title="; - - err = Header::parse_header(&vec![link_d.to_vec()]); - assert_eq!(err.is_err(), true); - - let link_e = b"; \ - rel=\"previous\"; rev=next; attr=unknown"; - - err = Header::parse_header(&vec![link_e.to_vec()]); - assert_eq!(err.is_err(), true); - } - - #[test] - fn test_link_split_ascii_unquoted_iterator() { - let string = "some, text; \"and, more; in quotes\", or not"; - let mut string_split = SplitAsciiUnquoted::new(string, ";,"); - - assert_eq!(Some("some"), string_split.next()); - assert_eq!(Some(" text"), string_split.next()); - assert_eq!(Some(" \"and, more; in quotes\""), string_split.next()); - assert_eq!(Some(" or not"), string_split.next()); - assert_eq!(None, string_split.next()); - } - - #[test] - fn test_link_fmt_delimited() { - struct TestFormatterStruct<'a> { v: Vec<&'a str> }; - - impl<'a> fmt::Display for TestFormatterStruct<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt_delimited(f, self.v.as_slice(), ", ", (">>", "<<")) - } - } - - let test_formatter = TestFormatterStruct { v: vec!["first", "second"] }; - - let mut string = String::new(); - write!(&mut string, "{}", test_formatter).unwrap(); - - let expected_string = ">>first, second<<"; - - assert_eq!(string, expected_string); - } - - #[test] - fn test_link_verify_and_trim() { - let string = verify_and_trim("> some string <", (b'>', b'<')); - assert_eq!(string.ok(), Some("some string")); - - let err = verify_and_trim(" > some string <", (b'>', b'<')); - assert_eq!(err.is_err(), true); - } -} - -bench_header!(bench_link, Link, { vec![b"; rel=\"previous\"; rev=next; title=\"previous chapter\"; type=\"text/html\"; media=\"screen, tty\"".to_vec()] }); diff --git 
a/third_party/rust/hyper/src/header/common/location.rs b/third_party/rust/hyper/src/header/common/location.rs deleted file mode 100644 index 5369e87e7aae..000000000000 --- a/third_party/rust/hyper/src/header/common/location.rs +++ /dev/null @@ -1,43 +0,0 @@ -header! { - /// `Location` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.1.2) - /// - /// The `Location` header field is used in some responses to refer to a - /// specific resource in relation to the response. The type of - /// relationship is defined by the combination of request method and - /// status code semantics. - /// - /// # ABNF - /// ```plain - /// Location = URI-reference - /// ``` - /// - /// # Example values - /// * `/People.html#tim` - /// * `http://www.example.net/index.html` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, Location}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Location("/People.html#tim".to_owned())); - /// ``` - /// ``` - /// use hyper::header::{Headers, Location}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Location("http://www.example.com/index.html".to_owned())); - /// ``` - // TODO: Use URL - (Location, "Location") => [String] - - test_location { - // Testcase from RFC - test_header!(test1, vec![b"/People.html#tim"]); - test_header!(test2, vec![b"http://www.example.net/index.html"]); - } - -} - -bench_header!(bench, Location, { vec![b"http://foo.com/hello:3000".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/mod.rs b/third_party/rust/hyper/src/header/common/mod.rs deleted file mode 100644 index 3b96f5157b11..000000000000 --- a/third_party/rust/hyper/src/header/common/mod.rs +++ /dev/null @@ -1,395 +0,0 @@ -//! A Collection of Header implementations for common HTTP Headers. -//! -//! ## Mime -//! -//! Several header fields use MIME values for their contents. Keeping with the -//! strongly-typed theme, the [mime](http://seanmonstar.github.io/mime.rs) crate -//! 
is used, such as `ContentType(pub Mime)`. - -pub use self::accept::Accept; -pub use self::access_control_allow_credentials::AccessControlAllowCredentials; -pub use self::access_control_allow_headers::AccessControlAllowHeaders; -pub use self::access_control_allow_methods::AccessControlAllowMethods; -pub use self::access_control_allow_origin::AccessControlAllowOrigin; -pub use self::access_control_expose_headers::AccessControlExposeHeaders; -pub use self::access_control_max_age::AccessControlMaxAge; -pub use self::access_control_request_headers::AccessControlRequestHeaders; -pub use self::access_control_request_method::AccessControlRequestMethod; -pub use self::accept_charset::AcceptCharset; -pub use self::accept_encoding::AcceptEncoding; -pub use self::accept_language::AcceptLanguage; -pub use self::accept_ranges::{AcceptRanges, RangeUnit}; -pub use self::allow::Allow; -pub use self::authorization::{Authorization, Scheme, Basic, Bearer}; -pub use self::cache_control::{CacheControl, CacheDirective}; -pub use self::connection::{Connection, ConnectionOption}; -pub use self::content_disposition::{ContentDisposition, DispositionType, DispositionParam}; -pub use self::content_length::ContentLength; -pub use self::content_encoding::ContentEncoding; -pub use self::content_language::ContentLanguage; -pub use self::content_range::{ContentRange, ContentRangeSpec}; -pub use self::content_type::ContentType; -pub use self::cookie::Cookie; -pub use self::date::Date; -pub use self::etag::ETag; -pub use self::expect::Expect; -pub use self::expires::Expires; -pub use self::from::From; -pub use self::host::Host; -pub use self::if_match::IfMatch; -pub use self::if_modified_since::IfModifiedSince; -pub use self::if_none_match::IfNoneMatch; -pub use self::if_unmodified_since::IfUnmodifiedSince; -pub use self::if_range::IfRange; -pub use self::last_modified::LastModified; -pub use self::location::Location; -pub use self::origin::Origin; -pub use self::pragma::Pragma; -pub use 
self::prefer::{Prefer, Preference}; -pub use self::preference_applied::PreferenceApplied; -pub use self::range::{Range, ByteRangeSpec}; -pub use self::referer::Referer; -pub use self::referrer_policy::ReferrerPolicy; -pub use self::server::Server; -pub use self::set_cookie::SetCookie; -pub use self::strict_transport_security::StrictTransportSecurity; -pub use self::transfer_encoding::TransferEncoding; -pub use self::upgrade::{Upgrade, Protocol, ProtocolName}; -pub use self::user_agent::UserAgent; -pub use self::vary::Vary; -pub use self::link::{Link, LinkValue, RelationType, MediaDesc}; - -#[doc(hidden)] -#[macro_export] -macro_rules! bench_header( - ($name:ident, $ty:ty, $value:expr) => { - #[cfg(test)] - #[cfg(feature = "nightly")] - #[allow(deprecated)] - mod $name { - use test::Bencher; - use super::*; - - use header::{Header, HeaderFormatter}; - - #[bench] - fn bench_parse(b: &mut Bencher) { - let val = $value; - b.iter(|| { - let _: $ty = Header::parse_header(&val[..]).unwrap(); - }); - } - - #[bench] - fn bench_format(b: &mut Bencher) { - let val: $ty = Header::parse_header(&$value[..]).unwrap(); - let fmt = HeaderFormatter(&val); - b.iter(|| { - format!("{}", fmt); - }); - } - } - } -); - -#[doc(hidden)] -#[macro_export] -macro_rules! __hyper__deref { - ($from:ty => $to:ty) => { - impl ::std::ops::Deref for $from { - type Target = $to; - - fn deref(&self) -> &$to { - &self.0 - } - } - - impl ::std::ops::DerefMut for $from { - fn deref_mut(&mut self) -> &mut $to { - &mut self.0 - } - } - } -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __hyper__tm { - ($id:ident, $tm:ident{$($tf:item)*}) => { - #[allow(unused_imports)] - #[cfg(test)] - mod $tm{ - use std::str; - use $crate::header::*; - use $crate::mime::*; - use $crate::language_tags::*; - use $crate::method::Method; - use super::$id as HeaderField; - $($tf)* - } - - } -} - -#[doc(hidden)] -#[macro_export] -macro_rules! 
test_header { - ($id:ident, $raw:expr) => { - #[test] - fn $id() { - use std::ascii::AsciiExt; - let raw = $raw; - let a: Vec> = raw.iter().map(|x| x.to_vec()).collect(); - let value = HeaderField::parse_header(&a[..]); - let result = format!("{}", value.unwrap()); - let expected = String::from_utf8(raw[0].to_vec()).unwrap(); - let result_cmp: Vec = result - .to_ascii_lowercase() - .split(' ') - .map(|x| x.to_owned()) - .collect(); - let expected_cmp: Vec = expected - .to_ascii_lowercase() - .split(' ') - .map(|x| x.to_owned()) - .collect(); - assert_eq!(result_cmp.concat(), expected_cmp.concat()); - } - }; - ($id:ident, $raw:expr, $typed:expr) => { - #[test] - fn $id() { - let a: Vec> = $raw.iter().map(|x| x.to_vec()).collect(); - let val = HeaderField::parse_header(&a[..]); - let typed: Option = $typed; - // Test parsing - assert_eq!(val.ok(), typed); - // Test formatting - if typed.is_some() { - let raw = &($raw)[..]; - let mut iter = raw.iter().map(|b|str::from_utf8(&b[..]).unwrap()); - let mut joined = String::new(); - joined.push_str(iter.next().unwrap()); - for s in iter { - joined.push_str(", "); - joined.push_str(s); - } - assert_eq!(format!("{}", typed.unwrap()), joined); - } - } - } -} - -#[macro_export] -macro_rules! 
header { - // $a:meta: Attributes associated with the header item (usually docs) - // $id:ident: Identifier of the header - // $n:expr: Lowercase name of the header - // $nn:expr: Nice name of the header - - // List header, zero or more items - ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)*) => { - $(#[$a])* - #[derive(Clone, Debug, PartialEq)] - pub struct $id(pub Vec<$item>); - __hyper__deref!($id => Vec<$item>); - impl $crate::header::Header for $id { - fn header_name() -> &'static str { - $n - } - fn parse_header(raw: &[Vec]) -> $crate::Result { - $crate::header::parsing::from_comma_delimited(raw).map($id) - } - } - impl $crate::header::HeaderFormat for $id { - fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - $crate::header::parsing::fmt_comma_delimited(f, &self.0[..]) - } - } - impl ::std::fmt::Display for $id { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - use $crate::header::HeaderFormat; - self.fmt_header(f) - } - } - }; - // List header, one or more items - ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)+) => { - $(#[$a])* - #[derive(Clone, Debug, PartialEq)] - pub struct $id(pub Vec<$item>); - __hyper__deref!($id => Vec<$item>); - impl $crate::header::Header for $id { - fn header_name() -> &'static str { - $n - } - fn parse_header(raw: &[Vec]) -> $crate::Result { - $crate::header::parsing::from_comma_delimited(raw).map($id) - } - } - impl $crate::header::HeaderFormat for $id { - fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - $crate::header::parsing::fmt_comma_delimited(f, &self.0[..]) - } - } - impl ::std::fmt::Display for $id { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - use $crate::header::HeaderFormat; - self.fmt_header(f) - } - } - }; - // Single value header - ($(#[$a:meta])*($id:ident, $n:expr) => [$value:ty]) => { - $(#[$a])* - #[derive(Clone, Debug, PartialEq)] - pub struct $id(pub $value); - __hyper__deref!($id => $value); - 
impl $crate::header::Header for $id { - fn header_name() -> &'static str { - $n - } - fn parse_header(raw: &[Vec]) -> $crate::Result { - $crate::header::parsing::from_one_raw_str(raw).map($id) - } - } - impl $crate::header::HeaderFormat for $id { - fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - ::std::fmt::Display::fmt(&**self, f) - } - } - impl ::std::fmt::Display for $id { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - ::std::fmt::Display::fmt(&**self, f) - } - } - }; - // List header, one or more items with "*" option - ($(#[$a:meta])*($id:ident, $n:expr) => {Any / ($item:ty)+}) => { - $(#[$a])* - #[derive(Clone, Debug, PartialEq)] - pub enum $id { - /// Any value is a match - Any, - /// Only the listed items are a match - Items(Vec<$item>), - } - impl $crate::header::Header for $id { - fn header_name() -> &'static str { - $n - } - fn parse_header(raw: &[Vec]) -> $crate::Result { - // FIXME: Return None if no item is in $id::Only - if raw.len() == 1 { - if raw[0] == b"*" { - return Ok($id::Any) - } - } - $crate::header::parsing::from_comma_delimited(raw).map($id::Items) - } - } - impl $crate::header::HeaderFormat for $id { - fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - match *self { - $id::Any => f.write_str("*"), - $id::Items(ref fields) => $crate::header::parsing::fmt_comma_delimited( - f, &fields[..]) - } - } - } - impl ::std::fmt::Display for $id { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - use $crate::header::HeaderFormat; - self.fmt_header(f) - } - } - }; - - // optional test module - ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)* $tm:ident{$($tf:item)*}) => { - header! { - $(#[$a])* - ($id, $n) => ($item)* - } - - __hyper__tm! { $id, $tm { $($tf)* }} - }; - ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)+ $tm:ident{$($tf:item)*}) => { - header! { - $(#[$a])* - ($id, $n) => ($item)+ - } - - __hyper__tm! 
{ $id, $tm { $($tf)* }} - }; - ($(#[$a:meta])*($id:ident, $n:expr) => [$item:ty] $tm:ident{$($tf:item)*}) => { - header! { - $(#[$a])* - ($id, $n) => [$item] - } - - __hyper__tm! { $id, $tm { $($tf)* }} - }; - ($(#[$a:meta])*($id:ident, $n:expr) => {Any / ($item:ty)+} $tm:ident{$($tf:item)*}) => { - header! { - $(#[$a])* - ($id, $n) => {Any / ($item)+} - } - - __hyper__tm! { $id, $tm { $($tf)* }} - }; -} - - -mod accept; -mod access_control_allow_credentials; -mod access_control_allow_headers; -mod access_control_allow_methods; -mod access_control_allow_origin; -mod access_control_expose_headers; -mod access_control_max_age; -mod access_control_request_headers; -mod access_control_request_method; -mod accept_charset; -mod accept_encoding; -mod accept_language; -mod accept_ranges; -mod allow; -mod authorization; -mod cache_control; -mod cookie; -mod connection; -mod content_disposition; -mod content_encoding; -mod content_language; -mod content_length; -mod content_range; -mod content_type; -mod date; -mod etag; -mod expect; -mod expires; -mod from; -mod host; -mod if_match; -mod if_modified_since; -mod if_none_match; -mod if_range; -mod if_unmodified_since; -mod last_modified; -mod location; -mod origin; -mod pragma; -mod prefer; -mod preference_applied; -mod range; -mod referer; -mod referrer_policy; -mod server; -mod set_cookie; -mod strict_transport_security; -mod transfer_encoding; -mod upgrade; -mod user_agent; -mod vary; -mod link; diff --git a/third_party/rust/hyper/src/header/common/origin.rs b/third_party/rust/hyper/src/header/common/origin.rs deleted file mode 100644 index 3fbbe05cfa5e..000000000000 --- a/third_party/rust/hyper/src/header/common/origin.rs +++ /dev/null @@ -1,119 +0,0 @@ -use header::{Header, Host, HeaderFormat}; -use std::fmt; -use std::str::FromStr; -use header::parsing::from_one_raw_str; - -/// The `Origin` header. 
-/// -/// The `Origin` header is a version of the `Referer` header that is used for all HTTP fetches and `POST`s whose CORS flag is set. -/// This header is often used to inform recipients of the security context of where the request was initiated. -/// -/// -/// Following the spec, https://fetch.spec.whatwg.org/#origin-header, the value of this header is composed of -/// a String (scheme), header::Host (host/port) -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Origin}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Origin::new("http", "hyper.rs", None) -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, Origin}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Origin::new("https", "wikipedia.org", Some(443)) -/// ); -/// ``` - -#[derive(Clone, Debug)] -pub struct Origin { - /// The scheme, such as http or https - pub scheme: String, - /// The host, such as Host{hostname: "hyper.rs".to_owned(), port: None} - pub host: Host, -} - -impl Origin { - /// Creates a new `Origin` header. 
- pub fn new, H: Into>(scheme: S, hostname: H, port: Option) -> Origin{ - Origin { - scheme: scheme.into(), - host: Host { - hostname: hostname.into(), - port: port - } - } - } -} - -impl Header for Origin { - fn header_name() -> &'static str { - static NAME: &'static str = "Origin"; - NAME - } - - fn parse_header(raw: &[Vec]) -> ::Result { - from_one_raw_str(raw) - } -} - -impl FromStr for Origin { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - let idx = match s.find("://") { - Some(idx) => idx, - None => return Err(::Error::Header) - }; - // idx + 3 because thats how long "://" is - let (scheme, etc) = (&s[..idx], &s[idx + 3..]); - let host = try!(Host::from_str(etc)); - - - Ok(Origin{ - scheme: scheme.to_owned(), - host: host - }) - } -} - -impl HeaderFormat for Origin { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for Origin { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}://{}", self.scheme, self.host) - } -} - -impl PartialEq for Origin { - fn eq(&self, other: &Origin) -> bool { - self.scheme == other.scheme && self.host == other.host - } -} - - -#[cfg(test)] -mod tests { - use super::Origin; - use header::Header; - - #[test] - fn test_origin() { - let origin = Header::parse_header([b"http://foo.com".to_vec()].as_ref()); - assert_eq!(origin.ok(), Some(Origin::new("http", "foo.com", None))); - - let origin = Header::parse_header([b"https://foo.com:443".to_vec()].as_ref()); - assert_eq!(origin.ok(), Some(Origin::new("https", "foo.com", Some(443)))); - } -} - -bench_header!(bench, Origin, { vec![b"https://foo.com".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/pragma.rs b/third_party/rust/hyper/src/header/common/pragma.rs deleted file mode 100644 index 5acf4b926e21..000000000000 --- a/third_party/rust/hyper/src/header/common/pragma.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::fmt; -use std::ascii::AsciiExt; - -use 
header::{Header, HeaderFormat, parsing}; - -/// The `Pragma` header defined by HTTP/1.0. -/// -/// > The "Pragma" header field allows backwards compatibility with -/// > HTTP/1.0 caches, so that clients can specify a "no-cache" request -/// > that they will understand (as Cache-Control was not defined until -/// > HTTP/1.1). When the Cache-Control header field is also present and -/// > understood in a request, Pragma is ignored. - -/// > In HTTP/1.0, Pragma was defined as an extensible field for -/// > implementation-specified directives for recipients. This -/// > specification deprecates such extensions to improve interoperability. -/// -/// Spec: https://tools.ietf.org/html/rfc7234#section-5.4 -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Pragma}; -/// -/// let mut headers = Headers::new(); -/// headers.set(Pragma::NoCache); -/// ``` -/// ``` -/// use hyper::header::{Headers, Pragma}; -/// -/// let mut headers = Headers::new(); -/// headers.set(Pragma::Ext("foobar".to_owned())); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub enum Pragma { - /// Corresponds to the `no-cache` value. - NoCache, - /// Every value other than `no-cache`. 
- Ext(String), -} - -impl Header for Pragma { - fn header_name() -> &'static str { - "Pragma" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - parsing::from_one_raw_str(raw).and_then(|s: String| { - let slice = &s.to_ascii_lowercase()[..]; - match slice { - "no-cache" => Ok(Pragma::NoCache), - _ => Ok(Pragma::Ext(s)), - } - }) - } -} - -impl HeaderFormat for Pragma { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for Pragma { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - Pragma::NoCache => "no-cache", - Pragma::Ext(ref string) => &string[..], - }) - } -} - -#[test] -fn test_parse_header() { - let a: Pragma = Header::parse_header([b"no-cache".to_vec()].as_ref()).unwrap(); - let b = Pragma::NoCache; - assert_eq!(a, b); - let c: Pragma = Header::parse_header([b"FoObar".to_vec()].as_ref()).unwrap(); - let d = Pragma::Ext("FoObar".to_owned()); - assert_eq!(c, d); - let e: ::Result = Header::parse_header([b"".to_vec()].as_ref()); - assert_eq!(e.ok(), None); -} diff --git a/third_party/rust/hyper/src/header/common/prefer.rs b/third_party/rust/hyper/src/header/common/prefer.rs deleted file mode 100644 index a72effa09b18..000000000000 --- a/third_party/rust/hyper/src/header/common/prefer.rs +++ /dev/null @@ -1,209 +0,0 @@ -use std::fmt; -use std::str::FromStr; -use header::{Header, HeaderFormat}; -use header::parsing::{from_comma_delimited, fmt_comma_delimited}; - -/// `Prefer` header, defined in [RFC7240](http://tools.ietf.org/html/rfc7240) -/// -/// The `Prefer` header field is HTTP header field that can be used by a -/// client to request that certain behaviors be employed by a server -/// while processing a request. 
-/// -/// # ABNF -/// ```plain -/// Prefer = "Prefer" ":" 1#preference -/// preference = token [ BWS "=" BWS word ] -/// *( OWS ";" [ OWS parameter ] ) -/// parameter = token [ BWS "=" BWS word ] -/// ``` -/// -/// # Example values -/// * `respond-async` -/// * `return=minimal` -/// * `wait=30` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Prefer, Preference}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Prefer(vec![Preference::RespondAsync]) -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, Prefer, Preference}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// Prefer(vec![ -/// Preference::RespondAsync, -/// Preference::ReturnRepresentation, -/// Preference::Wait(10u32), -/// Preference::Extension("foo".to_owned(), -/// "bar".to_owned(), -/// vec![]), -/// ]) -/// ); -/// ``` -#[derive(PartialEq, Clone, Debug)] -pub struct Prefer(pub Vec); - -__hyper__deref!(Prefer => Vec); - -impl Header for Prefer { - fn header_name() -> &'static str { - "Prefer" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - let preferences = try!(from_comma_delimited(raw)); - if !preferences.is_empty() { - Ok(Prefer(preferences)) - } else { - Err(::Error::Header) - } - } -} - -impl HeaderFormat for Prefer { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for Prefer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt_comma_delimited(f, &self[..]) - } -} - -/// Prefer contains a list of these preferences. -#[derive(PartialEq, Clone, Debug)] -pub enum Preference { - /// "respond-async" - RespondAsync, - /// "return=representation" - ReturnRepresentation, - /// "return=minimal" - ReturnMinimal, - /// "handling=strict" - HandlingStrict, - /// "handling=leniant" - HandlingLeniant, - /// "wait=delta" - Wait(u32), - - /// Extension preferences. Always has a value, if none is specified it is - /// just "". 
A preference can also have a list of parameters. - Extension(String, String, Vec<(String, String)>) -} - -impl fmt::Display for Preference { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Preference::*; - fmt::Display::fmt(match *self { - RespondAsync => "respond-async", - ReturnRepresentation => "return=representation", - ReturnMinimal => "return=minimal", - HandlingStrict => "handling=strict", - HandlingLeniant => "handling=leniant", - - Wait(secs) => return write!(f, "wait={}", secs), - - Extension(ref name, ref value, ref params) => { - try!(write!(f, "{}", name)); - if value != "" { try!(write!(f, "={}", value)); } - if params.len() > 0 { - for &(ref name, ref value) in params { - try!(write!(f, "; {}", name)); - if value != "" { try!(write!(f, "={}", value)); } - } - } - return Ok(()); - } - }, f) - } -} - -impl FromStr for Preference { - type Err = Option<::Err>; - fn from_str(s: &str) -> Result::Err>> { - use self::Preference::*; - let mut params = s.split(';').map(|p| { - let mut param = p.splitn(2, '='); - match (param.next(), param.next()) { - (Some(name), Some(value)) => (name.trim(), value.trim().trim_matches('"')), - (Some(name), None) => (name.trim(), ""), - // This can safely be unreachable because the [`splitn`][1] - // function (used above) will always have at least one value. 
- // - // [1]: http://doc.rust-lang.org/std/primitive.str.html#method.splitn - _ => { unreachable!(); } - } - }); - match params.nth(0) { - Some(param) => { - let rest: Vec<(String, String)> = params.map(|(l, r)| (l.to_owned(), r.to_owned())).collect(); - match param { - ("respond-async", "") => if rest.len() == 0 { Ok(RespondAsync) } else { Err(None) }, - ("return", "representation") => if rest.len() == 0 { Ok(ReturnRepresentation) } else { Err(None) }, - ("return", "minimal") => if rest.len() == 0 { Ok(ReturnMinimal) } else { Err(None) }, - ("handling", "strict") => if rest.len() == 0 { Ok(HandlingStrict) } else { Err(None) }, - ("handling", "leniant") => if rest.len() == 0 { Ok(HandlingLeniant) } else { Err(None) }, - ("wait", secs) => if rest.len() == 0 { secs.parse().map(Wait).map_err(Some) } else { Err(None) }, - (left, right) => Ok(Extension(left.to_owned(), right.to_owned(), rest)) - } - }, - None => Err(None) - } - } -} - -#[cfg(test)] -mod tests { - use header::Header; - use super::*; - - #[test] - fn test_parse_multiple_headers() { - let prefer = Header::parse_header(&[b"respond-async, return=representation".to_vec()]); - assert_eq!(prefer.ok(), Some(Prefer(vec![Preference::RespondAsync, - Preference::ReturnRepresentation]))) - } - - #[test] - fn test_parse_argument() { - let prefer = Header::parse_header(&[b"wait=100, handling=leniant, respond-async".to_vec()]); - assert_eq!(prefer.ok(), Some(Prefer(vec![Preference::Wait(100), - Preference::HandlingLeniant, - Preference::RespondAsync]))) - } - - #[test] - fn test_parse_quote_form() { - let prefer = Header::parse_header(&[b"wait=\"200\", handling=\"strict\"".to_vec()]); - assert_eq!(prefer.ok(), Some(Prefer(vec![Preference::Wait(200), - Preference::HandlingStrict]))) - } - - #[test] - fn test_parse_extension() { - let prefer = Header::parse_header(&[b"foo, bar=baz, baz; foo; bar=baz, bux=\"\"; foo=\"\", buz=\"some parameter\"".to_vec()]); - assert_eq!(prefer.ok(), Some(Prefer(vec![ - 
Preference::Extension("foo".to_owned(), "".to_owned(), vec![]), - Preference::Extension("bar".to_owned(), "baz".to_owned(), vec![]), - Preference::Extension("baz".to_owned(), "".to_owned(), vec![("foo".to_owned(), "".to_owned()), ("bar".to_owned(), "baz".to_owned())]), - Preference::Extension("bux".to_owned(), "".to_owned(), vec![("foo".to_owned(), "".to_owned())]), - Preference::Extension("buz".to_owned(), "some parameter".to_owned(), vec![])]))) - } - - #[test] - fn test_fail_with_args() { - let prefer: ::Result = Header::parse_header(&[b"respond-async; foo=bar".to_vec()]); - assert_eq!(prefer.ok(), None); - } -} - -bench_header!(normal, - Prefer, { vec![b"respond-async, return=representation".to_vec(), b"wait=100".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/preference_applied.rs b/third_party/rust/hyper/src/header/common/preference_applied.rs deleted file mode 100644 index a6b789fa3d7e..000000000000 --- a/third_party/rust/hyper/src/header/common/preference_applied.rs +++ /dev/null @@ -1,107 +0,0 @@ -use std::fmt; -use header::{Header, HeaderFormat, Preference}; -use header::parsing::{from_comma_delimited, fmt_comma_delimited}; - -/// `Preference-Applied` header, defined in [RFC7240](http://tools.ietf.org/html/rfc7240) -/// -/// The `Preference-Applied` response header may be included within a -/// response message as an indication as to which `Prefer` header tokens were -/// honored by the server and applied to the processing of a request. 
-/// -/// # ABNF -/// ```plain -/// Preference-Applied = "Preference-Applied" ":" 1#applied-pref -/// applied-pref = token [ BWS "=" BWS word ] -/// ``` -/// -/// # Example values -/// * `respond-async` -/// * `return=minimal` -/// * `wait=30` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, PreferenceApplied, Preference}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// PreferenceApplied(vec![Preference::RespondAsync]) -/// ); -/// ``` -/// ``` -/// use hyper::header::{Headers, PreferenceApplied, Preference}; -/// -/// let mut headers = Headers::new(); -/// headers.set( -/// PreferenceApplied(vec![ -/// Preference::RespondAsync, -/// Preference::ReturnRepresentation, -/// Preference::Wait(10u32), -/// Preference::Extension("foo".to_owned(), -/// "bar".to_owned(), -/// vec![]), -/// ]) -/// ); -/// ``` -#[derive(PartialEq, Clone, Debug)] -pub struct PreferenceApplied(pub Vec); - -__hyper__deref!(PreferenceApplied => Vec); - -impl Header for PreferenceApplied { - fn header_name() -> &'static str { - "Preference-Applied" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - let preferences = try!(from_comma_delimited(raw)); - if !preferences.is_empty() { - Ok(PreferenceApplied(preferences)) - } else { - Err(::Error::Header) - } - } -} - -impl HeaderFormat for PreferenceApplied { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for PreferenceApplied { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - //TODO: format this without allocating a Vec and cloning contents - let preferences: Vec<_> = self.0.iter().map(|pref| match pref { - // The spec ignores parameters in `Preferences-Applied` - &Preference::Extension(ref name, ref value, _) => Preference::Extension( - name.to_owned(), - value.to_owned(), - vec![] - ), - preference @ _ => preference.clone() - }).collect(); - fmt_comma_delimited(f, &preferences) - } -} - -#[cfg(test)] -mod tests { - use 
header::{HeaderFormat, Preference}; - use super::*; - - #[test] - fn test_format_ignore_parameters() { - assert_eq!( - format!("{}", &PreferenceApplied(vec![Preference::Extension( - "foo".to_owned(), - "bar".to_owned(), - vec![("bar".to_owned(), "foo".to_owned()), ("buz".to_owned(), "".to_owned())] - )]) as &(HeaderFormat + Send + Sync)), - "foo=bar".to_owned() - ); - } -} - -bench_header!(normal, - PreferenceApplied, { vec![b"respond-async, return=representation".to_vec(), b"wait=100".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/range.rs b/third_party/rust/hyper/src/header/common/range.rs deleted file mode 100644 index 4eea1bcc46ef..000000000000 --- a/third_party/rust/hyper/src/header/common/range.rs +++ /dev/null @@ -1,288 +0,0 @@ -use std::fmt::{self, Display}; -use std::str::FromStr; - -use header::{Header, HeaderFormat}; -use header::parsing::{from_one_raw_str, from_comma_delimited}; - -/// `Range` header, defined in [RFC7233](https://tools.ietf.org/html/rfc7233#section-3.1) -/// -/// The "Range" header field on a GET request modifies the method -/// semantics to request transfer of only one or more subranges of the -/// selected representation data, rather than the entire selected -/// representation data. 
-/// -/// # ABNF -/// ```plain -/// Range = byte-ranges-specifier / other-ranges-specifier -/// other-ranges-specifier = other-range-unit "=" other-range-set -/// other-range-set = 1*VCHAR -/// -/// bytes-unit = "bytes" -/// -/// byte-ranges-specifier = bytes-unit "=" byte-range-set -/// byte-range-set = 1#(byte-range-spec / suffix-byte-range-spec) -/// byte-range-spec = first-byte-pos "-" [last-byte-pos] -/// first-byte-pos = 1*DIGIT -/// last-byte-pos = 1*DIGIT -/// ``` -/// -/// # Example values -/// * `bytes=1000-` -/// * `bytes=-2000` -/// * `bytes=0-1,30-40` -/// * `bytes=0-10,20-90,-100` -/// * `custom_unit=0-123` -/// * `custom_unit=xxx-yyy` -/// -/// # Examples -/// ``` -/// use hyper::header::{Headers, Range, ByteRangeSpec}; -/// -/// let mut headers = Headers::new(); -/// headers.set(Range::Bytes( -/// vec![ByteRangeSpec::FromTo(1, 100), ByteRangeSpec::AllFrom(200)] -/// )); -/// -/// headers.clear(); -/// headers.set(Range::Unregistered("letters".to_owned(), "a-f".to_owned())); -/// ``` -/// ``` -/// use hyper::header::{Headers, Range}; -/// -/// let mut headers = Headers::new(); -/// headers.set(Range::bytes(1, 100)); -/// -/// headers.clear(); -/// headers.set(Range::bytes_multi(vec![(1, 100), (200, 300)])); -/// ``` -#[derive(PartialEq, Clone, Debug)] -pub enum Range { - /// Byte range - Bytes(Vec), - /// Custom range, with unit not registered at IANA - /// (`other-range-unit`: String , `other-range-set`: String) - Unregistered(String, String) -} - -/// Each `Range::Bytes` header can contain one or more `ByteRangeSpecs`. 
-/// Each `ByteRangeSpec` defines a range of bytes to fetch -#[derive(PartialEq, Clone, Debug)] -pub enum ByteRangeSpec { - /// Get all bytes between x and y ("x-y") - FromTo(u64, u64), - /// Get all bytes starting from x ("x-") - AllFrom(u64), - /// Get last x bytes ("-x") - Last(u64) -} - -impl Range { - /// Get the most common byte range header ("bytes=from-to") - pub fn bytes(from: u64, to: u64) -> Range { - Range::Bytes(vec![ByteRangeSpec::FromTo(from, to)]) - } - - /// Get byte range header with multiple subranges - /// ("bytes=from1-to1,from2-to2,fromX-toX") - pub fn bytes_multi(ranges: Vec<(u64, u64)>) -> Range { - Range::Bytes(ranges.iter().map(|r| ByteRangeSpec::FromTo(r.0, r.1)).collect()) - } -} - - -impl fmt::Display for ByteRangeSpec { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ByteRangeSpec::FromTo(from, to) => write!(f, "{}-{}", from, to), - ByteRangeSpec::Last(pos) => write!(f, "-{}", pos), - ByteRangeSpec::AllFrom(pos) => write!(f, "{}-", pos), - } - } -} - - -impl fmt::Display for Range { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Range::Bytes(ref ranges) => { - try!(write!(f, "bytes=")); - - for (i, range) in ranges.iter().enumerate() { - if i != 0 { - try!(f.write_str(",")); - } - try!(Display::fmt(range, f)); - } - Ok(()) - }, - Range::Unregistered(ref unit, ref range_str) => { - write!(f, "{}={}", unit, range_str) - }, - } - } -} - -impl FromStr for Range { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - let mut iter = s.splitn(2, "="); - - match (iter.next(), iter.next()) { - (Some("bytes"), Some(ranges)) => { - match from_comma_delimited(&[ranges]) { - Ok(ranges) => { - if ranges.is_empty() { - return Err(::Error::Header); - } - Ok(Range::Bytes(ranges)) - }, - Err(_) => Err(::Error::Header) - } - } - (Some(unit), Some(range_str)) if unit != "" && range_str != "" => { - Ok(Range::Unregistered(unit.to_owned(), range_str.to_owned())) - - }, - _ => 
Err(::Error::Header) - } - } -} - -impl FromStr for ByteRangeSpec { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - let mut parts = s.splitn(2, "-"); - - match (parts.next(), parts.next()) { - (Some(""), Some(end)) => { - end.parse().or(Err(::Error::Header)).map(ByteRangeSpec::Last) - }, - (Some(start), Some("")) => { - start.parse().or(Err(::Error::Header)).map(ByteRangeSpec::AllFrom) - }, - (Some(start), Some(end)) => { - match (start.parse(), end.parse()) { - (Ok(start), Ok(end)) if start <= end => Ok(ByteRangeSpec::FromTo(start, end)), - _ => Err(::Error::Header) - } - }, - _ => Err(::Error::Header) - } - } -} - -impl Header for Range { - - fn header_name() -> &'static str { - "Range" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - from_one_raw_str(raw) - } -} - -impl HeaderFormat for Range { - - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(self, f) - } - -} - -#[test] -fn test_parse_bytes_range_valid() { - let r: Range = Header::parse_header(&[b"bytes=1-100".to_vec()]).unwrap(); - let r2: Range = Header::parse_header(&[b"bytes=1-100,-".to_vec()]).unwrap(); - let r3 = Range::bytes(1, 100); - assert_eq!(r, r2); - assert_eq!(r2, r3); - - let r: Range = Header::parse_header(&[b"bytes=1-100,200-".to_vec()]).unwrap(); - let r2: Range = Header::parse_header(&[b"bytes= 1-100 , 101-xxx, 200- ".to_vec()]).unwrap(); - let r3 = Range::Bytes( - vec![ByteRangeSpec::FromTo(1, 100), ByteRangeSpec::AllFrom(200)] - ); - assert_eq!(r, r2); - assert_eq!(r2, r3); - - let r: Range = Header::parse_header(&[b"bytes=1-100,-100".to_vec()]).unwrap(); - let r2: Range = Header::parse_header(&[b"bytes=1-100, ,,-100".to_vec()]).unwrap(); - let r3 = Range::Bytes( - vec![ByteRangeSpec::FromTo(1, 100), ByteRangeSpec::Last(100)] - ); - assert_eq!(r, r2); - assert_eq!(r2, r3); - - let r: Range = Header::parse_header(&[b"custom=1-100,-100".to_vec()]).unwrap(); - let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned()); - 
assert_eq!(r, r2); - -} - -#[test] -fn test_parse_unregistered_range_valid() { - let r: Range = Header::parse_header(&[b"custom=1-100,-100".to_vec()]).unwrap(); - let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned()); - assert_eq!(r, r2); - - let r: Range = Header::parse_header(&[b"custom=abcd".to_vec()]).unwrap(); - let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned()); - assert_eq!(r, r2); - - let r: Range = Header::parse_header(&[b"custom=xxx-yyy".to_vec()]).unwrap(); - let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned()); - assert_eq!(r, r2); -} - -#[test] -fn test_parse_invalid() { - let r: ::Result = Header::parse_header(&[b"bytes=1-a,-".to_vec()]); - assert_eq!(r.ok(), None); - - let r: ::Result = Header::parse_header(&[b"bytes=1-2-3".to_vec()]); - assert_eq!(r.ok(), None); - - let r: ::Result = Header::parse_header(&[b"abc".to_vec()]); - assert_eq!(r.ok(), None); - - let r: ::Result = Header::parse_header(&[b"bytes=1-100=".to_vec()]); - assert_eq!(r.ok(), None); - - let r: ::Result = Header::parse_header(&[b"bytes=".to_vec()]); - assert_eq!(r.ok(), None); - - let r: ::Result = Header::parse_header(&[b"custom=".to_vec()]); - assert_eq!(r.ok(), None); - - let r: ::Result = Header::parse_header(&[b"=1-100".to_vec()]); - assert_eq!(r.ok(), None); -} - -#[test] -fn test_fmt() { - use header::Headers; - - let mut headers = Headers::new(); - - headers.set( - Range::Bytes( - vec![ByteRangeSpec::FromTo(0, 1000), ByteRangeSpec::AllFrom(2000)] - )); - assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n"); - - headers.clear(); - headers.set(Range::Bytes(vec![])); - - assert_eq!(&headers.to_string(), "Range: bytes=\r\n"); - - headers.clear(); - headers.set(Range::Unregistered("custom".to_owned(), "1-xxx".to_owned())); - - assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n"); -} - -bench_header!(bytes_multi, Range, { vec![b"bytes=1-1001,2001-3001,10001-".to_vec()]}); -bench_header!(custom_unit, 
Range, { vec![b"other=0-100000".to_vec()]}); diff --git a/third_party/rust/hyper/src/header/common/referer.rs b/third_party/rust/hyper/src/header/common/referer.rs deleted file mode 100644 index 2c7bf14bf1ad..000000000000 --- a/third_party/rust/hyper/src/header/common/referer.rs +++ /dev/null @@ -1,41 +0,0 @@ -header! { - /// `Referer` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.2) - /// - /// The `Referer` [sic] header field allows the user agent to specify a - /// URI reference for the resource from which the target URI was obtained - /// (i.e., the "referrer", though the field name is misspelled). A user - /// agent MUST NOT include the fragment and userinfo components of the - /// URI reference, if any, when generating the Referer field value. - /// - /// # ABNF - /// ```plain - /// Referer = absolute-URI / partial-URI - /// ``` - /// - /// # Example values - /// * `http://www.example.org/hypertext/Overview.html` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, Referer}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Referer("/People.html#tim".to_owned())); - /// ``` - /// ``` - /// use hyper::header::{Headers, Referer}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Referer("http://www.example.com/index.html".to_owned())); - /// ``` - // TODO Use URL - (Referer, "Referer") => [String] - - test_referer { - // Testcase from the RFC - test_header!(test1, vec![b"http://www.example.org/hypertext/Overview.html"]); - } -} - -bench_header!(bench, Referer, { vec![b"http://foo.com/hello:3000".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/referrer_policy.rs b/third_party/rust/hyper/src/header/common/referrer_policy.rs deleted file mode 100644 index 1091a219b0aa..000000000000 --- a/third_party/rust/hyper/src/header/common/referrer_policy.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::fmt; -use std::ascii::AsciiExt; - -use header::{Header, HeaderFormat, parsing}; 
- -/// `Referrer-Policy` header, part of -/// [Referrer Policy](https://www.w3.org/TR/referrer-policy/#referrer-policy-header) -/// -/// The `Referrer-Policy` HTTP header specifies the referrer -/// policy that the user agent applies when determining what -/// referrer information should be included with requests made, -/// and with browsing contexts created from the context of the -/// protected resource. -/// -/// # ABNF -/// ```plain -/// Referrer-Policy: 1#policy-token -/// policy-token = "no-referrer" / "no-referrer-when-downgrade" -/// / "same-origin" / "origin" -/// / "origin-when-cross-origin" / "unsafe-url" -/// ``` -/// -/// # Example values -/// * `no-referrer` -/// -/// # Example -/// ``` -/// use hyper::header::{Headers, ReferrerPolicy}; -/// -/// let mut headers = Headers::new(); -/// headers.set(ReferrerPolicy::NoReferrer); -/// ``` -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum ReferrerPolicy { - /// `no-referrer` - NoReferrer, - /// `no-referrer-when-downgrade` - NoReferrerWhenDowngrade, - /// `same-origin` - SameOrigin, - /// `origin` - Origin, - /// `origin-when-cross-origin` - OriginWhenCrossOrigin, - /// `unsafe-url` - UnsafeUrl, - /// `strict-origin` - StrictOrigin, - ///`strict-origin-when-cross-origin` - StrictOriginWhenCrossOrigin, -} - -impl Header for ReferrerPolicy { - fn header_name() -> &'static str { - static NAME: &'static str = "Referrer-Policy"; - NAME - } - - fn parse_header(raw: &[Vec]) -> ::Result { - use self::ReferrerPolicy::*; - // See https://www.w3.org/TR/referrer-policy/#determine-policy-for-token - let headers: Vec = try!(parsing::from_comma_delimited(raw)); - - for h in headers.iter().rev() { - let slice = &h.to_ascii_lowercase()[..]; - match slice { - "no-referrer" | "never" => return Ok(NoReferrer), - "no-referrer-when-downgrade" | "default" => return Ok(NoReferrerWhenDowngrade), - "same-origin" => return Ok(SameOrigin), - "origin" => return Ok(Origin), - "origin-when-cross-origin" => return 
Ok(OriginWhenCrossOrigin), - "strict-origin" => return Ok(StrictOrigin), - "strict-origin-when-cross-origin" => return Ok(StrictOriginWhenCrossOrigin), - "unsafe-url" | "always" => return Ok(UnsafeUrl), - _ => continue, - } - } - - Err(::Error::Header) - } -} - -impl HeaderFormat for ReferrerPolicy { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for ReferrerPolicy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::ReferrerPolicy::*; - f.write_str(match *self { - NoReferrer => "no-referrer", - NoReferrerWhenDowngrade => "no-referrer-when-downgrade", - SameOrigin => "same-origin", - Origin => "origin", - OriginWhenCrossOrigin => "origin-when-cross-origin", - StrictOrigin => "strict-origin", - StrictOriginWhenCrossOrigin => "strict-origin-when-cross-origin", - UnsafeUrl => "unsafe-url", - }) - } -} - -#[test] -fn test_parse_header() { - let a: ReferrerPolicy = Header::parse_header([b"origin".to_vec()].as_ref()).unwrap(); - let b = ReferrerPolicy::Origin; - assert_eq!(a, b); - let e: ::Result = Header::parse_header([b"foobar".to_vec()].as_ref()); - assert!(e.is_err()); -} - -#[test] -fn test_rightmost_header() { - let a: ReferrerPolicy = Header::parse_header(&["same-origin, origin, foobar".into()]).unwrap(); - let b = ReferrerPolicy::Origin; - assert_eq!(a, b); -} diff --git a/third_party/rust/hyper/src/header/common/server.rs b/third_party/rust/hyper/src/header/common/server.rs deleted file mode 100644 index 8b6c90837fd8..000000000000 --- a/third_party/rust/hyper/src/header/common/server.rs +++ /dev/null @@ -1,36 +0,0 @@ -header! 
{ - /// `Server` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.2) - /// - /// The `Server` header field contains information about the software - /// used by the origin server to handle the request, which is often used - /// by clients to help identify the scope of reported interoperability - /// problems, to work around or tailor requests to avoid particular - /// server limitations, and for analytics regarding server or operating - /// system use. An origin server MAY generate a Server field in its - /// responses. - /// - /// # ABNF - /// ```plain - /// Server = product *( RWS ( product / comment ) ) - /// ``` - /// - /// # Example values - /// * `CERN/3.0 libwww/2.17` - /// - /// # Example - /// ``` - /// use hyper::header::{Headers, Server}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Server("hyper/0.5.2".to_owned())); - /// ``` - // TODO: Maybe parse as defined in the spec? - (Server, "Server") => [String] - - test_server { - // Testcase from RFC - test_header!(test1, vec![b"CERN/3.0 libwww/2.17"]); - } -} - -bench_header!(bench, Server, { vec![b"Some String".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/set_cookie.rs b/third_party/rust/hyper/src/header/common/set_cookie.rs deleted file mode 100644 index 6848786a7adc..000000000000 --- a/third_party/rust/hyper/src/header/common/set_cookie.rs +++ /dev/null @@ -1,121 +0,0 @@ -use header::{Header, HeaderFormat}; -use std::fmt::{self}; -use std::str::from_utf8; - - -/// `Set-Cookie` header, defined [RFC6265](http://tools.ietf.org/html/rfc6265#section-4.1) -/// -/// The Set-Cookie HTTP response header is used to send cookies from the -/// server to the user agent. -/// -/// Informally, the Set-Cookie response header contains the header name -/// "Set-Cookie" followed by a ":" and a cookie. Each cookie begins with -/// a name-value-pair, followed by zero or more attribute-value pairs. 
-/// -/// # ABNF -/// ```plain -/// set-cookie-header = "Set-Cookie:" SP set-cookie-string -/// set-cookie-string = cookie-pair *( ";" SP cookie-av ) -/// cookie-pair = cookie-name "=" cookie-value -/// cookie-name = token -/// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE ) -/// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E -/// ; US-ASCII characters excluding CTLs, -/// ; whitespace DQUOTE, comma, semicolon, -/// ; and backslash -/// token = -/// -/// cookie-av = expires-av / max-age-av / domain-av / -/// path-av / secure-av / httponly-av / -/// extension-av -/// expires-av = "Expires=" sane-cookie-date -/// sane-cookie-date = -/// max-age-av = "Max-Age=" non-zero-digit *DIGIT -/// ; In practice, both expires-av and max-age-av -/// ; are limited to dates representable by the -/// ; user agent. -/// non-zero-digit = %x31-39 -/// ; digits 1 through 9 -/// domain-av = "Domain=" domain-value -/// domain-value = -/// ; defined in [RFC1034], Section 3.5, as -/// ; enhanced by [RFC1123], Section 2.1 -/// path-av = "Path=" path-value -/// path-value = -/// secure-av = "Secure" -/// httponly-av = "HttpOnly" -/// extension-av = -/// ``` -/// -/// # Example values -/// * `SID=31d4d96e407aad42` -/// * `lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT` -/// * `lang=; Expires=Sun, 06 Nov 1994 08:49:37 GMT` -/// * `lang=en-US; Path=/; Domain=example.com` -/// -/// # Example -/// ``` -/// use hyper::header::{Headers, SetCookie}; -/// -/// let mut headers = Headers::new(); -/// -/// headers.set( -/// SetCookie(vec![ -/// String::from("foo=bar; Path=/path; Domain=example.com") -/// ]) -/// ); -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct SetCookie(pub Vec); - -__hyper__deref!(SetCookie => Vec); - -impl Header for SetCookie { - fn header_name() -> &'static str { - "Set-Cookie" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - let mut set_cookies = Vec::with_capacity(raw.len()); - for set_cookies_raw in raw { - if let Ok(s) = 
from_utf8(&set_cookies_raw[..]) { - set_cookies.push(s.trim().to_owned()); - } - } - - if !set_cookies.is_empty() { - Ok(SetCookie(set_cookies)) - } else { - Err(::Error::Header) - } - } - -} - -impl HeaderFormat for SetCookie { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.0.len() == 1 { - write!(f, "{}", &self.0[0]) - } else { - panic!("SetCookie with multiple cookies cannot be used with fmt_header, must use fmt_multi_header"); - } - } - - fn fmt_multi_header(&self, f: &mut ::header::MultilineFormatter) -> fmt::Result { - for cookie in &self.0 { - try!(f.fmt_line(cookie)); - } - Ok(()) - } -} - -#[test] -fn test_set_cookie_fmt() { - use ::header::Headers; - let mut headers = Headers::new(); - headers.set(SetCookie(vec![ - "foo=bar".into(), - "baz=quux".into(), - ])); - assert_eq!(headers.to_string(), "Set-Cookie: foo=bar\r\nSet-Cookie: baz=quux\r\n"); -} diff --git a/third_party/rust/hyper/src/header/common/strict_transport_security.rs b/third_party/rust/hyper/src/header/common/strict_transport_security.rs deleted file mode 100644 index e80503a3ce5f..000000000000 --- a/third_party/rust/hyper/src/header/common/strict_transport_security.rs +++ /dev/null @@ -1,201 +0,0 @@ -use std::fmt; -use std::str::{self, FromStr}; - -use unicase::UniCase; - -use header::{Header, HeaderFormat, parsing}; - -/// `StrictTransportSecurity` header, defined in [RFC6797](https://tools.ietf.org/html/rfc6797) -/// -/// This specification defines a mechanism enabling web sites to declare -/// themselves accessible only via secure connections and/or for users to be -/// able to direct their user agent(s) to interact with given sites only over -/// secure connections. This overall policy is referred to as HTTP Strict -/// Transport Security (HSTS). The policy is declared by web sites via the -/// Strict-Transport-Security HTTP response header field and/or by other means, -/// such as user agent configuration, for example. 
-/// -/// # ABNF -/// -/// ```plain -/// [ directive ] *( ";" [ directive ] ) -/// -/// directive = directive-name [ "=" directive-value ] -/// directive-name = token -/// directive-value = token | quoted-string -/// -/// ``` -/// -/// # Example values -/// * `max-age=31536000` -/// * `max-age=15768000 ; includeSubDomains` -/// -/// # Example -/// ``` -/// # extern crate hyper; -/// # fn main() { -/// use hyper::header::{Headers, StrictTransportSecurity}; -/// -/// let mut headers = Headers::new(); -/// -/// headers.set( -/// StrictTransportSecurity::including_subdomains(31536000u64) -/// ); -/// # } -/// ``` -#[derive(Clone, PartialEq, Debug)] -pub struct StrictTransportSecurity { - /// Signals the UA that the HSTS Policy applies to this HSTS Host as well as - /// any subdomains of the host's domain name. - pub include_subdomains: bool, - - /// Specifies the number of seconds, after the reception of the STS header - /// field, during which the UA regards the host (from whom the message was - /// received) as a Known HSTS Host. 
- pub max_age: u64 -} - -impl StrictTransportSecurity { - /// Create an STS header that includes subdomains - pub fn including_subdomains(max_age: u64) -> StrictTransportSecurity { - StrictTransportSecurity { - max_age: max_age, - include_subdomains: true - } - } - - /// Create an STS header that excludes subdomains - pub fn excluding_subdomains(max_age: u64) -> StrictTransportSecurity { - StrictTransportSecurity { - max_age: max_age, - include_subdomains: false - } - } -} - -enum Directive { - MaxAge(u64), - IncludeSubdomains, - Unknown -} - -impl FromStr for StrictTransportSecurity { - type Err = ::Error; - - fn from_str(s: &str) -> ::Result { - s.split(';') - .map(str::trim) - .map(|sub| if UniCase(sub) == UniCase("includeSubdomains") { - Ok(Directive::IncludeSubdomains) - } else { - let mut sub = sub.splitn(2, '='); - match (sub.next(), sub.next()) { - (Some(left), Some(right)) - if UniCase(left.trim()) == UniCase("max-age") => { - right - .trim() - .trim_matches('"') - .parse() - .map(Directive::MaxAge) - }, - _ => Ok(Directive::Unknown) - } - }) - .fold(Ok((None, None)), |res, dir| match (res, dir) { - (Ok((None, sub)), Ok(Directive::MaxAge(age))) => Ok((Some(age), sub)), - (Ok((age, None)), Ok(Directive::IncludeSubdomains)) => Ok((age, Some(()))), - (Ok((Some(_), _)), Ok(Directive::MaxAge(_))) => Err(::Error::Header), - (Ok((_, Some(_))), Ok(Directive::IncludeSubdomains)) => Err(::Error::Header), - (_, Err(_)) => Err(::Error::Header), - (res, _) => res - }) - .and_then(|res| match res { - (Some(age), sub) => Ok(StrictTransportSecurity { - max_age: age, - include_subdomains: sub.is_some() - }), - _ => Err(::Error::Header) - }) - } -} - -impl Header for StrictTransportSecurity { - fn header_name() -> &'static str { - "Strict-Transport-Security" - } - - fn parse_header(raw: &[Vec]) -> ::Result { - parsing::from_one_raw_str(raw) - } -} - -impl HeaderFormat for StrictTransportSecurity { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - 
fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for StrictTransportSecurity { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.include_subdomains { - write!(f, "max-age={}; includeSubdomains", self.max_age) - } else { - write!(f, "max-age={}", self.max_age) - } - } -} - -#[cfg(test)] -mod tests { - use super::StrictTransportSecurity; - use header::Header; - - #[test] - fn test_parse_max_age() { - let h = Header::parse_header(&[b"max-age=31536000".to_vec()][..]); - assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: false, max_age: 31536000u64 })); - } - - #[test] - fn test_parse_max_age_no_value() { - let h: ::Result = Header::parse_header(&[b"max-age".to_vec()][..]); - assert!(h.is_err()); - } - - #[test] - fn test_parse_quoted_max_age() { - let h = Header::parse_header(&[b"max-age=\"31536000\"".to_vec()][..]); - assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: false, max_age: 31536000u64 })); - } - - #[test] - fn test_parse_spaces_max_age() { - let h = Header::parse_header(&[b"max-age = 31536000".to_vec()][..]); - assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: false, max_age: 31536000u64 })); - } - - #[test] - fn test_parse_include_subdomains() { - let h = Header::parse_header(&[b"max-age=15768000 ; includeSubDomains".to_vec()][..]); - assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: true, max_age: 15768000u64 })); - } - - #[test] - fn test_parse_no_max_age() { - let h: ::Result = Header::parse_header(&[b"includeSubDomains".to_vec()][..]); - assert!(h.is_err()); - } - - #[test] - fn test_parse_max_age_nan() { - let h: ::Result = Header::parse_header(&[b"max-age = derp".to_vec()][..]); - assert!(h.is_err()); - } - - #[test] - fn test_parse_duplicate_directives() { - assert!(StrictTransportSecurity::parse_header(&[b"max-age=100; max-age=5; max-age=0".to_vec()][..]).is_err()); - } -} - -bench_header!(bench, StrictTransportSecurity, { vec![b"max-age=15768000 ; 
includeSubDomains".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/transfer_encoding.rs b/third_party/rust/hyper/src/header/common/transfer_encoding.rs deleted file mode 100644 index 13ae54a34c89..000000000000 --- a/third_party/rust/hyper/src/header/common/transfer_encoding.rs +++ /dev/null @@ -1,53 +0,0 @@ -use header::Encoding; - -header! { - /// `Transfer-Encoding` header, defined in - /// [RFC7230](http://tools.ietf.org/html/rfc7230#section-3.3.1) - /// - /// The `Transfer-Encoding` header field lists the transfer coding names - /// corresponding to the sequence of transfer codings that have been (or - /// will be) applied to the payload body in order to form the message - /// body. - /// - /// # ABNF - /// ```plain - /// Transfer-Encoding = 1#transfer-coding - /// ``` - /// - /// # Example values - /// * `gzip, chunked` - /// - /// # Example - /// ``` - /// use hyper::header::{Headers, TransferEncoding, Encoding}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// TransferEncoding(vec![ - /// Encoding::Gzip, - /// Encoding::Chunked, - /// ]) - /// ); - /// ``` - (TransferEncoding, "Transfer-Encoding") => (Encoding)+ - - transfer_encoding { - test_header!( - test1, - vec![b"gzip, chunked"], - Some(HeaderField( - vec![Encoding::Gzip, Encoding::Chunked] - ))); - // Issue: #683 - test_header!( - test2, - vec![b"chunked", b"chunked"], - Some(HeaderField( - vec![Encoding::Chunked, Encoding::Chunked] - ))); - - } -} - -bench_header!(normal, TransferEncoding, { vec![b"chunked, gzip".to_vec()] }); -bench_header!(ext, TransferEncoding, { vec![b"ext".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/upgrade.rs b/third_party/rust/hyper/src/header/common/upgrade.rs deleted file mode 100644 index 7f2a6132106f..000000000000 --- a/third_party/rust/hyper/src/header/common/upgrade.rs +++ /dev/null @@ -1,158 +0,0 @@ -use std::fmt::{self, Display}; -use std::str::FromStr; -use unicase::UniCase; - -header! 
{ - /// `Upgrade` header, defined in [RFC7230](http://tools.ietf.org/html/rfc7230#section-6.7) - /// - /// The `Upgrade` header field is intended to provide a simple mechanism - /// for transitioning from HTTP/1.1 to some other protocol on the same - /// connection. A client MAY send a list of protocols in the Upgrade - /// header field of a request to invite the server to switch to one or - /// more of those protocols, in order of descending preference, before - /// sending the final response. A server MAY ignore a received Upgrade - /// header field if it wishes to continue using the current protocol on - /// that connection. Upgrade cannot be used to insist on a protocol - /// change. - /// - /// # ABNF - /// ```plain - /// Upgrade = 1#protocol - /// - /// protocol = protocol-name ["/" protocol-version] - /// protocol-name = token - /// protocol-version = token - /// ``` - /// - /// # Example values - /// * `HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11` - /// - /// # Examples - /// ``` - /// use hyper::header::{Headers, Upgrade, Protocol, ProtocolName}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Upgrade(vec![Protocol::new(ProtocolName::WebSocket, None)])); - /// ``` - /// ``` - /// use hyper::header::{Headers, Upgrade, Protocol, ProtocolName}; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// Upgrade(vec![ - /// Protocol::new(ProtocolName::Http, Some("2.0".to_owned())), - /// Protocol::new(ProtocolName::Unregistered("SHTTP".to_owned()), - /// Some("1.3".to_owned())), - /// Protocol::new(ProtocolName::Unregistered("IRC".to_owned()), - /// Some("6.9".to_owned())), - /// ]) - /// ); - /// ``` - (Upgrade, "Upgrade") => (Protocol)+ - - test_upgrade { - // Testcase from the RFC - test_header!( - test1, - vec![b"HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11"], - Some(Upgrade(vec![ - Protocol::new(ProtocolName::Http, Some("2.0".to_owned())), - Protocol::new(ProtocolName::Unregistered("SHTTP".to_owned()), - Some("1.3".to_owned())), - 
Protocol::new(ProtocolName::Unregistered("IRC".to_owned()), Some("6.9".to_owned())), - Protocol::new(ProtocolName::Unregistered("RTA".to_owned()), Some("x11".to_owned())), - ]))); - // Own tests - test_header!( - test2, vec![b"websocket"], - Some(Upgrade(vec![Protocol::new(ProtocolName::WebSocket, None)]))); - #[test] - fn test3() { - let x: ::Result = Header::parse_header(&[b"WEbSOCKet".to_vec()]); - assert_eq!(x.ok(), Some(Upgrade(vec![Protocol::new(ProtocolName::WebSocket, None)]))); - } - } -} - -/// A protocol name used to identify a spefic protocol. Names are case-sensitive -/// except for the `WebSocket` value. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ProtocolName { - /// `HTTP` value, Hypertext Transfer Protocol - Http, - /// `TLS` value, Transport Layer Security [RFC2817](http://tools.ietf.org/html/rfc2817) - Tls, - /// `WebSocket` value, matched case insensitively,Web Socket Protocol - /// [RFC6455](http://tools.ietf.org/html/rfc6455) - WebSocket, - /// `h2c` value, HTTP/2 over cleartext TCP - H2c, - /// Any other protocol name not known to hyper - Unregistered(String), -} - -impl FromStr for ProtocolName { - type Err = (); - fn from_str(s: &str) -> Result { - Ok(match s { - "HTTP" => ProtocolName::Http, - "TLS" => ProtocolName::Tls, - "h2c" => ProtocolName::H2c, - _ => { - if UniCase(s) == UniCase("websocket") { - ProtocolName::WebSocket - } else { - ProtocolName::Unregistered(s.to_owned()) - } - } - }) - } -} - -impl Display for ProtocolName { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - ProtocolName::Http => "HTTP", - ProtocolName::Tls => "TLS", - ProtocolName::WebSocket => "websocket", - ProtocolName::H2c => "h2c", - ProtocolName::Unregistered(ref s) => s, - }) - } -} - -/// Protocols that appear in the `Upgrade` header field -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct Protocol { - /// The protocol identifier - pub name: ProtocolName, - /// The optional version of the protocol, often in the 
format "DIGIT.DIGIT" (e.g.. "1.2") - pub version: Option, -} - -impl Protocol { - /// Creates a new Protocol with the given name and version - pub fn new(name: ProtocolName, version: Option) -> Protocol { - Protocol { name: name, version: version } - } -} - -impl FromStr for Protocol { - type Err =(); - fn from_str(s: &str) -> Result { - let mut parts = s.splitn(2, '/'); - Ok(Protocol::new(try!(parts.next().unwrap().parse()), parts.next().map(|x| x.to_owned()))) - } -} - -impl Display for Protocol { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(fmt::Display::fmt(&self.name, f)); - if let Some(ref version) = self.version { - try!(write!(f, "/{}", version)); - } - Ok(()) - } -} - -bench_header!(bench, Upgrade, { vec![b"HTTP/2.0, RTA/x11, websocket".to_vec()] }); diff --git a/third_party/rust/hyper/src/header/common/user_agent.rs b/third_party/rust/hyper/src/header/common/user_agent.rs deleted file mode 100644 index 90e5bd00fed3..000000000000 --- a/third_party/rust/hyper/src/header/common/user_agent.rs +++ /dev/null @@ -1,42 +0,0 @@ -header! { - /// `User-Agent` header, defined in - /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.3) - /// - /// The `User-Agent` header field contains information about the user - /// agent originating the request, which is often used by servers to help - /// identify the scope of reported interoperability problems, to work - /// around or tailor responses to avoid particular user agent - /// limitations, and for analytics regarding browser or operating system - /// use. A user agent SHOULD send a User-Agent field in each request - /// unless specifically configured not to do so. 
- /// - /// # ABNF - /// ```plain - /// User-Agent = product *( RWS ( product / comment ) ) - /// product = token ["/" product-version] - /// product-version = token - /// ``` - /// - /// # Example values - /// * `CERN-LineMode/2.15 libwww/2.17b3` - /// * `Bunnies` - /// - /// # Notes - /// * The parser does not split the value - /// - /// # Example - /// ``` - /// use hyper::header::{Headers, UserAgent}; - /// - /// let mut headers = Headers::new(); - /// headers.set(UserAgent("hyper/0.5.2".to_owned())); - /// ``` - (UserAgent, "User-Agent") => [String] - - test_user_agent { - // Testcase from RFC - test_header!(test1, vec![b"CERN-LineMode/2.15 libwww/2.17b3"]); - // Own testcase - test_header!(test2, vec![b"Bunnies"], Some(UserAgent("Bunnies".to_owned()))); - } -} diff --git a/third_party/rust/hyper/src/header/common/vary.rs b/third_party/rust/hyper/src/header/common/vary.rs deleted file mode 100644 index d9113e90461a..000000000000 --- a/third_party/rust/hyper/src/header/common/vary.rs +++ /dev/null @@ -1,66 +0,0 @@ -use unicase::UniCase; - -header! { - /// `Vary` header, defined in [RFC7231](https://tools.ietf.org/html/rfc7231#section-7.1.4) - /// - /// The "Vary" header field in a response describes what parts of a - /// request message, aside from the method, Host header field, and - /// request target, might influence the origin server's process for - /// selecting and representing this response. The value consists of - /// either a single asterisk ("*") or a list of header field names - /// (case-insensitive). 
- /// - /// # ABNF - /// ```plain - /// Vary = "*" / 1#field-name - /// ``` - /// - /// # Example values - /// * `accept-encoding, accept-language` - /// - /// # Example - /// ``` - /// use hyper::header::{Headers, Vary}; - /// - /// let mut headers = Headers::new(); - /// headers.set(Vary::Any); - /// ``` - /// - /// # Example - /// ``` - /// # extern crate hyper; - /// # extern crate unicase; - /// # fn main() { - /// // extern crate unicase; - /// - /// use hyper::header::{Headers, Vary}; - /// use unicase::UniCase; - /// - /// let mut headers = Headers::new(); - /// headers.set( - /// Vary::Items(vec![ - /// UniCase("accept-encoding".to_owned()), - /// UniCase("accept-language".to_owned()), - /// ]) - /// ); - /// # } - /// ``` - (Vary, "Vary") => {Any / (UniCase)+} - - test_vary { - test_header!(test1, vec![b"accept-encoding, accept-language"]); - - #[test] - fn test2() { - let mut vary: ::Result; - - vary = Header::parse_header([b"*".to_vec()].as_ref()); - assert_eq!(vary.ok(), Some(Vary::Any)); - - vary = Header::parse_header([b"etag,cookie,allow".to_vec()].as_ref()); - assert_eq!(vary.ok(), Some(Vary::Items(vec!["eTag".parse().unwrap(), - "cookIE".parse().unwrap(), - "AlLOw".parse().unwrap(),]))); - } - } -} diff --git a/third_party/rust/hyper/src/header/internals/cell.rs b/third_party/rust/hyper/src/header/internals/cell.rs deleted file mode 100644 index 817a68fb1d51..000000000000 --- a/third_party/rust/hyper/src/header/internals/cell.rs +++ /dev/null @@ -1,204 +0,0 @@ -use std::any::{Any, TypeId}; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::mem; -use std::ops::Deref; - -pub struct OptCell(UnsafeCell>); - -impl OptCell { - #[inline] - pub fn new(val: Option) -> OptCell { - OptCell(UnsafeCell::new(val)) - } - - #[inline] - pub fn set(&self, val: T) { - unsafe { - let opt = self.0.get(); - debug_assert!((*opt).is_none()); - *opt = Some(val) - } - } - - #[inline] - pub unsafe fn get_mut(&mut self) -> &mut T { - let opt 
= &mut *self.0.get(); - opt.as_mut().unwrap() - } -} - -impl Deref for OptCell { - type Target = Option; - #[inline] - fn deref(&self) -> &Option { - unsafe { &*self.0.get() } - } -} - -impl Clone for OptCell { - #[inline] - fn clone(&self) -> OptCell { - OptCell::new((**self).clone()) - } -} - -pub struct PtrMapCell(UnsafeCell>>); - -#[derive(Clone, Debug)] -enum PtrMap { - Empty, - One(TypeId, T), - Many(HashMap) -} - -impl PtrMapCell { - #[inline] - pub fn new() -> PtrMapCell { - PtrMapCell(UnsafeCell::new(PtrMap::Empty)) - } - - #[inline] - pub fn get(&self, key: TypeId) -> Option<&V> { - let map = unsafe { &*self.0.get() }; - match *map { - PtrMap::Empty => None, - PtrMap::One(id, ref v) => if id == key { - Some(v) - } else { - None - }, - PtrMap::Many(ref hm) => hm.get(&key) - }.map(|val| &**val) - } - - #[inline] - pub fn get_mut(&mut self, key: TypeId) -> Option<&mut V> { - let map = unsafe { &mut *self.0.get() }; - match *map { - PtrMap::Empty => None, - PtrMap::One(id, ref mut v) => if id == key { - Some(v) - } else { - None - }, - PtrMap::Many(ref mut hm) => hm.get_mut(&key) - }.map(|val| &mut **val) - } - - #[inline] - pub unsafe fn insert(&self, key: TypeId, val: Box) { - let map = &mut *self.0.get(); - match *map { - PtrMap::Empty => *map = PtrMap::One(key, val), - PtrMap::One(..) 
=> { - let one = mem::replace(map, PtrMap::Empty); - match one { - PtrMap::One(id, one) => { - debug_assert!(id != key); - let mut hm = HashMap::with_capacity(2); - hm.insert(id, one); - hm.insert(key, val); - mem::replace(map, PtrMap::Many(hm)); - }, - _ => unreachable!() - } - }, - PtrMap::Many(ref mut hm) => { hm.insert(key, val); } - } - } - - #[inline] - pub unsafe fn one(&self) -> &V { - let map = &*self.0.get(); - match *map { - PtrMap::One(_, ref one) => one, - _ => panic!("not PtrMap::One value, {:?}", *map) - } - } -} - -impl Clone for PtrMapCell where Box: Clone { - #[inline] - fn clone(&self) -> PtrMapCell { - let cell = PtrMapCell::new(); - unsafe { - *cell.0.get() = (&*self.0.get()).clone() - } - cell - } -} - -#[cfg(test)] -mod test { - use std::any::TypeId; - use super::*; - - #[test] - fn test_opt_cell_set() { - let one:OptCell = OptCell::new(None); - one.set(1); - assert_eq!(*one,Some(1)); - } - - #[test] - fn test_opt_cell_clone() { - let one:OptCell = OptCell::new(Some(3)); - let stored = *one.clone(); - assert_eq!(stored,Some(3)); - } - - - #[test] - fn test_ptr_map_cell_none() { - let type_id = TypeId::of::(); - let pm:PtrMapCell = PtrMapCell::new(); - assert_eq!(pm.get(type_id),None); - } - - #[test] - fn test_ptr_map_cell_one() { - let type_id = TypeId::of::(); - let pm:PtrMapCell = PtrMapCell::new(); - unsafe { pm.insert(type_id, Box::new("a".to_string())); } - assert_eq!(pm.get(type_id), Some(&"a".to_string())); - assert_eq!(unsafe {pm.one()}, "a"); - } - - #[test] - fn test_ptr_map_cell_two() { - let type_id = TypeId::of::(); - let type_id2 = TypeId::of::>(); - let pm:PtrMapCell = PtrMapCell::new(); - unsafe { pm.insert(type_id, Box::new("a".to_string())); } - unsafe { pm.insert(type_id2, Box::new("b".to_string())); } - assert_eq!(pm.get(type_id), Some(&"a".to_string())); - assert_eq!(pm.get(type_id2), Some(&"b".to_string())); - } - - #[test] - fn test_ptr_map_cell_many() { - let id1 = TypeId::of::(); - let id2 = TypeId::of::>(); - let 
id3 = TypeId::of::>(); - let pm:PtrMapCell = PtrMapCell::new(); - unsafe { pm.insert(id1, Box::new("a".to_string())); } - unsafe { pm.insert(id2, Box::new("b".to_string())); } - unsafe { pm.insert(id3, Box::new("c".to_string())); } - assert_eq!(pm.get(id1), Some(&"a".to_string())); - assert_eq!(pm.get(id2), Some(&"b".to_string())); - assert_eq!(pm.get(id3), Some(&"c".to_string())); - } - - - #[test] - fn test_ptr_map_cell_clone() { - let type_id = TypeId::of::(); - let pm:PtrMapCell = PtrMapCell::new(); - unsafe { pm.insert(type_id, Box::new("a".to_string())); } - let cloned = pm.clone(); - assert_eq!(cloned.get(type_id), Some(&"a".to_string())); - } - -} diff --git a/third_party/rust/hyper/src/header/internals/item.rs b/third_party/rust/hyper/src/header/internals/item.rs deleted file mode 100644 index 7a75dc24e7d1..000000000000 --- a/third_party/rust/hyper/src/header/internals/item.rs +++ /dev/null @@ -1,121 +0,0 @@ -use std::any::Any; -use std::any::TypeId; -use std::fmt; -use std::str::from_utf8; - -use super::cell::{OptCell, PtrMapCell}; -use header::{Header, HeaderFormat, MultilineFormatter}; - - -#[derive(Clone)] -pub struct Item { - raw: OptCell>>, - typed: PtrMapCell -} - -impl Item { - #[inline] - pub fn new_raw(data: Vec>) -> Item { - Item { - raw: OptCell::new(Some(data)), - typed: PtrMapCell::new(), - } - } - - #[inline] - pub fn new_typed(ty: Box) -> Item { - let map = PtrMapCell::new(); - unsafe { map.insert((*ty).get_type(), ty); } - Item { - raw: OptCell::new(None), - typed: map, - } - } - - #[inline] - pub fn raw_mut(&mut self) -> &mut Vec> { - self.raw(); - self.typed = PtrMapCell::new(); - unsafe { - self.raw.get_mut() - } - } - - pub fn raw(&self) -> &[Vec] { - if let Some(ref raw) = *self.raw { - return &raw[..]; - } - - let raw = vec![unsafe { self.typed.one() }.to_string().into_bytes()]; - self.raw.set(raw); - - let raw = self.raw.as_ref().unwrap(); - &raw[..] 
- } - - pub fn typed(&self) -> Option<&H> { - let tid = TypeId::of::(); - match self.typed.get(tid) { - Some(val) => Some(val), - None => { - match parse::(self.raw.as_ref().expect("item.raw must exist")) { - Ok(typed) => { - unsafe { self.typed.insert(tid, typed); } - self.typed.get(tid) - }, - Err(_) => None - } - } - }.map(|typed| unsafe { typed.downcast_ref_unchecked() }) - } - - pub fn typed_mut(&mut self) -> Option<&mut H> { - let tid = TypeId::of::(); - if self.typed.get_mut(tid).is_none() { - match parse::(self.raw.as_ref().expect("item.raw must exist")) { - Ok(typed) => { - unsafe { self.typed.insert(tid, typed); } - }, - Err(_) => () - } - } - if self.raw.is_some() && self.typed.get_mut(tid).is_some() { - self.raw = OptCell::new(None); - } - self.typed.get_mut(tid).map(|typed| unsafe { typed.downcast_mut_unchecked() }) - } - - pub fn write_h1(&self, f: &mut MultilineFormatter) -> fmt::Result { - match *self.raw { - Some(ref raw) => { - for part in raw.iter() { - match from_utf8(&part[..]) { - Ok(s) => { - try!(f.fmt_line(&s)); - }, - Err(_) => { - error!("raw header value is not utf8, value={:?}", part); - return Err(fmt::Error); - } - } - } - Ok(()) - }, - None => { - let typed = unsafe { self.typed.one() }; - typed.fmt_multi_header(f) - } - } - } -} - -#[inline] -fn parse(raw: &Vec>) -> - ::Result> { - Header::parse_header(&raw[..]).map(|h: H| { - // FIXME: Use Type ascription - let h: Box = Box::new(h); - h - }) -} - diff --git a/third_party/rust/hyper/src/header/internals/mod.rs b/third_party/rust/hyper/src/header/internals/mod.rs deleted file mode 100644 index 89a655d203ec..000000000000 --- a/third_party/rust/hyper/src/header/internals/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub use self::item::Item; -pub use self::vec_map::{VecMap, Entry}; - -mod cell; -mod item; -mod vec_map; diff --git a/third_party/rust/hyper/src/header/mod.rs b/third_party/rust/hyper/src/header/mod.rs deleted file mode 100644 index 3c35dd9737db..000000000000 --- 
a/third_party/rust/hyper/src/header/mod.rs +++ /dev/null @@ -1,929 +0,0 @@ -//! Headers container, and common header fields. -//! -//! hyper has the opinion that Headers should be strongly-typed, because that's -//! why we're using Rust in the first place. To set or get any header, an object -//! must implement the `Header` trait from this module. Several common headers -//! are already provided, such as `Host`, `ContentType`, `UserAgent`, and others. -//! -//! # Why Typed? -//! -//! Or, why not stringly-typed? Types give the following advantages: -//! -//! - More difficult to typo, since typos in types should be caught by the compiler -//! - Parsing to a proper type by default -//! -//! # Defining Custom Headers -//! -//! Hyper provides many of the most commonly used headers in HTTP. If -//! you need to define a custom header, it's easy to do while still taking -//! advantage of the type system. Hyper includes a `header!` macro for defining -//! many wrapper-style headers. -//! -//! ``` -//! #[macro_use] extern crate hyper; -//! use hyper::header::Headers; -//! header! { (XRequestGuid, "X-Request-Guid") => [String] } -//! -//! fn main () { -//! let mut headers = Headers::new(); -//! -//! headers.set(XRequestGuid("a proper guid".to_owned())) -//! } -//! ``` -//! -//! This works well for simple "string" headers. But the header system -//! actually involves 2 parts: parsing, and formatting. If you need to -//! customize either part, you can do so. -//! -//! ## `Header` and `HeaderFormat` -//! -//! Consider a Do Not Track header. It can be true or false, but it represents -//! that via the numerals `1` and `0`. -//! -//! ``` -//! use std::fmt; -//! use hyper::header::{Header, HeaderFormat}; -//! -//! #[derive(Debug, Clone, Copy)] -//! struct Dnt(bool); -//! -//! impl Header for Dnt { -//! fn header_name() -> &'static str { -//! "DNT" -//! } -//! -//! fn parse_header(raw: &[Vec]) -> hyper::Result { -//! if raw.len() == 1 { -//! let line = &raw[0]; -//! 
if line.len() == 1 { -//! let byte = line[0]; -//! match byte { -//! b'0' => return Ok(Dnt(true)), -//! b'1' => return Ok(Dnt(false)), -//! _ => () -//! } -//! } -//! } -//! Err(hyper::Error::Header) -//! } -//! } -//! -//! impl HeaderFormat for Dnt { -//! fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { -//! if self.0 { -//! f.write_str("1") -//! } else { -//! f.write_str("0") -//! } -//! } -//! } -//! ``` -use std::any::Any; -use std::borrow::{Cow, ToOwned}; -//use std::collections::HashMap; -//use std::collections::hash_map::{Iter, Entry}; -use std::iter::{FromIterator, IntoIterator}; -use std::ops::{Deref, DerefMut}; -use std::{mem, fmt}; - -use {httparse, traitobject}; -use typeable::Typeable; -use unicase::UniCase; - -use self::internals::{Item, VecMap, Entry}; -use self::sealed::Sealed; - -pub use self::shared::*; -pub use self::common::*; - -mod common; -mod internals; -mod shared; -pub mod parsing; - -type HeaderName = UniCase; - -/// A trait for any object that will represent a header field and value. -/// -/// This trait represents the construction and identification of headers, -/// and contains trait-object unsafe methods. -pub trait Header: Clone + Any + Send + Sync { - /// Returns the name of the header field this belongs to. - /// - /// This will become an associated constant once available. - fn header_name() -> &'static str; - /// Parse a header from a raw stream of bytes. - /// - /// It's possible that a request can include a header field more than once, - /// and in that case, the slice will have a length greater than 1. However, - /// it's not necessarily the case that a Header is *allowed* to have more - /// than one field value. If that's the case, you **should** return `None` - /// if `raw.len() > 1`. - fn parse_header(raw: &[Vec]) -> ::Result; - -} - -/// A trait for any object that will represent a header field and value. -/// -/// This trait represents the formatting of a `Header` for output to a TcpStream. 
-pub trait HeaderFormat: fmt::Debug + HeaderClone + Any + Typeable + Send + Sync { - /// Format a header to be output into a TcpStream. - /// - /// This method is not allowed to introduce an Err not produced - /// by the passed-in Formatter. - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result; - - /// Formats a header over multiple lines. - /// - /// The main example here is `Set-Cookie`, which requires that every - /// cookie being set be specified in a separate line. - /// - /// The API here is still being explored, so this is hidden by default. - /// The passed in formatter doesn't have any public methods, so it would - /// be quite difficult to depend on this externally. - #[doc(hidden)] - #[inline] - fn fmt_multi_header(&self, f: &mut MultilineFormatter) -> fmt::Result { - f.fmt_line(&FmtHeader(self)) - } -} - -#[doc(hidden)] -#[allow(missing_debug_implementations)] -pub struct MultilineFormatter<'a, 'b: 'a>(Multi<'a, 'b>); - -enum Multi<'a, 'b: 'a> { - Line(&'a str, &'a mut fmt::Formatter<'b>), - Join(bool, &'a mut fmt::Formatter<'b>), -} - -impl<'a, 'b> MultilineFormatter<'a, 'b> { - fn fmt_line(&mut self, line: &fmt::Display) -> fmt::Result { - use std::fmt::Write; - match self.0 { - Multi::Line(ref name, ref mut f) => { - try!(f.write_str(*name)); - try!(f.write_str(": ")); - try!(write!(NewlineReplacer(*f), "{}", line)); - f.write_str("\r\n") - }, - Multi::Join(ref mut first, ref mut f) => { - if !*first { - try!(f.write_str(", ")); - } else { - *first = false; - } - write!(NewlineReplacer(*f), "{}", line) - } - } - } -} - -// Internal helper to wrap fmt_header into a fmt::Display -struct FmtHeader<'a, H: ?Sized + 'a>(&'a H); - -impl<'a, H: HeaderFormat + ?Sized + 'a> fmt::Display for FmtHeader<'a, H> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt_header(f) - } -} - -struct ValueString<'a>(&'a Item); - -impl<'a> fmt::Display for ValueString<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - 
self.0.write_h1(&mut MultilineFormatter(Multi::Join(true, f))) - } -} - -struct NewlineReplacer<'a, 'b: 'a>(&'a mut fmt::Formatter<'b>); - -impl<'a, 'b> fmt::Write for NewlineReplacer<'a, 'b> { - fn write_str(&mut self, s: &str) -> fmt::Result { - let mut since = 0; - for (i, &byte) in s.as_bytes().iter().enumerate() { - if byte == b'\r' || byte == b'\n' { - try!(self.0.write_str(&s[since..i])); - try!(self.0.write_str(" ")); - since = i + 1; - } - } - if since < s.len() { - self.0.write_str(&s[since..]) - } else { - Ok(()) - } - } -} - -/// Internal implementation detail. -/// -/// This trait is automatically implemented for all types that implement -/// `HeaderFormat + Clone`. No methods are exposed, and so is not useful -/// outside this crate. -pub trait HeaderClone: Sealed {} -impl HeaderClone for T {} - -mod sealed { - use super::HeaderFormat; - - #[doc(hidden)] - pub trait Sealed { - #[doc(hidden)] - fn clone_box(&self) -> Box; - } - - #[doc(hidden)] - impl Sealed for T { - #[inline] - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } - } -} - -impl HeaderFormat + Send + Sync { - #[inline] - unsafe fn downcast_ref_unchecked(&self) -> &T { - mem::transmute(traitobject::data(self)) - } - - #[inline] - unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { - mem::transmute(traitobject::data_mut(self)) - } -} - -impl Clone for Box { - #[inline] - fn clone(&self) -> Box { - self.clone_box() - } -} - -#[inline] -fn header_name() -> &'static str { - ::header_name() -} - -/// A map of header fields on requests and responses. -#[derive(Clone)] -pub struct Headers { - //data: HashMap - data: VecMap, -} - -impl Headers { - - /// Creates a new, empty headers map. 
- pub fn new() -> Headers { - Headers { - data: VecMap::new() - } - } - - #[doc(hidden)] - pub fn from_raw(raw: &[httparse::Header]) -> ::Result { - let mut headers = Headers::new(); - for header in raw { - trace!("raw header: {:?}={:?}", header.name, &header.value[..]); - let name = UniCase(CowStr(Cow::Owned(header.name.to_owned()))); - let item = match headers.data.entry(name) { - Entry::Vacant(entry) => entry.insert(Item::new_raw(vec![])), - Entry::Occupied(entry) => entry.into_mut() - }; - let trim = header.value.iter().rev().take_while(|&&x| x == b' ').count(); - let value = &header.value[.. header.value.len() - trim]; - item.raw_mut().push(value.to_vec()); - } - Ok(headers) - } - - /// Set a header field to the corresponding value. - /// - /// The field is determined by the type of the value being set. - pub fn set(&mut self, value: H) { - trace!("Headers.set( {:?}, {:?} )", header_name::(), value); - self.data.insert(UniCase(CowStr(Cow::Borrowed(header_name::()))), - Item::new_typed(Box::new(value))); - } - - /// Access the raw value of a header. - /// - /// Prefer to use the typed getters instead. - /// - /// Example: - /// - /// ``` - /// # use hyper::header::Headers; - /// # let mut headers = Headers::new(); - /// let raw_content_type = headers.get_raw("content-type"); - /// ``` - pub fn get_raw(&self, name: &str) -> Option<&[Vec]> { - self.data - .get(&UniCase(CowStr(Cow::Borrowed(unsafe { mem::transmute::<&str, &str>(name) })))) - .map(Item::raw) - } - - /// Set the raw value of a header, bypassing any typed headers. - /// - /// Note: This will completely replace any current value for this - /// header name. 
- /// - /// Example: - /// - /// ``` - /// # use hyper::header::Headers; - /// # let mut headers = Headers::new(); - /// headers.set_raw("content-length", vec![b"5".to_vec()]); - /// ``` - pub fn set_raw>>(&mut self, name: K, - value: Vec>) { - let name = name.into(); - trace!("Headers.set_raw( {:?}, {:?} )", name, value); - self.data.insert(UniCase(CowStr(name)), Item::new_raw(value)); - } - - /// Append a value to raw value of this header. - /// - /// If a header already contains a value, this will add another line to it. - /// - /// If a header doesnot exist for this name, a new one will be created with - /// the value. - /// - /// Example: - /// - /// ``` - /// # use hyper::header::Headers; - /// # let mut headers = Headers::new(); - /// headers.append_raw("x-foo", b"bar".to_vec()); - /// headers.append_raw("x-foo", b"quux".to_vec()); - /// ``` - pub fn append_raw>>(&mut self, name: K, value: Vec) { - let name = name.into(); - trace!("Headers.append_raw( {:?}, {:?} )", name, value); - let name = UniCase(CowStr(name)); - if let Some(item) = self.data.get_mut(&name) { - item.raw_mut().push(value); - return; - } - self.data.insert(name, Item::new_raw(vec![value])); - } - - /// Remove a header set by set_raw - pub fn remove_raw(&mut self, name: &str) { - trace!("Headers.remove_raw( {:?} )", name); - self.data.remove( - &UniCase(CowStr(Cow::Borrowed(unsafe { mem::transmute::<&str, &str>(name) }))) - ); - } - - /// Get a reference to the header field's value, if it exists. - pub fn get(&self) -> Option<&H> { - self.data.get(&UniCase(CowStr(Cow::Borrowed(header_name::())))) - .and_then(Item::typed::) - } - - /// Get a mutable reference to the header field's value, if it exists. - pub fn get_mut(&mut self) -> Option<&mut H> { - self.data.get_mut(&UniCase(CowStr(Cow::Borrowed(header_name::())))) - .and_then(Item::typed_mut::) - } - - /// Returns a boolean of whether a certain header is in the map. 
- /// - /// Example: - /// - /// ``` - /// # use hyper::header::Headers; - /// # use hyper::header::ContentType; - /// # let mut headers = Headers::new(); - /// let has_type = headers.has::(); - /// ``` - pub fn has(&self) -> bool { - self.data.contains_key(&UniCase(CowStr(Cow::Borrowed(header_name::())))) - } - - /// Removes a header from the map, if one existed. - /// Returns true if a header has been removed. - pub fn remove(&mut self) -> bool { - trace!("Headers.remove( {:?} )", header_name::()); - self.data.remove(&UniCase(CowStr(Cow::Borrowed(header_name::())))).is_some() - } - - /// Returns an iterator over the header fields. - pub fn iter(&self) -> HeadersItems { - HeadersItems { - inner: self.data.iter() - } - } - - /// Returns the number of headers in the map. - pub fn len(&self) -> usize { - self.data.len() - } - - /// Remove all headers from the map. - pub fn clear(&mut self) { - self.data.clear() - } -} - -impl PartialEq for Headers { - fn eq(&self, other: &Headers) -> bool { - if self.len() != other.len() { - return false; - } - - for header in self.iter() { - match other.get_raw(header.name()) { - Some(val) if val == self.get_raw(header.name()).unwrap() => {}, - _ => { return false; } - } - } - true - } -} - -impl fmt::Display for Headers { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for header in self.iter() { - try!(fmt::Display::fmt(&header, f)); - } - Ok(()) - } -} - -impl fmt::Debug for Headers { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(f.write_str("Headers { ")); - for header in self.iter() { - try!(write!(f, "{:?}, ", header)); - } - try!(f.write_str("}")); - Ok(()) - } -} - -/// An `Iterator` over the fields in a `Headers` map. 
-pub struct HeadersItems<'a> { - inner: ::std::slice::Iter<'a, (HeaderName, Item)> -} - -impl<'a> Iterator for HeadersItems<'a> { - type Item = HeaderView<'a>; - - fn next(&mut self) -> Option> { - self.inner.next().map(|&(ref k, ref v)| HeaderView(k, v)) - } -} - -/// Returned with the `HeadersItems` iterator. -pub struct HeaderView<'a>(&'a HeaderName, &'a Item); - -impl<'a> HeaderView<'a> { - /// Check if a HeaderView is a certain Header. - #[inline] - pub fn is(&self) -> bool { - UniCase(CowStr(Cow::Borrowed(header_name::()))) == *self.0 - } - - /// Get the Header name as a slice. - #[inline] - pub fn name(&self) -> &'a str { - self.0.as_ref() - } - - /// Cast the value to a certain Header type. - #[inline] - pub fn value(&self) -> Option<&'a H> { - self.1.typed::() - } - - /// Get just the header value as a String. - /// - /// This will join multiple values of this header with a `, `. - /// - /// **Warning:** This may not be the format that should be used to send - /// a Request or Response. 
- #[inline] - pub fn value_string(&self) -> String { - ValueString(self.1).to_string() - } -} - -impl<'a> fmt::Display for HeaderView<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.1.write_h1(&mut MultilineFormatter(Multi::Line(&self.0, f))) - } -} - -impl<'a> fmt::Debug for HeaderView<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl<'a> Extend> for Headers { - fn extend>>(&mut self, iter: I) { - for header in iter { - self.data.insert((*header.0).clone(), (*header.1).clone()); - } - } -} - -impl<'a> FromIterator> for Headers { - fn from_iter>>(iter: I) -> Headers { - let mut headers = Headers::new(); - headers.extend(iter); - headers - } -} - -#[deprecated(note="The semantics of formatting a HeaderFormat directly are not clear")] -impl<'a> fmt::Display for &'a (HeaderFormat + Send + Sync) { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut multi = MultilineFormatter(Multi::Join(true, f)); - self.fmt_multi_header(&mut multi) - } -} - -/// A wrapper around any Header with a Display impl that calls fmt_header. -/// -/// This can be used like so: `format!("{}", HeaderFormatter(&header))` to -/// get the 'value string' representation of this Header. -/// -/// Note: This may not necessarily be the value written to stream, such -/// as with the SetCookie header. 
-#[deprecated(note="The semantics of formatting a HeaderFormat directly are not clear")] -pub struct HeaderFormatter<'a, H: HeaderFormat>(pub &'a H); - -#[allow(deprecated)] -impl<'a, H: HeaderFormat> fmt::Display for HeaderFormatter<'a, H> { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut multi = MultilineFormatter(Multi::Join(true, f)); - self.0.fmt_multi_header(&mut multi) - } -} - -#[allow(deprecated)] -impl<'a, H: HeaderFormat> fmt::Debug for HeaderFormatter<'a, H> { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -#[derive(Clone, Hash, Eq, PartialEq, PartialOrd, Ord)] -struct CowStr(Cow<'static, str>); - -impl Deref for CowStr { - type Target = Cow<'static, str>; - - fn deref(&self) -> &Cow<'static, str> { - &self.0 - } -} - -impl fmt::Debug for CowStr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.0, f) - } -} - -impl fmt::Display for CowStr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } -} - -impl DerefMut for CowStr { - fn deref_mut(&mut self) -> &mut Cow<'static, str> { - &mut self.0 - } -} - -impl AsRef for CowStr { - fn as_ref(&self) -> &str { - self - } -} - - -#[cfg(test)] -mod tests { - use std::fmt; - use mime::Mime; - use mime::TopLevel::Text; - use mime::SubLevel::Plain; - use super::{Headers, Header, HeaderFormat, ContentLength, ContentType, - Accept, Host, qitem}; - use httparse; - - #[cfg(feature = "nightly")] - use test::Bencher; - - // Slice.position_elem was unstable - fn index_of(slice: &[u8], byte: u8) -> Option { - for (index, &b) in slice.iter().enumerate() { - if b == byte { - return Some(index); - } - } - None - } - - macro_rules! raw { - ($($line:expr),*) => ({ - [$({ - let line = $line; - let pos = index_of(line, b':').expect("raw splits on ':', not found"); - httparse::Header { - name: ::std::str::from_utf8(&line[..pos]).unwrap(), - value: &line[pos + 2..] 
- } - }),*] - }) - } - - #[test] - fn test_from_raw() { - let headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); - assert_eq!(headers.get(), Some(&ContentLength(10))); - } - - #[test] - fn test_content_type() { - let content_type = Header::parse_header([b"text/plain".to_vec()].as_ref()); - assert_eq!(content_type.ok(), Some(ContentType(Mime(Text, Plain, vec![])))); - } - - #[test] - fn test_accept() { - let text_plain = qitem(Mime(Text, Plain, vec![])); - let application_vendor = "application/vnd.github.v3.full+json; q=0.5".parse().unwrap(); - - let accept = Header::parse_header([b"text/plain".to_vec()].as_ref()); - assert_eq!(accept.ok(), Some(Accept(vec![text_plain.clone()]))); - - let bytevec = [b"application/vnd.github.v3.full+json; q=0.5, text/plain".to_vec()]; - let accept = Header::parse_header(bytevec.as_ref()); - assert_eq!(accept.ok(), Some(Accept(vec![application_vendor, text_plain]))); - } - - #[derive(Clone, PartialEq, Debug)] - struct CrazyLength(Option, usize); - - impl Header for CrazyLength { - fn header_name() -> &'static str { - "content-length" - } - fn parse_header(raw: &[Vec]) -> ::Result { - use std::str::from_utf8; - use std::str::FromStr; - - if raw.len() != 1 { - return Err(::Error::Header); - } - // we JUST checked that raw.len() == 1, so raw[0] WILL exist. - match match from_utf8(unsafe { &raw.get_unchecked(0)[..] 
}) { - Ok(s) => FromStr::from_str(s).ok(), - Err(_) => None - }.map(|u| CrazyLength(Some(false), u)) { - Some(x) => Ok(x), - None => Err(::Error::Header), - } - } - } - - impl HeaderFormat for CrazyLength { - fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { - let CrazyLength(ref opt, ref value) = *self; - write!(f, "{:?}, {:?}", opt, value) - } - } - - #[test] - fn test_different_structs_for_same_header() { - let headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); - assert_eq!(headers.get::(), Some(&ContentLength(10))); - assert_eq!(headers.get::(), Some(&CrazyLength(Some(false), 10))); - } - - #[test] - fn test_trailing_whitespace() { - let headers = Headers::from_raw(&raw!(b"Content-Length: 10 ")).unwrap(); - assert_eq!(headers.get::(), Some(&ContentLength(10))); - } - - #[test] - fn test_multiple_reads() { - let headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); - let ContentLength(one) = *headers.get::().unwrap(); - let ContentLength(two) = *headers.get::().unwrap(); - assert_eq!(one, two); - } - - #[test] - fn test_different_reads() { - let headers = Headers::from_raw( - &raw!(b"Content-Length: 10", b"Content-Type: text/plain")).unwrap(); - let ContentLength(_) = *headers.get::().unwrap(); - let ContentType(_) = *headers.get::().unwrap(); - } - - #[test] - fn test_get_mutable() { - let mut headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); - *headers.get_mut::().unwrap() = ContentLength(20); - assert_eq!(headers.get_raw("content-length").unwrap(), &[b"20".to_vec()][..]); - assert_eq!(*headers.get::().unwrap(), ContentLength(20)); - } - - #[test] - fn test_headers_fmt() { - let mut headers = Headers::new(); - headers.set(ContentLength(15)); - headers.set(Host { hostname: "foo.bar".to_owned(), port: None }); - - let s = headers.to_string(); - assert!(s.contains("Host: foo.bar\r\n")); - assert!(s.contains("Content-Length: 15\r\n")); - } - - #[test] - fn test_headers_fmt_raw() { - let mut headers = 
Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); - headers.set_raw("x-foo", vec![b"foo".to_vec(), b"bar".to_vec()]); - let s = headers.to_string(); - assert_eq!(s, "Content-Length: 10\r\nx-foo: foo\r\nx-foo: bar\r\n"); - } - - #[test] - fn test_set_raw() { - let mut headers = Headers::new(); - headers.set(ContentLength(10)); - headers.set_raw("content-LENGTH", vec![b"20".to_vec()]); - assert_eq!(headers.get_raw("Content-length").unwrap(), &[b"20".to_vec()][..]); - assert_eq!(headers.get(), Some(&ContentLength(20))); - } - - #[test] - fn test_append_raw() { - let mut headers = Headers::new(); - headers.set(ContentLength(10)); - headers.append_raw("content-LENGTH", b"20".to_vec()); - assert_eq!(headers.get_raw("Content-length").unwrap(), &[b"10".to_vec(), b"20".to_vec()][..]); - headers.append_raw("x-foo", b"bar".to_vec()); - assert_eq!(headers.get_raw("x-foo"), Some(&[b"bar".to_vec()][..])); - } - - #[test] - fn test_remove_raw() { - let mut headers = Headers::new(); - headers.set_raw("content-LENGTH", vec![b"20".to_vec()]); - headers.remove_raw("content-LENGTH"); - assert_eq!(headers.get_raw("Content-length"), None); - } - - #[test] - fn test_len() { - let mut headers = Headers::new(); - headers.set(ContentLength(10)); - assert_eq!(headers.len(), 1); - headers.set(ContentType(Mime(Text, Plain, vec![]))); - assert_eq!(headers.len(), 2); - // Redundant, should not increase count. 
- headers.set(ContentLength(20)); - assert_eq!(headers.len(), 2); - } - - #[test] - fn test_clear() { - let mut headers = Headers::new(); - headers.set(ContentLength(10)); - headers.set(ContentType(Mime(Text, Plain, vec![]))); - assert_eq!(headers.len(), 2); - headers.clear(); - assert_eq!(headers.len(), 0); - } - - #[test] - fn test_iter() { - let mut headers = Headers::new(); - headers.set(ContentLength(11)); - for header in headers.iter() { - assert!(header.is::()); - assert_eq!(header.name(), ::header_name()); - assert_eq!(header.value(), Some(&ContentLength(11))); - assert_eq!(header.value_string(), "11".to_owned()); - } - } - - #[test] - fn test_header_view_value_string() { - let mut headers = Headers::new(); - headers.set_raw("foo", vec![b"one".to_vec(), b"two".to_vec()]); - for header in headers.iter() { - assert_eq!(header.name(), "foo"); - assert_eq!(header.value_string(), "one, two"); - } - } - - #[test] - fn test_eq() { - let mut headers1 = Headers::new(); - let mut headers2 = Headers::new(); - - assert_eq!(headers1, headers2); - - headers1.set(ContentLength(11)); - headers2.set(Host {hostname: "foo.bar".to_owned(), port: None}); - assert!(headers1 != headers2); - - headers1 = Headers::new(); - headers2 = Headers::new(); - - headers1.set(ContentLength(11)); - headers2.set(ContentLength(11)); - assert_eq!(headers1, headers2); - - headers1.set(ContentLength(10)); - assert!(headers1 != headers2); - - headers1 = Headers::new(); - headers2 = Headers::new(); - - headers1.set(Host { hostname: "foo.bar".to_owned(), port: None }); - headers1.set(ContentLength(11)); - headers2.set(ContentLength(11)); - assert!(headers1 != headers2); - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_new(b: &mut Bencher) { - b.iter(|| { - let mut h = Headers::new(); - h.set(ContentLength(11)); - h - }) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_from_raw(b: &mut Bencher) { - let raw = raw!(b"Content-Length: 10"); - b.iter(|| 
Headers::from_raw(&raw).unwrap()) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_get(b: &mut Bencher) { - let mut headers = Headers::new(); - headers.set(ContentLength(11)); - b.iter(|| assert_eq!(headers.get::(), Some(&ContentLength(11)))) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_get_miss(b: &mut Bencher) { - let headers = Headers::new(); - b.iter(|| assert!(headers.get::().is_none())) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_set(b: &mut Bencher) { - let mut headers = Headers::new(); - b.iter(|| headers.set(ContentLength(12))) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_has(b: &mut Bencher) { - let mut headers = Headers::new(); - headers.set(ContentLength(11)); - b.iter(|| assert!(headers.has::())) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_view_is(b: &mut Bencher) { - let mut headers = Headers::new(); - headers.set(ContentLength(11)); - let mut iter = headers.iter(); - let view = iter.next().unwrap(); - b.iter(|| assert!(view.is::())) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_headers_fmt(b: &mut Bencher) { - let mut headers = Headers::new(); - headers.set(ContentLength(11)); - b.iter(|| headers.to_string()) - } -} diff --git a/third_party/rust/hyper/src/header/parsing.rs b/third_party/rust/hyper/src/header/parsing.rs deleted file mode 100644 index 38c09db5e73e..000000000000 --- a/third_party/rust/hyper/src/header/parsing.rs +++ /dev/null @@ -1,224 +0,0 @@ -//! Utility functions for Header implementations. - -use language_tags::LanguageTag; -use std::str; -use std::str::FromStr; -use std::fmt::{self, Display}; -use url::percent_encoding; - -use header::shared::Charset; - -/// Reads a single raw string when parsing a header. -pub fn from_one_raw_str(raw: &[Vec]) -> ::Result { - if raw.len() != 1 || unsafe { raw.get_unchecked(0) } == b"" { return Err(::Error::Header) } - // we JUST checked that raw.len() == 1, so raw[0] WILL exist. 
- from_raw_str(& unsafe { raw.get_unchecked(0) }) -} - -/// Reads a raw string into a value. -pub fn from_raw_str(raw: &[u8]) -> ::Result { - let s = try!(str::from_utf8(raw)); - T::from_str(s).or(Err(::Error::Header)) -} - -/// Reads a comma-delimited raw header into a Vec. -#[inline] -pub fn from_comma_delimited>(raw: &[S]) -> ::Result> { - let mut result = Vec::new(); - for s in raw { - let s = try!(str::from_utf8(s.as_ref())); - result.extend(s.split(',') - .filter_map(|x| match x.trim() { - "" => None, - y => Some(y) - }) - .filter_map(|x| x.parse().ok())) - } - Ok(result) -} - -/// Format an array into a comma-delimited string. -pub fn fmt_comma_delimited(f: &mut fmt::Formatter, parts: &[T]) -> fmt::Result { - for (i, part) in parts.iter().enumerate() { - if i != 0 { - try!(f.write_str(", ")); - } - try!(Display::fmt(part, f)); - } - Ok(()) -} - -/// An extended header parameter value (i.e., tagged with a character set and optionally, -/// a language), as defined in [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2). -#[derive(Clone, Debug, PartialEq)] -pub struct ExtendedValue { - /// The character set that is used to encode the `value` to a string. - pub charset: Charset, - /// The human language details of the `value`, if available. - pub language_tag: Option, - /// The parameter value, as expressed in octets. - pub value: Vec, -} - -/// Parses extended header parameter values (`ext-value`), as defined in -/// [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2). -/// -/// Extended values are denoted by parameter names that end with `*`. -/// -/// ## ABNF -/// ```plain -/// ext-value = charset "'" [ language ] "'" value-chars -/// ; like RFC 2231's -/// ; (see [RFC2231], Section 7) -/// -/// charset = "UTF-8" / "ISO-8859-1" / mime-charset -/// -/// mime-charset = 1*mime-charsetc -/// mime-charsetc = ALPHA / DIGIT -/// / "!" 
/ "#" / "$" / "%" / "&" -/// / "+" / "-" / "^" / "_" / "`" -/// / "{" / "}" / "~" -/// ; as in Section 2.3 of [RFC2978] -/// ; except that the single quote is not included -/// ; SHOULD be registered in the IANA charset registry -/// -/// language = -/// -/// value-chars = *( pct-encoded / attr-char ) -/// -/// pct-encoded = "%" HEXDIG HEXDIG -/// ; see [RFC3986], Section 2.1 -/// -/// attr-char = ALPHA / DIGIT -/// / "!" / "#" / "$" / "&" / "+" / "-" / "." -/// / "^" / "_" / "`" / "|" / "~" -/// ; token except ( "*" / "'" / "%" ) -/// ``` -pub fn parse_extended_value(val: &str) -> ::Result { - - // Break into three pieces separated by the single-quote character - let mut parts = val.splitn(3,'\''); - - // Interpret the first piece as a Charset - let charset: Charset = match parts.next() { - None => return Err(::Error::Header), - Some(n) => try!(FromStr::from_str(n)), - }; - - // Interpret the second piece as a language tag - let lang: Option = match parts.next() { - None => return Err(::Error::Header), - Some("") => None, - Some(s) => match s.parse() { - Ok(lt) => Some(lt), - Err(_) => return Err(::Error::Header), - } - }; - - // Interpret the third piece as a sequence of value characters - let value: Vec = match parts.next() { - None => return Err(::Error::Header), - Some(v) => percent_encoding::percent_decode(v.as_bytes()).collect(), - }; - - Ok(ExtendedValue { - charset: charset, - language_tag: lang, - value: value, - }) -} - -define_encode_set! 
{ - /// This encode set is used for HTTP header values and is defined at - /// https://tools.ietf.org/html/rfc5987#section-3.2 - pub HTTP_VALUE = [percent_encoding::SIMPLE_ENCODE_SET] | { - ' ', '"', '%', '\'', '(', ')', '*', ',', '/', ':', ';', '<', '-', '>', '?', - '[', '\\', ']', '{', '}' - } -} - -impl Display for ExtendedValue { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let encoded_value = - percent_encoding::percent_encode(&self.value[..], HTTP_VALUE); - if let Some(ref lang) = self.language_tag { - write!(f, "{}'{}'{}", self.charset, lang, encoded_value) - } else { - write!(f, "{}''{}", self.charset, encoded_value) - } - } -} - -#[cfg(test)] -mod tests { - use header::shared::Charset; - use super::{ExtendedValue, parse_extended_value}; - - #[test] - fn test_parse_extended_value_with_encoding_and_language_tag() { - let expected_language_tag = langtag!(en); - // RFC 5987, Section 3.2.2 - // Extended notation, using the Unicode character U+00A3 (POUND SIGN) - let result = parse_extended_value("iso-8859-1'en'%A3%20rates"); - assert!(result.is_ok()); - let extended_value = result.unwrap(); - assert_eq!(Charset::Iso_8859_1, extended_value.charset); - assert!(extended_value.language_tag.is_some()); - assert_eq!(expected_language_tag, extended_value.language_tag.unwrap()); - assert_eq!(vec![163, b' ', b'r', b'a', b't', b'e', b's'], extended_value.value); - } - - #[test] - fn test_parse_extended_value_with_encoding() { - // RFC 5987, Section 3.2.2 - // Extended notation, using the Unicode characters U+00A3 (POUND SIGN) - // and U+20AC (EURO SIGN) - let result = parse_extended_value("UTF-8''%c2%a3%20and%20%e2%82%ac%20rates"); - assert!(result.is_ok()); - let extended_value = result.unwrap(); - assert_eq!(Charset::Ext("UTF-8".to_string()), extended_value.charset); - assert!(extended_value.language_tag.is_none()); - assert_eq!(vec![194, 163, b' ', b'a', b'n', b'd', b' ', 226, 130, 172, b' ', b'r', b'a', b't', b'e', b's'], extended_value.value); - } - - 
#[test] - fn test_parse_extended_value_missing_language_tag_and_encoding() { - // From: https://greenbytes.de/tech/tc2231/#attwithfn2231quot2 - let result = parse_extended_value("foo%20bar.html"); - assert!(result.is_err()); - } - - #[test] - fn test_parse_extended_value_partially_formatted() { - let result = parse_extended_value("UTF-8'missing third part"); - assert!(result.is_err()); - } - - #[test] - fn test_parse_extended_value_partially_formatted_blank() { - let result = parse_extended_value("blank second part'"); - assert!(result.is_err()); - } - - #[test] - fn test_fmt_extended_value_with_encoding_and_language_tag() { - let extended_value = ExtendedValue { - charset: Charset::Iso_8859_1, - language_tag: Some("en".parse().expect("Could not parse language tag")), - value: vec![163, b' ', b'r', b'a', b't', b'e', b's'], - }; - assert_eq!("ISO-8859-1'en'%A3%20rates", format!("{}", extended_value)); - } - - #[test] - fn test_fmt_extended_value_with_encoding() { - let extended_value = ExtendedValue { - charset: Charset::Ext("UTF-8".to_string()), - language_tag: None, - value: vec![194, 163, b' ', b'a', b'n', b'd', b' ', 226, 130, 172, b' ', b'r', b'a', - b't', b'e', b's'], - }; - assert_eq!("UTF-8''%C2%A3%20and%20%E2%82%AC%20rates", - format!("{}", extended_value)); - } -} diff --git a/third_party/rust/hyper/src/header/shared/charset.rs b/third_party/rust/hyper/src/header/shared/charset.rs deleted file mode 100644 index 070c3bb8e8ae..000000000000 --- a/third_party/rust/hyper/src/header/shared/charset.rs +++ /dev/null @@ -1,151 +0,0 @@ -use std::fmt::{self, Display}; -use std::str::FromStr; -use std::ascii::AsciiExt; - -use self::Charset::*; - -/// A Mime charset. -/// -/// The string representation is normalised to upper case. 
-/// -/// See http://www.iana.org/assignments/character-sets/character-sets.xhtml -#[derive(Clone,Debug,PartialEq)] -#[allow(non_camel_case_types)] -pub enum Charset{ - /// US ASCII - Us_Ascii, - /// ISO-8859-1 - Iso_8859_1, - /// ISO-8859-2 - Iso_8859_2, - /// ISO-8859-3 - Iso_8859_3, - /// ISO-8859-4 - Iso_8859_4, - /// ISO-8859-5 - Iso_8859_5, - /// ISO-8859-6 - Iso_8859_6, - /// ISO-8859-7 - Iso_8859_7, - /// ISO-8859-8 - Iso_8859_8, - /// ISO-8859-9 - Iso_8859_9, - /// ISO-8859-10 - Iso_8859_10, - /// Shift_JIS - Shift_Jis, - /// EUC-JP - Euc_Jp, - /// ISO-2022-KR - Iso_2022_Kr, - /// EUC-KR - Euc_Kr, - /// ISO-2022-JP - Iso_2022_Jp, - /// ISO-2022-JP-2 - Iso_2022_Jp_2, - /// ISO-8859-6-E - Iso_8859_6_E, - /// ISO-8859-6-I - Iso_8859_6_I, - /// ISO-8859-8-E - Iso_8859_8_E, - /// ISO-8859-8-I - Iso_8859_8_I, - /// GB2312 - Gb2312, - /// Big5 - Big5, - /// KOI8-R - Koi8_R, - /// An arbitrary charset specified as a string - Ext(String) -} - -impl Charset { - fn name(&self) -> &str { - match *self { - Us_Ascii => "US-ASCII", - Iso_8859_1 => "ISO-8859-1", - Iso_8859_2 => "ISO-8859-2", - Iso_8859_3 => "ISO-8859-3", - Iso_8859_4 => "ISO-8859-4", - Iso_8859_5 => "ISO-8859-5", - Iso_8859_6 => "ISO-8859-6", - Iso_8859_7 => "ISO-8859-7", - Iso_8859_8 => "ISO-8859-8", - Iso_8859_9 => "ISO-8859-9", - Iso_8859_10 => "ISO-8859-10", - Shift_Jis => "Shift-JIS", - Euc_Jp => "EUC-JP", - Iso_2022_Kr => "ISO-2022-KR", - Euc_Kr => "EUC-KR", - Iso_2022_Jp => "ISO-2022-JP", - Iso_2022_Jp_2 => "ISO-2022-JP-2", - Iso_8859_6_E => "ISO-8859-6-E", - Iso_8859_6_I => "ISO-8859-6-I", - Iso_8859_8_E => "ISO-8859-8-E", - Iso_8859_8_I => "ISO-8859-8-I", - Gb2312 => "GB2312", - Big5 => "5", - Koi8_R => "KOI8-R", - Ext(ref s) => &s - } - } -} - -impl Display for Charset { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.name()) - } -} - -impl FromStr for Charset { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - Ok(match s.to_ascii_uppercase().as_ref() { - 
"US-ASCII" => Us_Ascii, - "ISO-8859-1" => Iso_8859_1, - "ISO-8859-2" => Iso_8859_2, - "ISO-8859-3" => Iso_8859_3, - "ISO-8859-4" => Iso_8859_4, - "ISO-8859-5" => Iso_8859_5, - "ISO-8859-6" => Iso_8859_6, - "ISO-8859-7" => Iso_8859_7, - "ISO-8859-8" => Iso_8859_8, - "ISO-8859-9" => Iso_8859_9, - "ISO-8859-10" => Iso_8859_10, - "SHIFT-JIS" => Shift_Jis, - "EUC-JP" => Euc_Jp, - "ISO-2022-KR" => Iso_2022_Kr, - "EUC-KR" => Euc_Kr, - "ISO-2022-JP" => Iso_2022_Jp, - "ISO-2022-JP-2" => Iso_2022_Jp_2, - "ISO-8859-6-E" => Iso_8859_6_E, - "ISO-8859-6-I" => Iso_8859_6_I, - "ISO-8859-8-E" => Iso_8859_8_E, - "ISO-8859-8-I" => Iso_8859_8_I, - "GB2312" => Gb2312, - "5" => Big5, - "KOI8-R" => Koi8_R, - s => Ext(s.to_owned()) - }) - } -} - -#[test] -fn test_parse() { - assert_eq!(Us_Ascii,"us-ascii".parse().unwrap()); - assert_eq!(Us_Ascii,"US-Ascii".parse().unwrap()); - assert_eq!(Us_Ascii,"US-ASCII".parse().unwrap()); - assert_eq!(Shift_Jis,"Shift-JIS".parse().unwrap()); - assert_eq!(Ext("ABCD".to_owned()),"abcd".parse().unwrap()); -} - -#[test] -fn test_display() { - assert_eq!("US-ASCII", format!("{}", Us_Ascii)); - assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned()))); -} diff --git a/third_party/rust/hyper/src/header/shared/encoding.rs b/third_party/rust/hyper/src/header/shared/encoding.rs deleted file mode 100644 index b8bf32f12742..000000000000 --- a/third_party/rust/hyper/src/header/shared/encoding.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::fmt; -use std::str; - -pub use self::Encoding::{Chunked, Gzip, Deflate, Compress, Identity, EncodingExt}; - -/// A value to represent an encoding used in `Transfer-Encoding` -/// or `Accept-Encoding` header. -#[derive(Clone, PartialEq, Debug)] -pub enum Encoding { - /// The `chunked` encoding. - Chunked, - /// The `gzip` encoding. - Gzip, - /// The `deflate` encoding. - Deflate, - /// The `compress` encoding. - Compress, - /// The `identity` encoding. - Identity, - /// Some other encoding that is less common, can be any String. 
- EncodingExt(String) -} - -impl fmt::Display for Encoding { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - Chunked => "chunked", - Gzip => "gzip", - Deflate => "deflate", - Compress => "compress", - Identity => "identity", - EncodingExt(ref s) => s.as_ref() - }) - } -} - -impl str::FromStr for Encoding { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - match s { - "chunked" => Ok(Chunked), - "deflate" => Ok(Deflate), - "gzip" => Ok(Gzip), - "compress" => Ok(Compress), - "identity" => Ok(Identity), - _ => Ok(EncodingExt(s.to_owned())) - } - } -} diff --git a/third_party/rust/hyper/src/header/shared/entity.rs b/third_party/rust/hyper/src/header/shared/entity.rs deleted file mode 100644 index 0d51d5cf9358..000000000000 --- a/third_party/rust/hyper/src/header/shared/entity.rs +++ /dev/null @@ -1,215 +0,0 @@ -use std::str::FromStr; -use std::fmt::{self, Display}; - -// check that each char in the slice is either: -// 1. %x21, or -// 2. in the range %x23 to %x7E, or -// 3. in the range %x80 to %xFF -fn check_slice_validity(slice: &str) -> bool { - slice.bytes().all(|c| - c == b'\x21' || (c >= b'\x23' && c <= b'\x7e') | (c >= b'\x80' && c <= b'\xff')) -} - -/// An entity tag, defined in [RFC7232](https://tools.ietf.org/html/rfc7232#section-2.3) -/// -/// An entity tag consists of a string enclosed by two literal double quotes. -/// Preceding the first double quote is an optional weakness indicator, -/// which always looks like `W/`. Examples for valid tags are `"xyzzy"` and `W/"xyzzy"`. -/// -/// # ABNF -/// ```plain -/// entity-tag = [ weak ] opaque-tag -/// weak = %x57.2F ; "W/", case-sensitive -/// opaque-tag = DQUOTE *etagc DQUOTE -/// etagc = %x21 / %x23-7E / obs-text -/// ; VCHAR except double quotes, plus obs-text -/// ``` -/// -/// # Comparison -/// To check if two entity tags are equivalent in an application always use the `strong_eq` or -/// `weak_eq` methods based on the context of the Tag. 
Only use `==` to check if two tags are -/// identical. -/// -/// The example below shows the results for a set of entity-tag pairs and -/// both the weak and strong comparison function results: -/// -/// | ETag 1 | ETag 2 | Strong Comparison | Weak Comparison | -/// |---------|---------|-------------------|-----------------| -/// | `W/"1"` | `W/"1"` | no match | match | -/// | `W/"1"` | `W/"2"` | no match | no match | -/// | `W/"1"` | `"1"` | no match | match | -/// | `"1"` | `"1"` | match | match | -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct EntityTag { - /// Weakness indicator for the tag - pub weak: bool, - /// The opaque string in between the DQUOTEs - tag: String -} - -impl EntityTag { - /// Constructs a new EntityTag. - /// # Panics - /// If the tag contains invalid characters. - pub fn new(weak: bool, tag: String) -> EntityTag { - assert!(check_slice_validity(&tag), "Invalid tag: {:?}", tag); - EntityTag { weak: weak, tag: tag } - } - - /// Constructs a new weak EntityTag. - /// # Panics - /// If the tag contains invalid characters. - pub fn weak(tag: String) -> EntityTag { - EntityTag::new(true, tag) - } - - /// Constructs a new strong EntityTag. - /// # Panics - /// If the tag contains invalid characters. - pub fn strong(tag: String) -> EntityTag { - EntityTag::new(false, tag) - } - - /// Get the tag. - pub fn tag(&self) -> &str { - self.tag.as_ref() - } - - /// Set the tag. - /// # Panics - /// If the tag contains invalid characters. - pub fn set_tag(&mut self, tag: String) { - assert!(check_slice_validity(&tag), "Invalid tag: {:?}", tag); - self.tag = tag - } - - /// For strong comparison two entity-tags are equivalent if both are not weak and their - /// opaque-tags match character-by-character. 
- pub fn strong_eq(&self, other: &EntityTag) -> bool { - !self.weak && !other.weak && self.tag == other.tag - } - - /// For weak comparison two entity-tags are equivalent if their - /// opaque-tags match character-by-character, regardless of either or - /// both being tagged as "weak". - pub fn weak_eq(&self, other: &EntityTag) -> bool { - self.tag == other.tag - } - - /// The inverse of `EntityTag.strong_eq()`. - pub fn strong_ne(&self, other: &EntityTag) -> bool { - !self.strong_eq(other) - } - - /// The inverse of `EntityTag.weak_eq()`. - pub fn weak_ne(&self, other: &EntityTag) -> bool { - !self.weak_eq(other) - } -} - -impl Display for EntityTag { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.weak { - write!(f, "W/\"{}\"", self.tag) - } else { - write!(f, "\"{}\"", self.tag) - } - } -} - -impl FromStr for EntityTag { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - let length: usize = s.len(); - let slice = &s[..]; - // Early exits if it doesn't terminate in a DQUOTE. - if !slice.ends_with('"') { - return Err(::Error::Header); - } - // The etag is weak if its first char is not a DQUOTE. - if slice.starts_with('"') && check_slice_validity(&slice[1..length-1]) { - // No need to check if the last char is a DQUOTE, - // we already did that above. 
- return Ok(EntityTag { weak: false, tag: slice[1..length-1].to_owned() }); - } else if slice.starts_with("W/\"") && check_slice_validity(&slice[3..length-1]) { - return Ok(EntityTag { weak: true, tag: slice[3..length-1].to_owned() }); - } - Err(::Error::Header) - } -} - -#[cfg(test)] -mod tests { - use super::EntityTag; - - #[test] - fn test_etag_parse_success() { - // Expected success - assert_eq!("\"foobar\"".parse::().unwrap(), - EntityTag::strong("foobar".to_owned())); - assert_eq!("\"\"".parse::().unwrap(), - EntityTag::strong("".to_owned())); - assert_eq!("W/\"weaktag\"".parse::().unwrap(), - EntityTag::weak("weaktag".to_owned())); - assert_eq!("W/\"\x65\x62\"".parse::().unwrap(), - EntityTag::weak("\x65\x62".to_owned())); - assert_eq!("W/\"\"".parse::().unwrap(), EntityTag::weak("".to_owned())); - } - - #[test] - fn test_etag_parse_failures() { - // Expected failures - assert!("no-dquotes".parse::().is_err()); - assert!("w/\"the-first-w-is-case-sensitive\"".parse::().is_err()); - assert!("".parse::().is_err()); - assert!("\"unmatched-dquotes1".parse::().is_err()); - assert!("unmatched-dquotes2\"".parse::().is_err()); - assert!("matched-\"dquotes\"".parse::().is_err()); - } - - #[test] - fn test_etag_fmt() { - assert_eq!(format!("{}", EntityTag::strong("foobar".to_owned())), "\"foobar\""); - assert_eq!(format!("{}", EntityTag::strong("".to_owned())), "\"\""); - assert_eq!(format!("{}", EntityTag::weak("weak-etag".to_owned())), "W/\"weak-etag\""); - assert_eq!(format!("{}", EntityTag::weak("\u{0065}".to_owned())), "W/\"\x65\""); - assert_eq!(format!("{}", EntityTag::weak("".to_owned())), "W/\"\""); - } - - #[test] - fn test_cmp() { - // | ETag 1 | ETag 2 | Strong Comparison | Weak Comparison | - // |---------|---------|-------------------|-----------------| - // | `W/"1"` | `W/"1"` | no match | match | - // | `W/"1"` | `W/"2"` | no match | no match | - // | `W/"1"` | `"1"` | no match | match | - // | `"1"` | `"1"` | match | match | - let mut etag1 = 
EntityTag::weak("1".to_owned()); - let mut etag2 = EntityTag::weak("1".to_owned()); - assert!(!etag1.strong_eq(&etag2)); - assert!(etag1.weak_eq(&etag2)); - assert!(etag1.strong_ne(&etag2)); - assert!(!etag1.weak_ne(&etag2)); - - etag1 = EntityTag::weak("1".to_owned()); - etag2 = EntityTag::weak("2".to_owned()); - assert!(!etag1.strong_eq(&etag2)); - assert!(!etag1.weak_eq(&etag2)); - assert!(etag1.strong_ne(&etag2)); - assert!(etag1.weak_ne(&etag2)); - - etag1 = EntityTag::weak("1".to_owned()); - etag2 = EntityTag::strong("1".to_owned()); - assert!(!etag1.strong_eq(&etag2)); - assert!(etag1.weak_eq(&etag2)); - assert!(etag1.strong_ne(&etag2)); - assert!(!etag1.weak_ne(&etag2)); - - etag1 = EntityTag::strong("1".to_owned()); - etag2 = EntityTag::strong("1".to_owned()); - assert!(etag1.strong_eq(&etag2)); - assert!(etag1.weak_eq(&etag2)); - assert!(!etag1.strong_ne(&etag2)); - assert!(!etag1.weak_ne(&etag2)); - } -} diff --git a/third_party/rust/hyper/src/header/shared/httpdate.rs b/third_party/rust/hyper/src/header/shared/httpdate.rs deleted file mode 100644 index 1a2dee813f00..000000000000 --- a/third_party/rust/hyper/src/header/shared/httpdate.rs +++ /dev/null @@ -1,91 +0,0 @@ -use std::str::FromStr; -use std::fmt::{self, Display}; - -use time; - -/// A `time::Time` with HTTP formatting and parsing -/// -// Prior to 1995, there were three different formats commonly used by -// servers to communicate timestamps. For compatibility with old -// implementations, all three are defined here. The preferred format is -// a fixed-length and single-zone subset of the date and time -// specification used by the Internet Message Format [RFC5322]. 
-// -// HTTP-date = IMF-fixdate / obs-date -// -// An example of the preferred format is -// -// Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate -// -// Examples of the two obsolete formats are -// -// Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format -// Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format -// -// A recipient that parses a timestamp value in an HTTP header field -// MUST accept all three HTTP-date formats. When a sender generates a -// header field that contains one or more timestamps defined as -// HTTP-date, the sender MUST generate those timestamps in the -// IMF-fixdate format. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct HttpDate(pub time::Tm); - -impl FromStr for HttpDate { - type Err = ::Error; - fn from_str(s: &str) -> ::Result { - match time::strptime(s, "%a, %d %b %Y %T %Z").or_else(|_| { - time::strptime(s, "%A, %d-%b-%y %T %Z") - }).or_else(|_| { - time::strptime(s, "%c") - }) { - Ok(t) => Ok(HttpDate(t)), - Err(_) => Err(::Error::Header), - } - } -} - -impl Display for HttpDate { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0.to_utc().rfc822(), f) - } -} - -#[cfg(test)] -mod tests { - use time::Tm; - use super::HttpDate; - - const NOV_07: HttpDate = HttpDate(Tm { - tm_nsec: 0, - tm_sec: 37, - tm_min: 48, - tm_hour: 8, - tm_mday: 7, - tm_mon: 10, - tm_year: 94, - tm_wday: 0, - tm_isdst: 0, - tm_yday: 0, - tm_utcoff: 0, - }); - - #[test] - fn test_imf_fixdate() { - assert_eq!("Sun, 07 Nov 1994 08:48:37 GMT".parse::().unwrap(), NOV_07); - } - - #[test] - fn test_rfc_850() { - assert_eq!("Sunday, 07-Nov-94 08:48:37 GMT".parse::().unwrap(), NOV_07); - } - - #[test] - fn test_asctime() { - assert_eq!("Sun Nov 7 08:48:37 1994".parse::().unwrap(), NOV_07); - } - - #[test] - fn test_no_date() { - assert!("this-is-no-date".parse::().is_err()); - } -} diff --git a/third_party/rust/hyper/src/header/shared/mod.rs b/third_party/rust/hyper/src/header/shared/mod.rs deleted file 
mode 100644 index d2c2355bc0cd..000000000000 --- a/third_party/rust/hyper/src/header/shared/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub use self::charset::Charset; -pub use self::encoding::Encoding; -pub use self::entity::EntityTag; -pub use self::httpdate::HttpDate; -pub use self::quality_item::{Quality, QualityItem, qitem, q}; - -mod charset; -mod encoding; -mod entity; -mod httpdate; -mod quality_item; diff --git a/third_party/rust/hyper/src/header/shared/quality_item.rs b/third_party/rust/hyper/src/header/shared/quality_item.rs deleted file mode 100644 index 70089e4d8bb3..000000000000 --- a/third_party/rust/hyper/src/header/shared/quality_item.rs +++ /dev/null @@ -1,215 +0,0 @@ -use std::cmp; -use std::default::Default; -use std::fmt; -use std::str; - -/// Represents a quality used in quality values. -/// -/// Can be created with the `q` function. -/// -/// # Implementation notes -/// -/// The quality value is defined as a number between 0 and 1 with three decimal places. This means -/// there are 1000 possible values. Since floating point numbers are not exact and the smallest -/// floating point data type (`f32`) consumes four bytes, hyper uses an `u16` value to store the -/// quality internally. For performance reasons you may set quality directly to a value between -/// 0 and 1000 e.g. `Quality(532)` matches the quality `q=0.532`. -/// -/// [RFC7231 Section 5.3.1](https://tools.ietf.org/html/rfc7231#section-5.3.1) -/// gives more information on quality values in HTTP header fields. 
-#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] -pub struct Quality(pub u16); - -impl fmt::Display for Quality { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.0 { - 1000 => Ok(()), - 0 => f.write_str("; q=0"), - x => write!(f, "; q=0.{}", format!("{:03}", x).trim_right_matches('0')) - } - } -} - -impl Default for Quality { - fn default() -> Quality { - Quality(1000) - } -} - -/// Represents an item with a quality value as defined in -/// [RFC7231](https://tools.ietf.org/html/rfc7231#section-5.3.1). -#[derive(Clone, PartialEq, Debug)] -pub struct QualityItem { - /// The actual contents of the field. - pub item: T, - /// The quality (client or server preference) for the value. - pub quality: Quality, -} - -impl QualityItem { - /// Creates a new `QualityItem` from an item and a quality. - /// The item can be of any type. - /// The quality should be a value in the range [0, 1]. - pub fn new(item: T, quality: Quality) -> QualityItem { - QualityItem { - item: item, - quality: quality - } - } -} - -impl cmp::PartialOrd for QualityItem { - fn partial_cmp(&self, other: &QualityItem) -> Option { - self.quality.partial_cmp(&other.quality) - } -} - -impl fmt::Display for QualityItem { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}{}", self.item, format!("{}", self.quality)) - } -} - -impl str::FromStr for QualityItem { - type Err = ::Error; - fn from_str(s: &str) -> ::Result> { - // Set defaults used if parsing fails. 
- let mut raw_item = s; - let mut quality = 1f32; - - let parts: Vec<&str> = s.rsplitn(2, ';').map(|x| x.trim()).collect(); - if parts.len() == 2 { - let start = &parts[0][0..2]; - if start == "q=" || start == "Q=" { - let q_part = &parts[0][2..parts[0].len()]; - if q_part.len() > 5 { - return Err(::Error::Header); - } - match q_part.parse::() { - Ok(q_value) => { - if 0f32 <= q_value && q_value <= 1f32 { - quality = q_value; - raw_item = parts[1]; - } else { - return Err(::Error::Header); - } - }, - Err(_) => return Err(::Error::Header), - } - } - } - match raw_item.parse::() { - // we already checked above that the quality is within range - Ok(item) => Ok(QualityItem::new(item, from_f32(quality))), - Err(_) => Err(::Error::Header), - } - } -} - -fn from_f32(f: f32) -> Quality { - // this function is only used internally. A check that `f` is within range - // should be done before calling this method. Just in case, this - // debug_assert should catch if we were forgetful - debug_assert!(f >= 0f32 && f <= 1f32, "q value must be between 0.0 and 1.0"); - Quality((f * 1000f32) as u16) -} - -/// Convinience function to wrap a value in a `QualityItem` -/// Sets `q` to the default 1.0 -pub fn qitem(item: T) -> QualityItem { - QualityItem::new(item, Default::default()) -} - -/// Convenience function to create a `Quality` from a float. 
-pub fn q(f: f32) -> Quality { - assert!(f >= 0f32 && f <= 1f32, "q value must be between 0.0 and 1.0"); - from_f32(f) -} - -#[cfg(test)] -mod tests { - use super::*; - use super::super::encoding::*; - - #[test] - fn test_quality_item_show1() { - let x = qitem(Chunked); - assert_eq!(format!("{}", x), "chunked"); - } - #[test] - fn test_quality_item_show2() { - let x = QualityItem::new(Chunked, Quality(1)); - assert_eq!(format!("{}", x), "chunked; q=0.001"); - } - #[test] - fn test_quality_item_show3() { - // Custom value - let x = QualityItem{ - item: EncodingExt("identity".to_owned()), - quality: Quality(500), - }; - assert_eq!(format!("{}", x), "identity; q=0.5"); - } - - #[test] - fn test_quality_item_from_str1() { - let x: ::Result> = "chunked".parse(); - assert_eq!(x.unwrap(), QualityItem{ item: Chunked, quality: Quality(1000), }); - } - #[test] - fn test_quality_item_from_str2() { - let x: ::Result> = "chunked; q=1".parse(); - assert_eq!(x.unwrap(), QualityItem{ item: Chunked, quality: Quality(1000), }); - } - #[test] - fn test_quality_item_from_str3() { - let x: ::Result> = "gzip; q=0.5".parse(); - assert_eq!(x.unwrap(), QualityItem{ item: Gzip, quality: Quality(500), }); - } - #[test] - fn test_quality_item_from_str4() { - let x: ::Result> = "gzip; q=0.273".parse(); - assert_eq!(x.unwrap(), QualityItem{ item: Gzip, quality: Quality(273), }); - } - #[test] - fn test_quality_item_from_str5() { - let x: ::Result> = "gzip; q=0.2739999".parse(); - assert!(x.is_err()); - } - #[test] - fn test_quality_item_from_str6() { - let x: ::Result> = "gzip; q=2".parse(); - assert!(x.is_err()); - } - #[test] - fn test_quality_item_ordering() { - let x: QualityItem = "gzip; q=0.5".parse().ok().unwrap(); - let y: QualityItem = "gzip; q=0.273".parse().ok().unwrap(); - let comparision_result: bool = x.gt(&y); - assert!(comparision_result) - } - - #[test] - fn test_quality() { - assert_eq!(q(0.5), Quality(500)); - } - - #[test] - fn test_quality2() { - assert_eq!(format!("{}", 
q(0.0)), "; q=0"); - } - - #[test] - #[should_panic] // FIXME - 32-bit msvc unwinding broken - #[cfg_attr(all(target_arch="x86", target_env="msvc"), ignore)] - fn test_quality_invalid() { - q(-1.0); - } - - #[test] - #[should_panic] // FIXME - 32-bit msvc unwinding broken - #[cfg_attr(all(target_arch="x86", target_env="msvc"), ignore)] - fn test_quality_invalid2() { - q(2.0); - } -} diff --git a/third_party/rust/hyper/src/headers.rs b/third_party/rust/hyper/src/headers.rs new file mode 100644 index 000000000000..32131e022e3d --- /dev/null +++ b/third_party/rust/hyper/src/headers.rs @@ -0,0 +1,125 @@ +use bytes::BytesMut; +use http::HeaderMap; +use http::header::{CONTENT_LENGTH, TRANSFER_ENCODING}; +use http::header::{HeaderValue, OccupiedEntry, ValueIter}; + +pub fn connection_keep_alive(value: &HeaderValue) -> bool { + connection_has(value, "keep-alive") +} + +pub fn connection_close(value: &HeaderValue) -> bool { + connection_has(value, "close") +} + +fn connection_has(value: &HeaderValue, needle: &str) -> bool { + if let Ok(s) = value.to_str() { + for val in s.split(',') { + if eq_ascii(val.trim(), needle) { + return true; + } + } + } + false +} + +pub fn content_length_parse(value: &HeaderValue) -> Option { + value + .to_str() + .ok() + .and_then(|s| s.parse().ok()) +} + +pub fn content_length_parse_all(headers: &HeaderMap) -> Option { + content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) +} + +pub fn content_length_parse_all_values(values: ValueIter) -> Option { + // If multiple Content-Length headers were sent, everything can still + // be alright if they all contain the same value, and all parse + // correctly. If not, then it's an error. 
+ + let folded = values + .fold(None, |prev, line| match prev { + Some(Ok(prev)) => { + Some(line + .to_str() + .map_err(|_| ()) + .and_then(|s| s.parse().map_err(|_| ())) + .and_then(|n| if prev == n { Ok(n) } else { Err(()) })) + }, + None => { + Some(line + .to_str() + .map_err(|_| ()) + .and_then(|s| s.parse().map_err(|_| ()))) + }, + Some(Err(())) => Some(Err(())), + }); + + if let Some(Ok(n)) = folded { + Some(n) + } else { + None + } +} + +pub fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { + headers + .entry(CONTENT_LENGTH) + .unwrap() + .or_insert_with(|| HeaderValue::from(len)); +} + +pub fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { + is_chunked(headers.get_all(TRANSFER_ENCODING).into_iter()) +} + +pub fn is_chunked(mut encodings: ValueIter) -> bool { + // chunked must always be the last encoding, according to spec + if let Some(line) = encodings.next_back() { + return is_chunked_(line); + } + + false +} + +pub fn is_chunked_(value: &HeaderValue) -> bool { + // chunked must always be the last encoding, according to spec + if let Ok(s) = value.to_str() { + if let Some(encoding) = s.rsplit(',').next() { + return eq_ascii(encoding.trim(), "chunked"); + } + } + + false +} + +pub fn add_chunked(mut entry: OccupiedEntry) { + const CHUNKED: &'static str = "chunked"; + + if let Some(line) = entry.iter_mut().next_back() { + // + 2 for ", " + let new_cap = line.as_bytes().len() + CHUNKED.len() + 2; + let mut buf = BytesMut::with_capacity(new_cap); + buf.copy_from_slice(line.as_bytes()); + buf.copy_from_slice(b", "); + buf.copy_from_slice(CHUNKED.as_bytes()); + + *line = HeaderValue::from_shared(buf.freeze()) + .expect("original header value plus ascii is valid"); + return; + } + + entry.insert(HeaderValue::from_static(CHUNKED)); +} + +fn eq_ascii(left: &str, right: &str) -> bool { + // As of Rust 1.23, str gained this method inherently, and so the + // compiler says this trait is unused. 
+ // + // TODO: Once our minimum Rust compiler version is >=1.23, this can be removed. + #[allow(unused, deprecated)] + use std::ascii::AsciiExt; + + left.eq_ignore_ascii_case(right) +} diff --git a/third_party/rust/hyper/src/http/h1.rs b/third_party/rust/hyper/src/http/h1.rs deleted file mode 100644 index c725064dd4eb..000000000000 --- a/third_party/rust/hyper/src/http/h1.rs +++ /dev/null @@ -1,1179 +0,0 @@ -//! Adapts the HTTP/1.1 implementation into the `HttpMessage` API. -use std::borrow::Cow; -use std::cmp::min; -use std::fmt; -use std::io::{self, Write, BufWriter, BufRead, Read}; -use std::net::Shutdown; -use std::time::Duration; - -use httparse; -use url::Position as UrlPosition; - -use buffer::BufReader; -use Error; -use header::{Headers, ContentLength, TransferEncoding}; -use header::Encoding::Chunked; -use method::{Method}; -use net::{NetworkConnector, NetworkStream}; -use status::StatusCode; -use version::HttpVersion; -use version::HttpVersion::{Http10, Http11}; -use uri::RequestUri; - -use self::HttpReader::{SizedReader, ChunkedReader, EofReader, EmptyReader}; -use self::HttpWriter::{ChunkedWriter, SizedWriter, EmptyWriter, ThroughWriter}; - -use http::{ - RawStatus, - Protocol, - HttpMessage, - RequestHead, - ResponseHead, -}; -use header; -use version; - -const MAX_INVALID_RESPONSE_BYTES: usize = 1024 * 128; - -#[derive(Debug)] -struct Wrapper { - obj: Option, -} - -impl Wrapper { - pub fn new(obj: T) -> Wrapper { - Wrapper { obj: Some(obj) } - } - - pub fn map_in_place(&mut self, f: F) where F: FnOnce(T) -> T { - let obj = self.obj.take().unwrap(); - let res = f(obj); - self.obj = Some(res); - } - - pub fn into_inner(self) -> T { self.obj.unwrap() } - pub fn as_mut(&mut self) -> &mut T { self.obj.as_mut().unwrap() } - pub fn as_ref(&self) -> &T { self.obj.as_ref().unwrap() } -} - -#[derive(Debug)] -enum Stream { - Idle(Box), - Writing(HttpWriter>>), - Reading(HttpReader>>), -} - -impl Stream { - fn writer_mut(&mut self) -> Option<&mut HttpWriter>>> { 
- match *self { - Stream::Writing(ref mut writer) => Some(writer), - _ => None, - } - } - fn reader_mut(&mut self) -> Option<&mut HttpReader>>> { - match *self { - Stream::Reading(ref mut reader) => Some(reader), - _ => None, - } - } - fn reader_ref(&self) -> Option<&HttpReader>>> { - match *self { - Stream::Reading(ref reader) => Some(reader), - _ => None, - } - } - - fn new(stream: Box) -> Stream { - Stream::Idle(stream) - } -} - -/// An implementation of the `HttpMessage` trait for HTTP/1.1. -#[derive(Debug)] -pub struct Http11Message { - is_proxied: bool, - method: Option, - stream: Wrapper, -} - -impl Write for Http11Message { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - match self.stream.as_mut().writer_mut() { - None => Err(io::Error::new(io::ErrorKind::Other, - "Not in a writable state")), - Some(ref mut writer) => writer.write(buf), - } - } - #[inline] - fn flush(&mut self) -> io::Result<()> { - match self.stream.as_mut().writer_mut() { - None => Err(io::Error::new(io::ErrorKind::Other, - "Not in a writable state")), - Some(ref mut writer) => writer.flush(), - } - } -} - -impl Read for Http11Message { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match self.stream.as_mut().reader_mut() { - None => Err(io::Error::new(io::ErrorKind::Other, - "Not in a readable state")), - Some(ref mut reader) => reader.read(buf), - } - } -} - -impl HttpMessage for Http11Message { - fn set_outgoing(&mut self, mut head: RequestHead) -> ::Result { - let mut res = Err(Error::from(io::Error::new( - io::ErrorKind::Other, - ""))); - let mut method = None; - let is_proxied = self.is_proxied; - self.stream.map_in_place(|stream: Stream| -> Stream { - let stream = match stream { - Stream::Idle(stream) => stream, - _ => { - res = Err(Error::from(io::Error::new( - io::ErrorKind::Other, - "Message not idle, cannot start new outgoing"))); - return stream; - }, - }; - let mut stream = BufWriter::new(stream); - - { - let uri = if is_proxied { - 
head.url.as_ref() - } else { - &head.url[UrlPosition::BeforePath..UrlPosition::AfterQuery] - }; - - let version = version::HttpVersion::Http11; - debug!("request line: {:?} {:?} {:?}", head.method, uri, version); - match write!(&mut stream, "{} {} {}{}", - head.method, uri, version, LINE_ENDING) { - Err(e) => { - res = Err(From::from(e)); - // TODO What should we do if the BufWriter doesn't wanna - // relinquish the stream? - return Stream::Idle(stream.into_inner().ok().unwrap()); - }, - Ok(_) => {}, - }; - } - - let stream = { - let write_headers = |mut stream: BufWriter>, head: &RequestHead| { - debug!("headers={:?}", head.headers); - match write!(&mut stream, "{}{}", head.headers, LINE_ENDING) { - Ok(_) => Ok(stream), - Err(e) => { - Err((e, stream.into_inner().unwrap())) - } - } - }; - match head.method { - Method::Get | Method::Head => { - let writer = match write_headers(stream, &head) { - Ok(w) => w, - Err(e) => { - res = Err(From::from(e.0)); - return Stream::Idle(e.1); - } - }; - EmptyWriter(writer) - }, - _ => { - let mut chunked = true; - let mut len = 0; - - match head.headers.get::() { - Some(cl) => { - chunked = false; - len = **cl; - }, - None => () - }; - - // can't do in match above, thanks borrowck - if chunked { - let encodings = match head.headers.get_mut::() { - Some(encodings) => { - //TODO: check if chunked is already in encodings. use HashSet? 
- encodings.push(header::Encoding::Chunked); - false - }, - None => true - }; - - if encodings { - head.headers.set( - header::TransferEncoding(vec![header::Encoding::Chunked])) - } - } - - let stream = match write_headers(stream, &head) { - Ok(s) => s, - Err(e) => { - res = Err(From::from(e.0)); - return Stream::Idle(e.1); - }, - }; - - if chunked { - ChunkedWriter(stream) - } else { - SizedWriter(stream, len) - } - } - } - }; - - method = Some(head.method.clone()); - res = Ok(head); - Stream::Writing(stream) - }); - - self.method = method; - res - } - - fn get_incoming(&mut self) -> ::Result { - try!(self.flush_outgoing()); - let method = self.method.take().unwrap_or(Method::Get); - let mut res = Err(From::from( - io::Error::new(io::ErrorKind::Other, - "Read already in progress"))); - self.stream.map_in_place(|stream| { - let stream = match stream { - Stream::Idle(stream) => stream, - _ => { - // The message was already in the reading state... - // TODO Decide what happens in case we try to get a new incoming at that point - res = Err(From::from( - io::Error::new(io::ErrorKind::Other, - "Read already in progress"))); - return stream; - } - }; - - let expected_no_content = stream.previous_response_expected_no_content(); - trace!("previous_response_expected_no_content = {}", expected_no_content); - - let mut stream = BufReader::new(stream); - - let mut invalid_bytes_read = 0; - let head; - loop { - head = match parse_response(&mut stream) { - Ok(head) => head, - Err(::Error::Version) - if expected_no_content && invalid_bytes_read < MAX_INVALID_RESPONSE_BYTES => { - trace!("expected_no_content, found content"); - invalid_bytes_read += 1; - stream.consume(1); - continue; - } - Err(e) => { - res = Err(e); - return Stream::Idle(stream.into_inner()); - } - }; - break; - } - - let raw_status = head.subject; - let headers = head.headers; - - let is_empty = !should_have_response_body(&method, raw_status.0); - 
stream.get_mut().set_previous_response_expected_no_content(is_empty); - // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 - // 1. HEAD reponses, and Status 1xx, 204, and 304 cannot have a body. - // 2. Status 2xx to a CONNECT cannot have a body. - // 3. Transfer-Encoding: chunked has a chunked body. - // 4. If multiple differing Content-Length headers or invalid, close connection. - // 5. Content-Length header has a sized body. - // 6. Not Client. - // 7. Read till EOF. - let reader = if is_empty { - EmptyReader(stream) - } else { - if let Some(&TransferEncoding(ref codings)) = headers.get() { - if codings.last() == Some(&Chunked) { - ChunkedReader(stream, None) - } else { - trace!("not chuncked. read till eof"); - EofReader(stream) - } - } else if let Some(&ContentLength(len)) = headers.get() { - SizedReader(stream, len) - } else if headers.has::() { - trace!("illegal Content-Length: {:?}", headers.get_raw("Content-Length")); - res = Err(Error::Header); - return Stream::Idle(stream.into_inner()); - } else { - trace!("neither Transfer-Encoding nor Content-Length"); - EofReader(stream) - } - }; - - trace!("Http11Message.reader = {:?}", reader); - - - res = Ok(ResponseHead { - headers: headers, - raw_status: raw_status, - version: head.version, - }); - - Stream::Reading(reader) - }); - res - } - - fn has_body(&self) -> bool { - match self.stream.as_ref().reader_ref() { - Some(&EmptyReader(..)) | - Some(&SizedReader(_, 0)) | - Some(&ChunkedReader(_, Some(0))) => false, - // specifically EofReader is always true - _ => true - } - } - - #[inline] - fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - self.get_ref().set_read_timeout(dur) - } - - #[inline] - fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - self.get_ref().set_write_timeout(dur) - } - - #[inline] - fn close_connection(&mut self) -> ::Result<()> { - try!(self.get_mut().close(Shutdown::Both)); - Ok(()) - } - - #[inline] - fn set_proxied(&mut self, val: bool) { - 
self.is_proxied = val; - } -} - -impl Http11Message { - /// Consumes the `Http11Message` and returns the underlying `NetworkStream`. - pub fn into_inner(self) -> Box { - match self.stream.into_inner() { - Stream::Idle(stream) => stream, - Stream::Writing(stream) => stream.into_inner().into_inner().unwrap(), - Stream::Reading(stream) => stream.into_inner().into_inner(), - } - } - - /// Gets a borrowed reference to the underlying `NetworkStream`, regardless of the state of the - /// `Http11Message`. - pub fn get_ref(&self) -> &(NetworkStream + Send) { - match *self.stream.as_ref() { - Stream::Idle(ref stream) => &**stream, - Stream::Writing(ref stream) => &**stream.get_ref().get_ref(), - Stream::Reading(ref stream) => &**stream.get_ref().get_ref() - } - } - - /// Gets a mutable reference to the underlying `NetworkStream`, regardless of the state of the - /// `Http11Message`. - pub fn get_mut(&mut self) -> &mut (NetworkStream + Send) { - match *self.stream.as_mut() { - Stream::Idle(ref mut stream) => &mut **stream, - Stream::Writing(ref mut stream) => &mut **stream.get_mut().get_mut(), - Stream::Reading(ref mut stream) => &mut **stream.get_mut().get_mut() - } - } - - /// Creates a new `Http11Message` that will use the given `NetworkStream` for communicating to - /// the peer. - pub fn with_stream(stream: Box) -> Http11Message { - Http11Message { - is_proxied: false, - method: None, - stream: Wrapper::new(Stream::new(stream)), - } - } - - /// Flushes the current outgoing content and moves the stream into the `stream` property. - /// - /// TODO It might be sensible to lift this up to the `HttpMessage` trait itself... 
- pub fn flush_outgoing(&mut self) -> ::Result<()> { - let mut res = Ok(()); - self.stream.map_in_place(|stream| { - let writer = match stream { - Stream::Writing(writer) => writer, - _ => { - res = Ok(()); - return stream; - }, - }; - // end() already flushes - let raw = match writer.end() { - Ok(buf) => buf.into_inner().unwrap(), - Err(e) => { - res = Err(From::from(e.0)); - return Stream::Writing(e.1); - } - }; - Stream::Idle(raw) - }); - res - } -} - -/// The `Protocol` implementation provides HTTP/1.1 messages. -pub struct Http11Protocol { - connector: Connector, -} - -impl Protocol for Http11Protocol { - fn new_message(&self, host: &str, port: u16, scheme: &str) -> ::Result> { - let stream = try!(self.connector.connect(host, port, scheme)).into(); - - Ok(Box::new(Http11Message::with_stream(stream))) - } -} - -impl Http11Protocol { - /// Creates a new `Http11Protocol` instance that will use the given `NetworkConnector` for - /// establishing HTTP connections. - pub fn with_connector(c: C) -> Http11Protocol - where C: NetworkConnector + Send + Sync + 'static, - S: NetworkStream + Send { - Http11Protocol { - connector: Connector(Box::new(ConnAdapter(c))), - } - } -} - -struct ConnAdapter(C); - -impl + Send + Sync, S: NetworkStream + Send> - NetworkConnector for ConnAdapter { - type Stream = Box; - #[inline] - fn connect(&self, host: &str, port: u16, scheme: &str) - -> ::Result> { - Ok(try!(self.0.connect(host, port, scheme)).into()) - } -} - -struct Connector(Box> + Send + Sync>); - -impl NetworkConnector for Connector { - type Stream = Box; - #[inline] - fn connect(&self, host: &str, port: u16, scheme: &str) - -> ::Result> { - Ok(try!(self.0.connect(host, port, scheme)).into()) - } -} - - -/// Readers to handle different Transfer-Encodings. -/// -/// If a message body does not include a Transfer-Encoding, it *should* -/// include a Content-Length header. -pub enum HttpReader { - /// A Reader used when a Content-Length header is passed with a positive integer. 
- SizedReader(R, u64), - /// A Reader used when Transfer-Encoding is `chunked`. - ChunkedReader(R, Option), - /// A Reader used for responses that don't indicate a length or chunked. - /// - /// Note: This should only used for `Response`s. It is illegal for a - /// `Request` to be made with both `Content-Length` and - /// `Transfer-Encoding: chunked` missing, as explained from the spec: - /// - /// > If a Transfer-Encoding header field is present in a response and - /// > the chunked transfer coding is not the final encoding, the - /// > message body length is determined by reading the connection until - /// > it is closed by the server. If a Transfer-Encoding header field - /// > is present in a request and the chunked transfer coding is not - /// > the final encoding, the message body length cannot be determined - /// > reliably; the server MUST respond with the 400 (Bad Request) - /// > status code and then close the connection. - EofReader(R), - /// A Reader used for messages that should never have a body. - /// - /// See https://tools.ietf.org/html/rfc7230#section-3.3.3 - EmptyReader(R), -} - -impl HttpReader { - - /// Unwraps this HttpReader and returns the underlying Reader. - pub fn into_inner(self) -> R { - match self { - SizedReader(r, _) => r, - ChunkedReader(r, _) => r, - EofReader(r) => r, - EmptyReader(r) => r, - } - } - - /// Gets a borrowed reference to the underlying Reader. - pub fn get_ref(&self) -> &R { - match *self { - SizedReader(ref r, _) => r, - ChunkedReader(ref r, _) => r, - EofReader(ref r) => r, - EmptyReader(ref r) => r, - } - } - - /// Gets a mutable reference to the underlying Reader. 
- pub fn get_mut(&mut self) -> &mut R { - match *self { - SizedReader(ref mut r, _) => r, - ChunkedReader(ref mut r, _) => r, - EofReader(ref mut r) => r, - EmptyReader(ref mut r) => r, - } - } -} - -impl fmt::Debug for HttpReader { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - SizedReader(_,rem) => write!(fmt, "SizedReader(remaining={:?})", rem), - ChunkedReader(_, None) => write!(fmt, "ChunkedReader(chunk_remaining=unknown)"), - ChunkedReader(_, Some(rem)) => write!(fmt, "ChunkedReader(chunk_remaining={:?})", rem), - EofReader(_) => write!(fmt, "EofReader"), - EmptyReader(_) => write!(fmt, "EmptyReader"), - } - } -} - -impl Read for HttpReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - if buf.is_empty() { - return Ok(0); - } - match *self { - SizedReader(ref mut body, ref mut remaining) => { - trace!("Sized read, remaining={:?}", remaining); - if *remaining == 0 { - Ok(0) - } else { - let to_read = min(*remaining as usize, buf.len()); - let num = try!(body.read(&mut buf[..to_read])) as u64; - trace!("Sized read: {}", num); - if num > *remaining { - *remaining = 0; - } else if num == 0 { - return Err(io::Error::new(io::ErrorKind::Other, "early eof")); - } else { - *remaining -= num; - } - Ok(num as usize) - } - }, - ChunkedReader(ref mut body, ref mut opt_remaining) => { - let mut rem = match *opt_remaining { - Some(ref rem) => *rem, - // None means we don't know the size of the next chunk - None => try!(read_chunk_size(body)) - }; - trace!("Chunked read, remaining={:?}", rem); - - if rem == 0 { - if opt_remaining.is_none() { - try!(eat(body, LINE_ENDING.as_bytes())); - } - - *opt_remaining = Some(0); - - // chunk of size 0 signals the end of the chunked stream - // if the 0 digit was missing from the stream, it would - // be an InvalidInput error instead. 
- trace!("end of chunked"); - - return Ok(0) - } - - let to_read = min(rem as usize, buf.len()); - let count = try!(body.read(&mut buf[..to_read])) as u64; - - if count == 0 { - *opt_remaining = Some(0); - return Err(io::Error::new(io::ErrorKind::Other, "early eof")); - } - - rem -= count; - *opt_remaining = if rem > 0 { - Some(rem) - } else { - try!(eat(body, LINE_ENDING.as_bytes())); - None - }; - Ok(count as usize) - }, - EofReader(ref mut body) => { - let r = body.read(buf); - trace!("eofread: {:?}", r); - r - }, - EmptyReader(_) => Ok(0) - } - } -} - -fn eat(rdr: &mut R, bytes: &[u8]) -> io::Result<()> { - let mut buf = [0]; - for &b in bytes.iter() { - match try!(rdr.read(&mut buf)) { - 1 if buf[0] == b => (), - _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, - "Invalid characters found")), - } - } - Ok(()) -} - -/// Chunked chunks start with 1*HEXDIGIT, indicating the size of the chunk. -fn read_chunk_size(rdr: &mut R) -> io::Result { - macro_rules! byte ( - ($rdr:ident) => ({ - let mut buf = [0]; - match try!($rdr.read(&mut buf)) { - 1 => buf[0], - _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, - "Invalid chunk size line")), - - } - }) - ); - let mut size = 0u64; - let radix = 16; - let mut in_ext = false; - let mut in_chunk_size = true; - loop { - match byte!(rdr) { - b@b'0'...b'9' if in_chunk_size => { - size *= radix; - size += (b - b'0') as u64; - }, - b@b'a'...b'f' if in_chunk_size => { - size *= radix; - size += (b + 10 - b'a') as u64; - }, - b@b'A'...b'F' if in_chunk_size => { - size *= radix; - size += (b + 10 - b'A') as u64; - }, - CR => { - match byte!(rdr) { - LF => break, - _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, - "Invalid chunk size line")) - - } - }, - // If we weren't in the extension yet, the ";" signals its start - b';' if !in_ext => { - in_ext = true; - in_chunk_size = false; - }, - // "Linear white space" is ignored between the chunk size and the - // extension separator token (";") due to the 
"implied *LWS rule". - b'\t' | b' ' if !in_ext & !in_chunk_size => {}, - // LWS can follow the chunk size, but no more digits can come - b'\t' | b' ' if in_chunk_size => in_chunk_size = false, - // We allow any arbitrary octet once we are in the extension, since - // they all get ignored anyway. According to the HTTP spec, valid - // extensions would have a more strict syntax: - // (token ["=" (token | quoted-string)]) - // but we gain nothing by rejecting an otherwise valid chunk size. - ext if in_ext => { - todo!("chunk extension byte={}", ext); - }, - // Finally, if we aren't in the extension and we're reading any - // other octet, the chunk size line is invalid! - _ => { - return Err(io::Error::new(io::ErrorKind::InvalidInput, - "Invalid chunk size line")); - } - } - } - trace!("chunk size={:?}", size); - Ok(size) -} - -fn should_have_response_body(method: &Method, status: u16) -> bool { - trace!("should_have_response_body({:?}, {})", method, status); - match (method, status) { - (&Method::Head, _) | - (_, 100...199) | - (_, 204) | - (_, 304) | - (&Method::Connect, 200...299) => false, - _ => true - } -} - -/// Writers to handle different Transfer-Encodings. -pub enum HttpWriter { - /// A no-op Writer, used initially before Transfer-Encoding is determined. - ThroughWriter(W), - /// A Writer for when Transfer-Encoding includes `chunked`. - ChunkedWriter(W), - /// A Writer for when Content-Length is set. - /// - /// Enforces that the body is not longer than the Content-Length header. - SizedWriter(W, u64), - /// A writer that should not write any body. - EmptyWriter(W), -} - -impl HttpWriter { - /// Unwraps the HttpWriter and returns the underlying Writer. - #[inline] - pub fn into_inner(self) -> W { - match self { - ThroughWriter(w) => w, - ChunkedWriter(w) => w, - SizedWriter(w, _) => w, - EmptyWriter(w) => w, - } - } - - /// Access the inner Writer. 
- #[inline] - pub fn get_ref(&self) -> &W { - match *self { - ThroughWriter(ref w) => w, - ChunkedWriter(ref w) => w, - SizedWriter(ref w, _) => w, - EmptyWriter(ref w) => w, - } - } - - /// Access the inner Writer mutably. - /// - /// Warning: You should not write to this directly, as you can corrupt - /// the state. - #[inline] - pub fn get_mut(&mut self) -> &mut W { - match *self { - ThroughWriter(ref mut w) => w, - ChunkedWriter(ref mut w) => w, - SizedWriter(ref mut w, _) => w, - EmptyWriter(ref mut w) => w, - } - } - - /// Ends the HttpWriter, and returns the underlying Writer. - /// - /// A final `write_all()` is called with an empty message, and then flushed. - /// The ChunkedWriter variant will use this to write the 0-sized last-chunk. - #[inline] - pub fn end(mut self) -> Result> { - fn inner(w: &mut W) -> io::Result<()> { - try!(w.write(&[])); - w.flush() - } - - match inner(&mut self) { - Ok(..) => Ok(self.into_inner()), - Err(e) => Err(EndError(e, self)) - } - } -} - -#[derive(Debug)] -pub struct EndError(io::Error, HttpWriter); - -impl From> for io::Error { - fn from(e: EndError) -> io::Error { - e.0 - } -} - -impl Write for HttpWriter { - #[inline] - fn write(&mut self, msg: &[u8]) -> io::Result { - match *self { - ThroughWriter(ref mut w) => w.write(msg), - ChunkedWriter(ref mut w) => { - let chunk_size = msg.len(); - trace!("chunked write, size = {:?}", chunk_size); - try!(write!(w, "{:X}{}", chunk_size, LINE_ENDING)); - try!(w.write_all(msg)); - try!(w.write_all(LINE_ENDING.as_bytes())); - Ok(msg.len()) - }, - SizedWriter(ref mut w, ref mut remaining) => { - let len = msg.len() as u64; - if len > *remaining { - let len = *remaining; - *remaining = 0; - try!(w.write_all(&msg[..len as usize])); - Ok(len as usize) - } else { - *remaining -= len; - try!(w.write_all(msg)); - Ok(len as usize) - } - }, - EmptyWriter(..) 
=> { - if !msg.is_empty() { - error!("Cannot include a body with this kind of message"); - } - Ok(0) - } - } - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - match *self { - ThroughWriter(ref mut w) => w.flush(), - ChunkedWriter(ref mut w) => w.flush(), - SizedWriter(ref mut w, _) => w.flush(), - EmptyWriter(ref mut w) => w.flush(), - } - } -} - -impl fmt::Debug for HttpWriter { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - ThroughWriter(_) => write!(fmt, "ThroughWriter"), - ChunkedWriter(_) => write!(fmt, "ChunkedWriter"), - SizedWriter(_, rem) => write!(fmt, "SizedWriter(remaining={:?})", rem), - EmptyWriter(_) => write!(fmt, "EmptyWriter"), - } - } -} - -const MAX_HEADERS: usize = 100; - -/// Parses a request into an Incoming message head. -#[inline] -pub fn parse_request(buf: &mut BufReader) -> ::Result> { - parse::(buf) -} - -/// Parses a response into an Incoming message head. -#[inline] -pub fn parse_response(buf: &mut BufReader) -> ::Result> { - parse::(buf) -} - -fn parse, I>(rdr: &mut BufReader) -> ::Result> { - loop { - match try!(try_parse::(rdr)) { - httparse::Status::Complete((inc, len)) => { - rdr.consume(len); - return Ok(inc); - }, - _partial => () - } - let n = try!(rdr.read_into_buf()); - if n == 0 { - let buffered = rdr.get_buf().len(); - if buffered == ::buffer::MAX_BUFFER_SIZE { - return Err(Error::TooLarge); - } else { - return Err(Error::Io(io::Error::new( - io::ErrorKind::UnexpectedEof, - "end of stream before headers finished" - ))); - } - } - } -} - -fn try_parse, I>(rdr: &mut BufReader) -> TryParseResult { - let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS]; - let buf = rdr.get_buf(); - if buf.len() == 0 { - return Ok(httparse::Status::Partial); - } - trace!("try_parse({:?})", buf); - ::try_parse(&mut headers, buf) -} - -#[doc(hidden)] -trait TryParse { - type Subject; - fn try_parse<'a>(headers: &'a mut [httparse::Header<'a>], buf: &'a [u8]) -> - TryParseResult; -} - -type TryParseResult 
= Result, usize)>, Error>; - -impl<'a> TryParse for httparse::Request<'a, 'a> { - type Subject = (Method, RequestUri); - - fn try_parse<'b>(headers: &'b mut [httparse::Header<'b>], buf: &'b [u8]) -> - TryParseResult<(Method, RequestUri)> { - trace!("Request.try_parse([Header; {}], [u8; {}])", headers.len(), buf.len()); - let mut req = httparse::Request::new(headers); - Ok(match try!(req.parse(buf)) { - httparse::Status::Complete(len) => { - trace!("Request.try_parse Complete({})", len); - httparse::Status::Complete((Incoming { - version: if req.version.unwrap() == 1 { Http11 } else { Http10 }, - subject: ( - try!(req.method.unwrap().parse()), - try!(req.path.unwrap().parse()) - ), - headers: try!(Headers::from_raw(req.headers)) - }, len)) - }, - httparse::Status::Partial => httparse::Status::Partial - }) - } -} - -impl<'a> TryParse for httparse::Response<'a, 'a> { - type Subject = RawStatus; - - fn try_parse<'b>(headers: &'b mut [httparse::Header<'b>], buf: &'b [u8]) -> - TryParseResult { - trace!("Response.try_parse([Header; {}], [u8; {}])", headers.len(), buf.len()); - let mut res = httparse::Response::new(headers); - Ok(match try!(res.parse(buf)) { - httparse::Status::Complete(len) => { - trace!("Response.try_parse Complete({})", len); - let code = res.code.unwrap(); - let reason = match StatusCode::from_u16(code).canonical_reason() { - Some(reason) if reason == res.reason.unwrap() => Cow::Borrowed(reason), - _ => Cow::Owned(res.reason.unwrap().to_owned()) - }; - httparse::Status::Complete((Incoming { - version: if res.version.unwrap() == 1 { Http11 } else { Http10 }, - subject: RawStatus(code, reason), - headers: try!(Headers::from_raw(res.headers)) - }, len)) - }, - httparse::Status::Partial => httparse::Status::Partial - }) - } -} - -/// An Incoming Message head. Includes request/status line, and headers. -#[derive(Debug)] -pub struct Incoming { - /// HTTP version of the message. 
- pub version: HttpVersion, - /// Subject (request line or status line) of Incoming message. - pub subject: S, - /// Headers of the Incoming message. - pub headers: Headers -} - -/// The `\r` byte. -pub const CR: u8 = b'\r'; -/// The `\n` byte. -pub const LF: u8 = b'\n'; -/// The bytes `\r\n`. -pub const LINE_ENDING: &'static str = "\r\n"; - -#[cfg(test)] -mod tests { - use std::error::Error; - use std::io::{self, Read, Write}; - - - use buffer::BufReader; - use mock::MockStream; - use http::HttpMessage; - - use super::{read_chunk_size, parse_request, parse_response, Http11Message}; - - #[test] - fn test_write_chunked() { - use std::str::from_utf8; - let mut w = super::HttpWriter::ChunkedWriter(Vec::new()); - w.write_all(b"foo bar").unwrap(); - w.write_all(b"baz quux herp").unwrap(); - let buf = w.end().unwrap(); - let s = from_utf8(buf.as_ref()).unwrap(); - assert_eq!(s, "7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n"); - } - - #[test] - fn test_write_sized() { - use std::str::from_utf8; - let mut w = super::HttpWriter::SizedWriter(Vec::new(), 8); - w.write_all(b"foo bar").unwrap(); - assert_eq!(w.write(b"baz").unwrap(), 1); - - let buf = w.end().unwrap(); - let s = from_utf8(buf.as_ref()).unwrap(); - assert_eq!(s, "foo barb"); - } - - #[test] - fn test_read_chunk_size() { - fn read(s: &str, result: u64) { - assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap(), result); - } - - fn read_err(s: &str) { - assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap_err().kind(), - io::ErrorKind::InvalidInput); - } - - read("1\r\n", 1); - read("01\r\n", 1); - read("0\r\n", 0); - read("00\r\n", 0); - read("A\r\n", 10); - read("a\r\n", 10); - read("Ff\r\n", 255); - read("Ff \r\n", 255); - // Missing LF or CRLF - read_err("F\rF"); - read_err("F"); - // Invalid hex digit - read_err("X\r\n"); - read_err("1X\r\n"); - read_err("-\r\n"); - read_err("-1\r\n"); - // Acceptable (if not fully valid) extensions do not influence the size - read("1;extension\r\n", 1); - read("a;ext 
name=value\r\n", 10); - read("1;extension;extension2\r\n", 1); - read("1;;; ;\r\n", 1); - read("2; extension...\r\n", 2); - read("3 ; extension=123\r\n", 3); - read("3 ;\r\n", 3); - read("3 ; \r\n", 3); - // Invalid extensions cause an error - read_err("1 invalid extension\r\n"); - read_err("1 A\r\n"); - read_err("1;no CRLF"); - } - - #[test] - fn test_read_sized_early_eof() { - let mut r = super::HttpReader::SizedReader(MockStream::with_input(b"foo bar"), 10); - let mut buf = [0u8; 10]; - assert_eq!(r.read(&mut buf).unwrap(), 7); - let e = r.read(&mut buf).unwrap_err(); - assert_eq!(e.kind(), io::ErrorKind::Other); - assert_eq!(e.description(), "early eof"); - } - - #[test] - fn test_read_chunked_early_eof() { - let mut r = super::HttpReader::ChunkedReader(MockStream::with_input(b"\ - 9\r\n\ - foo bar\ - "), None); - - let mut buf = [0u8; 10]; - assert_eq!(r.read(&mut buf).unwrap(), 7); - let e = r.read(&mut buf).unwrap_err(); - assert_eq!(e.kind(), io::ErrorKind::Other); - assert_eq!(e.description(), "early eof"); - } - - #[test] - fn test_read_sized_zero_len_buf() { - let mut r = super::HttpReader::SizedReader(MockStream::with_input(b"foo bar"), 7); - let mut buf = [0u8; 0]; - assert_eq!(r.read(&mut buf).unwrap(), 0); - } - - #[test] - fn test_read_chunked_zero_len_buf() { - let mut r = super::HttpReader::ChunkedReader(MockStream::with_input(b"\ - 7\r\n\ - foo bar\ - 0\r\n\r\n\ - "), None); - - let mut buf = [0u8; 0]; - assert_eq!(r.read(&mut buf).unwrap(), 0); - } - - #[test] - fn test_read_chunked_fully_consumes() { - let mut r = super::HttpReader::ChunkedReader(MockStream::with_input(b"0\r\n\r\n"), None); - let mut buf = [0; 1]; - assert_eq!(r.read(&mut buf).unwrap(), 0); - assert_eq!(r.read(&mut buf).unwrap(), 0); - - match r { - super::HttpReader::ChunkedReader(mut r, _) => assert_eq!(r.read(&mut buf).unwrap(), 0), - _ => unreachable!(), - } - } - - #[test] - fn test_message_get_incoming_invalid_content_length() { - let raw = MockStream::with_input( - 
b"HTTP/1.1 200 OK\r\nContent-Length: asdf\r\n\r\n"); - let mut msg = Http11Message::with_stream(Box::new(raw)); - assert!(msg.get_incoming().is_err()); - assert!(msg.close_connection().is_ok()); - } - - #[test] - fn test_parse_incoming() { - let mut raw = MockStream::with_input(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); - let mut buf = BufReader::new(&mut raw); - parse_request(&mut buf).unwrap(); - } - - #[test] - fn test_parse_raw_status() { - let mut raw = MockStream::with_input(b"HTTP/1.1 200 OK\r\n\r\n"); - let mut buf = BufReader::new(&mut raw); - let res = parse_response(&mut buf).unwrap(); - - assert_eq!(res.subject.1, "OK"); - - let mut raw = MockStream::with_input(b"HTTP/1.1 200 Howdy\r\n\r\n"); - let mut buf = BufReader::new(&mut raw); - let res = parse_response(&mut buf).unwrap(); - - assert_eq!(res.subject.1, "Howdy"); - } - - - #[test] - fn test_parse_tcp_closed() { - use std::io::ErrorKind; - use error::Error; - - let mut empty = MockStream::new(); - let mut buf = BufReader::new(&mut empty); - match parse_request(&mut buf) { - Err(Error::Io(ref e)) if e.kind() == ErrorKind::UnexpectedEof => (), - other => panic!("unexpected result: {:?}", other) - } - } - - #[cfg(feature = "nightly")] - use test::Bencher; - - #[cfg(feature = "nightly")] - #[bench] - fn bench_parse_incoming(b: &mut Bencher) { - let mut raw = MockStream::with_input(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); - let mut buf = BufReader::new(&mut raw); - b.iter(|| { - parse_request(&mut buf).unwrap(); - buf.get_mut().read.set_position(0); - }); - } -} diff --git a/third_party/rust/hyper/src/http/message.rs b/third_party/rust/hyper/src/http/message.rs deleted file mode 100644 index d983fafa0327..000000000000 --- a/third_party/rust/hyper/src/http/message.rs +++ /dev/null @@ -1,133 +0,0 @@ -//! Defines the `HttpMessage` trait that serves to encapsulate the operations of a single -//! request-response cycle on any HTTP connection. 
- -use std::any::{Any, TypeId}; -use std::fmt::Debug; -use std::io::{Read, Write}; -use std::mem; - -use std::io; -use std::time::Duration; - -use typeable::Typeable; - -use header::Headers; -use http::RawStatus; -use url::Url; - -use method; -use version; -use traitobject; - -/// The trait provides an API for creating new `HttpMessage`s depending on the underlying HTTP -/// protocol. -pub trait Protocol { - /// Creates a fresh `HttpMessage` bound to the given host, based on the given protocol scheme. - fn new_message(&self, host: &str, port: u16, scheme: &str) -> ::Result>; -} - -/// Describes a request. -#[derive(Clone, Debug)] -pub struct RequestHead { - /// The headers of the request - pub headers: Headers, - /// The method of the request - pub method: method::Method, - /// The URL of the request - pub url: Url, -} - -/// Describes a response. -#[derive(Clone, Debug)] -pub struct ResponseHead { - /// The headers of the reponse - pub headers: Headers, - /// The raw status line of the response - pub raw_status: RawStatus, - /// The HTTP/2 version which generated the response - pub version: version::HttpVersion, -} - -/// The trait provides an API for sending an receiving HTTP messages. -pub trait HttpMessage: Write + Read + Send + Any + Typeable + Debug { - /// Initiates a new outgoing request. - /// - /// Only the request's head is provided (in terms of the `RequestHead` struct). - /// - /// After this, the `HttpMessage` instance can be used as an `io::Write` in order to write the - /// body of the request. - fn set_outgoing(&mut self, head: RequestHead) -> ::Result; - /// Obtains the incoming response and returns its head (i.e. the `ResponseHead` struct) - /// - /// After this, the `HttpMessage` instance can be used as an `io::Read` in order to read out - /// the response body. - fn get_incoming(&mut self) -> ::Result; - /// Set the read timeout duration for this message. 
- fn set_read_timeout(&self, dur: Option) -> io::Result<()>; - /// Set the write timeout duration for this message. - fn set_write_timeout(&self, dur: Option) -> io::Result<()>; - /// Closes the underlying HTTP connection. - fn close_connection(&mut self) -> ::Result<()>; - /// Returns whether the incoming message has a body. - fn has_body(&self) -> bool; - /// Called when the Client wishes to use a Proxy. - fn set_proxied(&mut self, val: bool) { - // default implementation so as to not be a breaking change. - warn!("default set_proxied({:?})", val); - } -} - -impl HttpMessage { - unsafe fn downcast_ref_unchecked(&self) -> &T { - mem::transmute(traitobject::data(self)) - } - - unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { - mem::transmute(traitobject::data_mut(self)) - } - - unsafe fn downcast_unchecked(self: Box) -> Box { - let raw: *mut HttpMessage = mem::transmute(self); - mem::transmute(traitobject::data_mut(raw)) - } -} - -impl HttpMessage { - /// Is the underlying type in this trait object a T? - #[inline] - pub fn is(&self) -> bool { - (*self).get_type() == TypeId::of::() - } - - /// If the underlying type is T, get a reference to the contained data. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - if self.is::() { - Some(unsafe { self.downcast_ref_unchecked() }) - } else { - None - } - } - - /// If the underlying type is T, get a mutable reference to the contained - /// data. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - if self.is::() { - Some(unsafe { self.downcast_mut_unchecked() }) - } else { - None - } - } - - /// If the underlying type is T, extract it. 
- #[inline] - pub fn downcast(self: Box) - -> Result, Box> { - if self.is::() { - Ok(unsafe { self.downcast_unchecked() }) - } else { - Err(self) - } - } -} diff --git a/third_party/rust/hyper/src/http/mod.rs b/third_party/rust/hyper/src/http/mod.rs deleted file mode 100644 index b4c97ec024be..000000000000 --- a/third_party/rust/hyper/src/http/mod.rs +++ /dev/null @@ -1,46 +0,0 @@ -//! Pieces pertaining to the HTTP message protocol. -use std::borrow::Cow; - -use header::Connection; -use header::ConnectionOption::{KeepAlive, Close}; -use header::Headers; -use version::HttpVersion; -use version::HttpVersion::{Http10, Http11}; - - -pub use self::message::{HttpMessage, RequestHead, ResponseHead, Protocol}; - -pub mod h1; -pub mod message; - -/// The raw status code and reason-phrase. -#[derive(Clone, PartialEq, Debug)] -pub struct RawStatus(pub u16, pub Cow<'static, str>); - -/// Checks if a connection should be kept alive. -#[inline] -pub fn should_keep_alive(version: HttpVersion, headers: &Headers) -> bool { - trace!("should_keep_alive( {:?}, {:?} )", version, headers.get::()); - match (version, headers.get::()) { - (Http10, None) => false, - (Http10, Some(conn)) if !conn.contains(&KeepAlive) => false, - (Http11, Some(conn)) if conn.contains(&Close) => false, - _ => true - } -} - -#[test] -fn test_should_keep_alive() { - let mut headers = Headers::new(); - - assert!(!should_keep_alive(Http10, &headers)); - assert!(should_keep_alive(Http11, &headers)); - - headers.set(Connection::close()); - assert!(!should_keep_alive(Http10, &headers)); - assert!(!should_keep_alive(Http11, &headers)); - - headers.set(Connection::keep_alive()); - assert!(should_keep_alive(Http10, &headers)); - assert!(should_keep_alive(Http11, &headers)); -} diff --git a/third_party/rust/hyper/src/lib.rs b/third_party/rust/hyper/src/lib.rs index bf51892a9de0..c757dad2fde0 100644 --- a/third_party/rust/hyper/src/lib.rs +++ b/third_party/rust/hyper/src/lib.rs @@ -1,202 +1,66 @@ -#![doc(html_root_url = 
"https://docs.rs/hyper/v0.10.13")] -#![cfg_attr(test, deny(missing_docs))] -#![cfg_attr(test, deny(warnings))] +#![doc(html_root_url = "https://docs.rs/hyper/0.12.7")] +#![deny(missing_docs)] +#![deny(warnings)] +#![deny(missing_debug_implementations)] #![cfg_attr(all(test, feature = "nightly"), feature(test))] -//! # Hyper +//! # hyper //! -//! Hyper is a fast, modern HTTP implementation written in and for Rust. It -//! is a low-level typesafe abstraction over raw HTTP, providing an elegant -//! layer over "stringly-typed" HTTP. +//! hyper is a **fast** and **correct** HTTP implementation written in and for Rust. //! -//! Hyper offers both a [Client](client/index.html) and a -//! [Server](server/index.html) which can be used to drive complex web -//! applications written entirely in Rust. -//! -//! ## Internal Design -//! -//! Hyper is designed as a relatively low-level wrapper over raw HTTP. It should -//! allow the implementation of higher-level abstractions with as little pain as -//! possible, and should not irrevocably hide any information from its users. -//! -//! ### Common Functionality -//! -//! Functionality and code shared between the Server and Client implementations -//! can be found in `src` directly - this includes `NetworkStream`s, `Method`s, -//! `StatusCode`, and so on. -//! -//! #### Methods -//! -//! Methods are represented as a single `enum` to remain as simple as possible. -//! Extension Methods are represented as raw `String`s. A method's safety and -//! idempotence can be accessed using the `safe` and `idempotent` methods. -//! -//! #### StatusCode -//! -//! Status codes are also represented as a single, exhaustive, `enum`. This -//! representation is efficient, typesafe, and ergonomic as it allows the use of -//! `match` to disambiguate known status codes. -//! -//! #### Headers -//! -//! Hyper's [header](header/index.html) representation is likely the most -//! complex API exposed by Hyper. -//! -//! 
Hyper's headers are an abstraction over an internal `HashMap` and provides a -//! typesafe API for interacting with headers that does not rely on the use of -//! "string-typing." -//! -//! Each HTTP header in Hyper has an associated type and implementation of the -//! `Header` trait, which defines an HTTP headers name as a string, how to parse -//! that header, and how to format that header. -//! -//! Headers are then parsed from the string representation lazily when the typed -//! representation of a header is requested and formatted back into their string -//! representation when headers are written back to the client. -//! -//! #### NetworkStream and NetworkAcceptor -//! -//! These are found in `src/net.rs` and define the interface that acceptors and -//! streams must fulfill for them to be used within Hyper. They are by and large -//! internal tools and you should only need to mess around with them if you want to -//! mock or replace `TcpStream` and `TcpAcceptor`. -//! -//! ### Server -//! -//! Server-specific functionality, such as `Request` and `Response` -//! representations, are found in in `src/server`. -//! -//! #### Handler + Server -//! -//! A `Handler` in Hyper accepts a `Request` and `Response`. This is where -//! user-code can handle each connection. The server accepts connections in a -//! task pool with a customizable number of threads, and passes the Request / -//! Response to the handler. -//! -//! #### Request -//! -//! An incoming HTTP Request is represented as a struct containing -//! a `Reader` over a `NetworkStream`, which represents the body, headers, a remote -//! address, an HTTP version, and a `Method` - relatively standard stuff. -//! -//! `Request` implements `Reader` itself, meaning that you can ergonomically get -//! the body out of a `Request` using standard `Reader` methods and helpers. -//! -//! #### Response -//! -//! An outgoing HTTP Response is also represented as a struct containing a `Writer` -//! 
over a `NetworkStream` which represents the Response body in addition to -//! standard items such as the `StatusCode` and HTTP version. `Response`'s `Writer` -//! implementation provides a streaming interface for sending data over to the -//! client. -//! -//! One of the traditional problems with representing outgoing HTTP Responses is -//! tracking the write-status of the Response - have we written the status-line, -//! the headers, the body, etc.? Hyper tracks this information statically using the -//! type system and prevents you, using the type system, from writing headers after -//! you have started writing to the body or vice versa. -//! -//! Hyper does this through a phantom type parameter in the definition of Response, -//! which tracks whether you are allowed to write to the headers or the body. This -//! phantom type can have two values `Fresh` or `Streaming`, with `Fresh` -//! indicating that you can write the headers and `Streaming` indicating that you -//! may write to the body, but not the headers. -//! -//! ### Client -//! -//! Client-specific functionality, such as `Request` and `Response` -//! representations, are found in `src/client`. -//! -//! #### Request -//! -//! An outgoing HTTP Request is represented as a struct containing a `Writer` over -//! a `NetworkStream` which represents the Request body in addition to the standard -//! information such as headers and the request method. -//! -//! Outgoing Requests track their write-status in almost exactly the same way as -//! outgoing HTTP Responses do on the Server, so we will defer to the explanation -//! in the documentation for server Response. -//! -//! Requests expose an efficient streaming interface instead of a builder pattern, -//! but they also provide the needed interface for creating a builder pattern over -//! the API exposed by core Hyper. -//! -//! #### Response -//! -//! Incoming HTTP Responses are represented as a struct containing a `Reader` over -//! 
a `NetworkStream` and contain headers, a status, and an http version. They -//! implement `Reader` and can be read to get the data out of a `Response`. +//! hyper provides both a [Client](client/index.html) and a +//! [Server](server/index.html). //! +//! If just starting out, **check out the [Guides](https://hyper.rs/guides) +//! first.** -extern crate base64; -extern crate time; -#[macro_use] extern crate url; -extern crate unicase; +extern crate bytes; +#[macro_use] extern crate futures; +#[cfg(feature = "runtime")] extern crate futures_cpupool; +extern crate h2; +extern crate http; extern crate httparse; -extern crate num_cpus; -extern crate traitobject; -extern crate typeable; - -#[cfg_attr(test, macro_use)] -extern crate language_tags; - -#[macro_use] -extern crate mime as mime_crate; - -#[macro_use] -extern crate log; +extern crate iovec; +extern crate itoa; +#[macro_use] extern crate log; +#[cfg(feature = "runtime")] extern crate net2; +extern crate time; +#[cfg(feature = "runtime")] extern crate tokio; +#[cfg(feature = "runtime")] extern crate tokio_executor; +#[macro_use] extern crate tokio_io; +#[cfg(feature = "runtime")] extern crate tokio_reactor; +#[cfg(feature = "runtime")] extern crate tokio_tcp; +#[cfg(feature = "runtime")] extern crate tokio_timer; +extern crate want; #[cfg(all(test, feature = "nightly"))] extern crate test; +pub use http::{ + header, + HeaderMap, + Method, + Request, + Response, + StatusCode, + Uri, + Version, +}; -pub use url::Url; pub use client::Client; pub use error::{Result, Error}; -pub use method::Method::{Get, Head, Post, Delete}; -pub use status::StatusCode::{Ok, BadRequest, NotFound}; +pub use body::{Body, Chunk}; pub use server::Server; -pub use language_tags::LanguageTag; - -macro_rules! 
todo( - ($($arg:tt)*) => (if cfg!(not(ndebug)) { - trace!("TODO: {:?}", format_args!($($arg)*)) - }) -); +mod common; #[cfg(test)] -#[macro_use] mod mock; -#[doc(hidden)] -pub mod buffer; +pub mod body; pub mod client; pub mod error; -pub mod method; -pub mod header; -pub mod http; -pub mod net; +mod headers; +mod proto; pub mod server; -pub mod status; -pub mod uri; -pub mod version; - -/// Re-exporting the mime crate, for convenience. -pub mod mime { - pub use mime_crate::*; -} - - -fn _assert_types() { - fn _assert_send() {} - fn _assert_sync() {} - - _assert_send::(); - _assert_send::>(); - _assert_send::(); - _assert_send::(); - _assert_send::<::client::pool::Pool<::net::DefaultConnector>>(); - - _assert_sync::(); - _assert_sync::(); - _assert_sync::<::client::pool::Pool<::net::DefaultConnector>>(); -} +pub mod service; +#[cfg(feature = "runtime")] pub mod rt; +pub mod upgrade; diff --git a/third_party/rust/hyper/src/method.rs b/third_party/rust/hyper/src/method.rs deleted file mode 100644 index ca7e5da27780..000000000000 --- a/third_party/rust/hyper/src/method.rs +++ /dev/null @@ -1,183 +0,0 @@ -//! The HTTP request method -use std::fmt; -use std::str::FromStr; -use std::convert::AsRef; - -use error::Error; -use self::Method::{Options, Get, Post, Put, Delete, Head, Trace, Connect, Patch, - Extension}; - - -/// The Request Method (VERB) -/// -/// Currently includes 8 variants representing the 8 methods defined in -/// [RFC 7230](https://tools.ietf.org/html/rfc7231#section-4.1), plus PATCH, -/// and an Extension variant for all extensions. -/// -/// It may make sense to grow this to include all variants currently -/// registered with IANA, if they are at all common to use. -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum Method { - /// OPTIONS - Options, - /// GET - Get, - /// POST - Post, - /// PUT - Put, - /// DELETE - Delete, - /// HEAD - Head, - /// TRACE - Trace, - /// CONNECT - Connect, - /// PATCH - Patch, - /// Method extensions. 
An example would be `let m = Extension("FOO".to_string())`. - Extension(String) -} - -impl AsRef for Method { - fn as_ref(&self) -> &str { - match *self { - Options => "OPTIONS", - Get => "GET", - Post => "POST", - Put => "PUT", - Delete => "DELETE", - Head => "HEAD", - Trace => "TRACE", - Connect => "CONNECT", - Patch => "PATCH", - Extension(ref s) => s.as_ref() - } - } -} - -impl Method { - /// Whether a method is considered "safe", meaning the request is - /// essentially read-only. - /// - /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.1) - /// for more words. - pub fn safe(&self) -> bool { - match *self { - Get | Head | Options | Trace => true, - _ => false - } - } - - /// Whether a method is considered "idempotent", meaning the request has - /// the same result is executed multiple times. - /// - /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.2) for - /// more words. - pub fn idempotent(&self) -> bool { - if self.safe() { - true - } else { - match *self { - Put | Delete => true, - _ => false - } - } - } -} - -impl FromStr for Method { - type Err = Error; - fn from_str(s: &str) -> Result { - if s == "" { - Err(Error::Method) - } else { - Ok(match s { - "OPTIONS" => Options, - "GET" => Get, - "POST" => Post, - "PUT" => Put, - "DELETE" => Delete, - "HEAD" => Head, - "TRACE" => Trace, - "CONNECT" => Connect, - "PATCH" => Patch, - _ => Extension(s.to_owned()) - }) - } - } -} - -impl fmt::Display for Method { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(match *self { - Options => "OPTIONS", - Get => "GET", - Post => "POST", - Put => "PUT", - Delete => "DELETE", - Head => "HEAD", - Trace => "TRACE", - Connect => "CONNECT", - Patch => "PATCH", - Extension(ref s) => s.as_ref() - }) - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - use std::str::FromStr; - use error::Error; - use super::Method; - use super::Method::{Get, Post, Put, Extension}; - - #[test] - fn test_safe() { - 
assert_eq!(true, Get.safe()); - assert_eq!(false, Post.safe()); - } - - #[test] - fn test_idempotent() { - assert_eq!(true, Get.idempotent()); - assert_eq!(true, Put.idempotent()); - assert_eq!(false, Post.idempotent()); - } - - #[test] - fn test_from_str() { - assert_eq!(Get, FromStr::from_str("GET").unwrap()); - assert_eq!(Extension("MOVE".to_owned()), - FromStr::from_str("MOVE").unwrap()); - let x: Result = FromStr::from_str(""); - if let Err(Error::Method) = x { - } else { - panic!("An empty method is invalid!") - } - } - - #[test] - fn test_fmt() { - assert_eq!("GET".to_owned(), format!("{}", Get)); - assert_eq!("MOVE".to_owned(), - format!("{}", Extension("MOVE".to_owned()))); - } - - #[test] - fn test_hashable() { - let mut counter: HashMap = HashMap::new(); - counter.insert(Get, 1); - assert_eq!(Some(&1), counter.get(&Get)); - } - - #[test] - fn test_as_str() { - assert_eq!(Get.as_ref(), "GET"); - assert_eq!(Post.as_ref(), "POST"); - assert_eq!(Put.as_ref(), "PUT"); - assert_eq!(Extension("MOVE".to_owned()).as_ref(), "MOVE"); - } -} diff --git a/third_party/rust/hyper/src/mock.rs b/third_party/rust/hyper/src/mock.rs index 9a1f7e451662..010b6cf9e38d 100644 --- a/third_party/rust/hyper/src/mock.rs +++ b/third_party/rust/hyper/src/mock.rs @@ -1,80 +1,60 @@ -use std::io::{self, Read, Write, Cursor}; -use std::net::{SocketAddr, Shutdown}; -use std::time::Duration; -use std::cell::Cell; +#[cfg(feature = "runtime")] +use std::collections::HashMap; +use std::cmp; +use std::io::{self, Read, Write}; +#[cfg(feature = "runtime")] +use std::sync::{Arc, Mutex}; -use net::{NetworkStream, NetworkConnector, SslClient}; +use bytes::Buf; +use futures::{Async, Poll}; +#[cfg(feature = "runtime")] +use futures::Future; +use futures::task::{self, Task}; +use tokio_io::{AsyncRead, AsyncWrite}; -#[derive(Clone, Debug)] -pub struct MockStream { - pub read: Cursor>, - next_reads: Vec>, - pub write: Vec, - pub is_closed: bool, - pub error_on_write: bool, - pub error_on_read: bool, - 
pub read_timeout: Cell>, - pub write_timeout: Cell>, - pub id: u64, +#[cfg(feature = "runtime")] +use ::client::connect::{Connect, Connected, Destination}; + +#[derive(Debug)] +pub struct MockCursor { + vec: Vec, + pos: usize, } -impl PartialEq for MockStream { - fn eq(&self, other: &MockStream) -> bool { - self.read.get_ref() == other.read.get_ref() && self.write == other.write - } -} - -impl MockStream { - pub fn new() -> MockStream { - MockStream::with_input(b"") - } - - pub fn with_input(input: &[u8]) -> MockStream { - MockStream::with_responses(vec![input]) - } - - pub fn with_responses(mut responses: Vec<&[u8]>) -> MockStream { - MockStream { - read: Cursor::new(responses.remove(0).to_vec()), - next_reads: responses.into_iter().map(|arr| arr.to_vec()).collect(), - write: vec![], - is_closed: false, - error_on_write: false, - error_on_read: false, - read_timeout: Cell::new(None), - write_timeout: Cell::new(None), - id: 0, +impl MockCursor { + pub fn wrap(vec: Vec) -> MockCursor { + MockCursor { + vec: vec, + pos: 0, } } } -impl Read for MockStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - if self.error_on_read { - Err(io::Error::new(io::ErrorKind::Other, "mock error")) - } else { - match self.read.read(buf) { - Ok(n) => { - if self.read.position() as usize == self.read.get_ref().len() { - if self.next_reads.len() > 0 { - self.read = Cursor::new(self.next_reads.remove(0)); - } - } - Ok(n) - }, - r => r - } - } +impl ::std::ops::Deref for MockCursor { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + &self.vec } } -impl Write for MockStream { - fn write(&mut self, msg: &[u8]) -> io::Result { - if self.error_on_write { - Err(io::Error::new(io::ErrorKind::Other, "mock error")) - } else { - Write::write(&mut self.write, msg) - } +impl AsRef<[u8]> for MockCursor { + fn as_ref(&self) -> &[u8] { + &self.vec + } +} + +impl> PartialEq for MockCursor { + fn eq(&self, other: &S) -> bool { + self.vec == other.as_ref() + } +} + +impl Write for MockCursor 
{ + fn write(&mut self, data: &[u8]) -> io::Result { + trace!("MockCursor::write; len={}", data.len()); + self.vec.extend(data); + Ok(data.len()) } fn flush(&mut self) -> io::Result<()> { @@ -82,87 +62,454 @@ impl Write for MockStream { } } -impl NetworkStream for MockStream { - fn peer_addr(&mut self) -> io::Result { - Ok("127.0.0.1:1337".parse().unwrap()) - } - - fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - self.read_timeout.set(dur); - Ok(()) - } - - fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - self.write_timeout.set(dur); - Ok(()) - } - - fn close(&mut self, _how: Shutdown) -> io::Result<()> { - self.is_closed = true; - Ok(()) +impl Read for MockCursor { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + (&self.vec[self.pos..]).read(buf).map(|n| { + trace!("MockCursor::read; len={}", n); + self.pos += n; + if self.pos == self.vec.len() { + trace!("MockCursor::read to end, clearing"); + self.pos = 0; + self.vec.clear(); + } + n + }) } } -pub struct MockConnector; +const READ_VECS_CNT: usize = 64; -impl NetworkConnector for MockConnector { - type Stream = MockStream; +#[derive(Debug)] +pub struct AsyncIo { + blocked: bool, + bytes_until_block: usize, + error: Option, + flushed: bool, + inner: T, + max_read_vecs: usize, + num_writes: usize, + panic: bool, + park_tasks: bool, + task: Option, +} - fn connect(&self, _host: &str, _port: u16, _scheme: &str) -> ::Result { - Ok(MockStream::new()) +impl AsyncIo { + pub fn new(inner: T, bytes: usize) -> AsyncIo { + AsyncIo { + blocked: false, + bytes_until_block: bytes, + error: None, + flushed: false, + inner: inner, + max_read_vecs: READ_VECS_CNT, + num_writes: 0, + panic: false, + park_tasks: false, + task: None, + } + } + + pub fn block_in(&mut self, bytes: usize) { + self.bytes_until_block = bytes; + + if let Some(task) = self.task.take() { + task.notify(); + } + } + + pub fn error(&mut self, err: io::Error) { + self.error = Some(err); + } + + #[cfg(feature = "nightly")] + pub fn 
panic(&mut self) { + self.panic = true; + } + + pub fn max_read_vecs(&mut self, cnt: usize) { + assert!(cnt <= READ_VECS_CNT); + self.max_read_vecs = cnt; + } + + #[cfg(feature = "runtime")] + pub fn park_tasks(&mut self, enabled: bool) { + self.park_tasks = enabled; + } + + /* + pub fn flushed(&self) -> bool { + self.flushed + } + */ + + pub fn blocked(&self) -> bool { + self.blocked + } + + pub fn num_writes(&self) -> usize { + self.num_writes + } + + fn would_block(&mut self) -> io::Error { + self.blocked = true; + if self.park_tasks { + self.task = Some(task::current()); + } + io::ErrorKind::WouldBlock.into() + } + +} + +impl AsyncIo { + pub fn new_buf>>(buf: T, bytes: usize) -> AsyncIo { + AsyncIo::new(MockCursor::wrap(buf.into()), bytes) + } + + /* + pub fn new_eof() -> AsyncIo { + AsyncIo::new(Buf::wrap(Vec::new().into()), 1) + } + */ + + #[cfg(feature = "runtime")] + fn close(&mut self) { + self.block_in(1); + assert_eq!( + self.inner.vec.len(), + self.inner.pos, + "AsyncIo::close(), but cursor not consumed", + ); + self.inner.vec.truncate(0); + self.inner.pos = 0; } } -/// new connectors must be created if you wish to intercept requests. -macro_rules! 
mock_connector ( - ($name:ident { - $($url:expr => $res:expr)* - }) => ( +impl AsyncIo { + fn write_no_vecs(&mut self, buf: &mut B) -> Poll { + if !buf.has_remaining() { + return Ok(Async::Ready(0)); + } - struct $name; + let n = try_nb!(self.write(buf.bytes())); + buf.advance(n); + Ok(Async::Ready(n)) + } +} - impl $crate::net::NetworkConnector for $name { - type Stream = ::mock::MockStream; - fn connect(&self, host: &str, port: u16, scheme: &str) - -> $crate::Result<::mock::MockStream> { - use std::collections::HashMap; - debug!("MockStream::connect({:?}, {:?}, {:?})", host, port, scheme); - let mut map = HashMap::new(); - $(map.insert($url, $res);)* +impl, T: AsRef<[u8]>> PartialEq for AsyncIo { + fn eq(&self, other: &S) -> bool { + self.inner.as_ref() == other.as_ref() + } +} - let key = format!("{}://{}", scheme, host); - // ignore port for now - match map.get(&*key) { - Some(&res) => Ok($crate::mock::MockStream::with_input(res.as_bytes())), - None => panic!("{:?} doesn't know url {}", stringify!($name), key) +impl Read for AsyncIo { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + assert!(!self.panic, "AsyncIo::read panic"); + self.blocked = false; + if let Some(err) = self.error.take() { + Err(err) + } else if self.bytes_until_block == 0 { + Err(self.would_block()) + } else { + let n = cmp::min(self.bytes_until_block, buf.len()); + let n = try!(self.inner.read(&mut buf[..n])); + self.bytes_until_block -= n; + Ok(n) + } + } +} + +impl Write for AsyncIo { + fn write(&mut self, data: &[u8]) -> io::Result { + assert!(!self.panic, "AsyncIo::write panic"); + self.num_writes += 1; + if let Some(err) = self.error.take() { + trace!("AsyncIo::write error"); + Err(err) + } else if self.bytes_until_block == 0 { + trace!("AsyncIo::write would block"); + Err(self.would_block()) + } else { + trace!("AsyncIo::write; {} bytes", data.len()); + self.flushed = false; + let n = cmp::min(self.bytes_until_block, data.len()); + let n = try!(self.inner.write(&data[..n])); + 
self.bytes_until_block -= n; + Ok(n) + } + } + + fn flush(&mut self) -> io::Result<()> { + self.flushed = true; + self.inner.flush() + } +} + +impl AsyncRead for AsyncIo { +} + +impl AsyncWrite for AsyncIo { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } + + fn write_buf(&mut self, buf: &mut B) -> Poll { + assert!(!self.panic, "AsyncIo::write_buf panic"); + if self.max_read_vecs == 0 { + return self.write_no_vecs(buf); + } + let r = { + static DUMMY: &[u8] = &[0]; + let mut bufs = [From::from(DUMMY); READ_VECS_CNT]; + let i = Buf::bytes_vec(&buf, &mut bufs[..self.max_read_vecs]); + let mut n = 0; + let mut ret = Ok(0); + // each call to write() will increase our count, but we assume + // that if iovecs are used, its really only 1 write call. + let num_writes = self.num_writes; + for iovec in &bufs[..i] { + match self.write(iovec) { + Ok(num) => { + n += num; + ret = Ok(n); + }, + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + if let Ok(0) = ret { + ret = Err(e); + } + } else { + ret = Err(e); + } + break; + } } } - } - - ); - - ($name:ident { $($response:expr),+ }) => ( - struct $name; - - impl $crate::net::NetworkConnector for $name { - type Stream = $crate::mock::MockStream; - fn connect(&self, _: &str, _: u16, _: &str) - -> $crate::Result<$crate::mock::MockStream> { - Ok($crate::mock::MockStream::with_responses(vec![ - $($response),+ - ])) + self.num_writes = num_writes + 1; + ret + }; + match r { + Ok(n) => { + Buf::advance(buf, n); + Ok(Async::Ready(n)) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } +} + +impl ::std::ops::Deref for AsyncIo { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + &self.inner + } +} + +#[cfg(feature = "runtime")] +pub struct Duplex { + inner: Arc>, +} + +#[cfg(feature = "runtime")] +struct DuplexInner { + handle_read_task: Option, + read: AsyncIo, + write: AsyncIo, +} + +#[cfg(feature = "runtime")] +impl Duplex { + pub(crate) 
fn channel() -> (Duplex, DuplexHandle) { + let mut inner = DuplexInner { + handle_read_task: None, + read: AsyncIo::new_buf(Vec::new(), 0), + write: AsyncIo::new_buf(Vec::new(), ::std::usize::MAX), + }; + + inner.read.park_tasks(true); + inner.write.park_tasks(true); + + let inner = Arc::new(Mutex::new(inner)); + + let duplex = Duplex { + inner: inner.clone(), + }; + let handle = DuplexHandle { + inner: inner, + }; + + (duplex, handle) + } +} + +#[cfg(feature = "runtime")] +impl Read for Duplex { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.lock().unwrap().read.read(buf) + } +} + +#[cfg(feature = "runtime")] +impl Write for Duplex { + fn write(&mut self, buf: &[u8]) -> io::Result { + let mut inner = self.inner.lock().unwrap(); + if let Some(task) = inner.handle_read_task.take() { + trace!("waking DuplexHandle read"); + task.notify(); + } + inner.write.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.lock().unwrap().write.flush() + } +} + +#[cfg(feature = "runtime")] +impl AsyncRead for Duplex { +} + +#[cfg(feature = "runtime")] +impl AsyncWrite for Duplex { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } + + fn write_buf(&mut self, buf: &mut B) -> Poll { + let mut inner = self.inner.lock().unwrap(); + if let Some(task) = inner.handle_read_task.take() { + task.notify(); + } + inner.write.write_buf(buf) + } +} + +#[cfg(feature = "runtime")] +pub struct DuplexHandle { + inner: Arc>, +} + +#[cfg(feature = "runtime")] +impl DuplexHandle { + pub fn read(&self, buf: &mut [u8]) -> Poll { + let mut inner = self.inner.lock().unwrap(); + assert!(buf.len() >= inner.write.inner.len()); + if inner.write.inner.is_empty() { + trace!("DuplexHandle read parking"); + inner.handle_read_task = Some(task::current()); + return Ok(Async::NotReady); + } + inner.write.inner.vec.truncate(0); + Ok(Async::Ready(inner.write.inner.len())) + } + + pub fn write(&self, bytes: &[u8]) -> Poll { + let mut inner = 
self.inner.lock().unwrap(); + assert_eq!(inner.read.inner.pos, 0); + assert_eq!(inner.read.inner.vec.len(), 0, "write but read isn't empty"); + inner + .read + .inner + .vec + .extend(bytes); + inner.read.block_in(bytes.len()); + Ok(Async::Ready(bytes.len())) + } +} + +#[cfg(feature = "runtime")] +impl Drop for DuplexHandle { + fn drop(&mut self) { + trace!("mock duplex handle drop"); + if !::std::thread::panicking() { + let mut inner = self.inner.lock().unwrap(); + inner.read.close(); + inner.write.close(); + } + } +} + +#[cfg(feature = "runtime")] +type BoxedConnectFut = Box + Send>; + +#[cfg(feature = "runtime")] +pub struct MockConnector { + mocks: Mutex>>, +} + +#[cfg(feature = "runtime")] +impl MockConnector { + pub fn new() -> MockConnector { + MockConnector { + mocks: Mutex::new(HashMap::new()), + } + } + + pub fn mock(&mut self, key: &str) -> DuplexHandle { + use futures::future; + self.mock_fut(key, future::ok::<_, ()>(())) + } + + pub fn mock_fut(&mut self, key: &str, fut: F) -> DuplexHandle + where + F: Future + Send + 'static, + { + let key = key.to_owned(); + + let (duplex, handle) = Duplex::channel(); + + let fut = Box::new(fut.then(move |_| { + trace!("MockConnector mocked fut ready"); + Ok((duplex, Connected::new())) + })); + self.mocks.lock().unwrap().entry(key) + .or_insert(Vec::new()) + .push(fut); + + handle + } +} + +#[cfg(feature = "runtime")] +impl Connect for MockConnector { + type Transport = Duplex; + type Error = io::Error; + type Future = BoxedConnectFut; + + fn connect(&self, dst: Destination) -> Self::Future { + trace!("mock connect: {:?}", dst); + let key = format!("{}://{}{}", dst.scheme(), dst.host(), if let Some(port) = dst.port() { + format!(":{}", port) + } else { + "".to_owned() + }); + let mut mocks = self.mocks.lock().unwrap(); + let mocks = mocks.get_mut(&key) + .expect(&format!("unknown mocks uri: {}", key)); + assert!(!mocks.is_empty(), "no additional mocks for {}", key); + mocks.remove(0) + } +} + + +#[cfg(feature = 
"runtime")] +impl Drop for MockConnector { + fn drop(&mut self) { + if !::std::thread::panicking() { + let mocks = self.mocks.lock().unwrap(); + for (key, mocks) in mocks.iter() { + assert_eq!( + mocks.len(), + 0, + "not all mocked connects for {:?} were used", + key, + ); } } - ); -); - -#[derive(Debug, Default)] -pub struct MockSsl; - -impl SslClient for MockSsl { - type Stream = T; - fn wrap_client(&self, stream: T, _host: &str) -> ::Result { - Ok(stream) } } diff --git a/third_party/rust/hyper/src/net.rs b/third_party/rust/hyper/src/net.rs deleted file mode 100644 index 5e8c7f532a12..000000000000 --- a/third_party/rust/hyper/src/net.rs +++ /dev/null @@ -1,638 +0,0 @@ -//! A collection of traits abstracting over Listeners and Streams. -use std::any::{Any, TypeId}; -use std::fmt; -use std::io::{self, ErrorKind, Read, Write}; -use std::net::{SocketAddr, ToSocketAddrs, TcpStream, TcpListener, Shutdown}; -use std::mem; -use std::sync::Arc; - -use std::time::Duration; - -use typeable::Typeable; -use traitobject; - -/// The write-status indicating headers have not been written. -pub enum Fresh {} - -/// The write-status indicating headers have been written. -pub enum Streaming {} - -/// An abstraction to listen for connections on a certain port. -pub trait NetworkListener: Clone { - /// The stream produced for each connection. - type Stream: NetworkStream + Send + Clone; - - /// Returns an iterator of streams. - fn accept(&mut self) -> ::Result; - - /// Get the address this Listener ended up listening on. - fn local_addr(&mut self) -> io::Result; - - /// Returns an iterator over incoming connections. - fn incoming(&mut self) -> NetworkConnections { - NetworkConnections(self) - } - - /// Sets the read timeout for all streams that are accepted - fn set_read_timeout(&mut self, _: Option) { - // This default implementation is only here to prevent the addition of - // these methods from being a breaking change. 
They should be removed - // when the next breaking release is made. - warn!("Ignoring read timeout"); - } - - /// Sets the write timeout for all streams that are accepted - fn set_write_timeout(&mut self, _: Option) { - // This default implementation is only here to prevent the addition of - // these methods from being a breaking change. They should be removed - // when the next breaking release is made. - warn!("Ignoring write timeout"); - } -} - -/// An iterator wrapper over a `NetworkAcceptor`. -pub struct NetworkConnections<'a, N: NetworkListener + 'a>(&'a mut N); - -impl<'a, N: NetworkListener + 'a> Iterator for NetworkConnections<'a, N> { - type Item = ::Result; - fn next(&mut self) -> Option<::Result> { - Some(self.0.accept()) - } -} - -/// An abstraction over streams that a `Server` can utilize. -pub trait NetworkStream: Read + Write + Any + Send + Typeable { - /// Get the remote address of the underlying connection. - fn peer_addr(&mut self) -> io::Result; - - /// Set the maximum time to wait for a read to complete. - fn set_read_timeout(&self, dur: Option) -> io::Result<()>; - - /// Set the maximum time to wait for a write to complete. - fn set_write_timeout(&self, dur: Option) -> io::Result<()>; - - /// This will be called when Stream should no longer be kept alive. - #[inline] - fn close(&mut self, _how: Shutdown) -> io::Result<()> { - Ok(()) - } - - // Unsure about name and implementation... - - #[doc(hidden)] - fn set_previous_response_expected_no_content(&mut self, _expected: bool) { } - - #[doc(hidden)] - fn previous_response_expected_no_content(&self) -> bool { - false - } -} - -/// A connector creates a NetworkStream. -pub trait NetworkConnector { - /// Type of `Stream` to create - type Stream: Into>; - - /// Connect to a remote address. 
- fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result; -} - -impl From for Box { - fn from(s: T) -> Box { - Box::new(s) - } -} - -impl fmt::Debug for Box { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.pad("Box") - } -} - -impl NetworkStream { - unsafe fn downcast_ref_unchecked(&self) -> &T { - mem::transmute(traitobject::data(self)) - } - - unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { - mem::transmute(traitobject::data_mut(self)) - } - - unsafe fn downcast_unchecked(self: Box) -> Box { - let raw: *mut NetworkStream = mem::transmute(self); - mem::transmute(traitobject::data_mut(raw)) - } -} - -impl NetworkStream { - /// Is the underlying type in this trait object a `T`? - #[inline] - pub fn is(&self) -> bool { - (*self).get_type() == TypeId::of::() - } - - /// If the underlying type is `T`, get a reference to the contained data. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - if self.is::() { - Some(unsafe { self.downcast_ref_unchecked() }) - } else { - None - } - } - - /// If the underlying type is `T`, get a mutable reference to the contained - /// data. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - if self.is::() { - Some(unsafe { self.downcast_mut_unchecked() }) - } else { - None - } - } - - /// If the underlying type is `T`, extract it. - #[inline] - pub fn downcast(self: Box) - -> Result, Box> { - if self.is::() { - Ok(unsafe { self.downcast_unchecked() }) - } else { - Err(self) - } - } -} - -impl NetworkStream + Send { - unsafe fn downcast_ref_unchecked(&self) -> &T { - mem::transmute(traitobject::data(self)) - } - - unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { - mem::transmute(traitobject::data_mut(self)) - } - - unsafe fn downcast_unchecked(self: Box) -> Box { - let raw: *mut NetworkStream = mem::transmute(self); - mem::transmute(traitobject::data_mut(raw)) - } -} - -impl NetworkStream + Send { - /// Is the underlying type in this trait object a `T`? 
- #[inline] - pub fn is(&self) -> bool { - (*self).get_type() == TypeId::of::() - } - - /// If the underlying type is `T`, get a reference to the contained data. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - if self.is::() { - Some(unsafe { self.downcast_ref_unchecked() }) - } else { - None - } - } - - /// If the underlying type is `T`, get a mutable reference to the contained - /// data. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - if self.is::() { - Some(unsafe { self.downcast_mut_unchecked() }) - } else { - None - } - } - - /// If the underlying type is `T`, extract it. - #[inline] - pub fn downcast(self: Box) - -> Result, Box> { - if self.is::() { - Ok(unsafe { self.downcast_unchecked() }) - } else { - Err(self) - } - } -} - -/// A `NetworkListener` for `HttpStream`s. -#[derive(Clone)] -pub struct HttpListener { - listener: Arc, - - read_timeout : Option, - write_timeout: Option, -} - -impl From for HttpListener { - fn from(listener: TcpListener) -> HttpListener { - HttpListener { - listener: Arc::new(listener), - - read_timeout : None, - write_timeout: None, - } - } -} - -impl HttpListener { - /// Start listening to an address over HTTP. 
- pub fn new(addr: To) -> ::Result { - Ok(HttpListener::from(try!(TcpListener::bind(addr)))) - } -} - -impl NetworkListener for HttpListener { - type Stream = HttpStream; - - #[inline] - fn accept(&mut self) -> ::Result { - let stream = HttpStream(try!(self.listener.accept()).0); - try!(stream.set_read_timeout(self.read_timeout)); - try!(stream.set_write_timeout(self.write_timeout)); - Ok(stream) - } - - #[inline] - fn local_addr(&mut self) -> io::Result { - self.listener.local_addr() - } - - fn set_read_timeout(&mut self, duration: Option) { - self.read_timeout = duration; - } - - fn set_write_timeout(&mut self, duration: Option) { - self.write_timeout = duration; - } -} - -#[cfg(windows)] -impl ::std::os::windows::io::AsRawSocket for HttpListener { - fn as_raw_socket(&self) -> ::std::os::windows::io::RawSocket { - self.listener.as_raw_socket() - } -} - -#[cfg(windows)] -impl ::std::os::windows::io::FromRawSocket for HttpListener { - unsafe fn from_raw_socket(sock: ::std::os::windows::io::RawSocket) -> HttpListener { - HttpListener::from(TcpListener::from_raw_socket(sock)) - } -} - -#[cfg(unix)] -impl ::std::os::unix::io::AsRawFd for HttpListener { - fn as_raw_fd(&self) -> ::std::os::unix::io::RawFd { - self.listener.as_raw_fd() - } -} - -#[cfg(unix)] -impl ::std::os::unix::io::FromRawFd for HttpListener { - unsafe fn from_raw_fd(fd: ::std::os::unix::io::RawFd) -> HttpListener { - HttpListener::from(TcpListener::from_raw_fd(fd)) - } -} - -/// A wrapper around a `TcpStream`. 
-pub struct HttpStream(pub TcpStream); - -impl Clone for HttpStream { - #[inline] - fn clone(&self) -> HttpStream { - HttpStream(self.0.try_clone().unwrap()) - } -} - -impl fmt::Debug for HttpStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("HttpStream(_)") - } -} - -impl Read for HttpStream { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } -} - -impl Write for HttpStream { - #[inline] - fn write(&mut self, msg: &[u8]) -> io::Result { - self.0.write(msg) - } - #[inline] - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } -} - -#[cfg(windows)] -impl ::std::os::windows::io::AsRawSocket for HttpStream { - fn as_raw_socket(&self) -> ::std::os::windows::io::RawSocket { - self.0.as_raw_socket() - } -} - -#[cfg(windows)] -impl ::std::os::windows::io::FromRawSocket for HttpStream { - unsafe fn from_raw_socket(sock: ::std::os::windows::io::RawSocket) -> HttpStream { - HttpStream(TcpStream::from_raw_socket(sock)) - } -} - -#[cfg(unix)] -impl ::std::os::unix::io::AsRawFd for HttpStream { - fn as_raw_fd(&self) -> ::std::os::unix::io::RawFd { - self.0.as_raw_fd() - } -} - -#[cfg(unix)] -impl ::std::os::unix::io::FromRawFd for HttpStream { - unsafe fn from_raw_fd(fd: ::std::os::unix::io::RawFd) -> HttpStream { - HttpStream(TcpStream::from_raw_fd(fd)) - } -} - -impl NetworkStream for HttpStream { - #[inline] - fn peer_addr(&mut self) -> io::Result { - self.0.peer_addr() - } - - #[inline] - fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - self.0.set_read_timeout(dur) - } - - #[inline] - fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - self.0.set_write_timeout(dur) - } - - #[inline] - fn close(&mut self, how: Shutdown) -> io::Result<()> { - match self.0.shutdown(how) { - Ok(_) => Ok(()), - // see https://github.com/hyperium/hyper/issues/508 - Err(ref e) if e.kind() == ErrorKind::NotConnected => Ok(()), - err => err - } - } -} - -/// A connector that will produce HttpStreams. 
-#[derive(Debug, Clone, Default)] -pub struct HttpConnector; - -impl NetworkConnector for HttpConnector { - type Stream = HttpStream; - - fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result { - let addr = &(host, port); - Ok(try!(match scheme { - "http" => { - debug!("http scheme"); - Ok(HttpStream(try!(TcpStream::connect(addr)))) - }, - _ => { - Err(io::Error::new(io::ErrorKind::InvalidInput, - "Invalid scheme for Http")) - } - })) - } -} - -/// A closure as a connector used to generate `TcpStream`s per request -/// -/// # Example -/// -/// Basic example: -/// -/// ```norun -/// Client::with_connector(|addr: &str, port: u16, scheme: &str| { -/// TcpStream::connect(&(addr, port)) -/// }); -/// ``` -/// -/// Example using `TcpBuilder` from the net2 crate if you want to configure your source socket: -/// -/// ```norun -/// Client::with_connector(|addr: &str, port: u16, scheme: &str| { -/// let b = try!(TcpBuilder::new_v4()); -/// try!(b.bind("127.0.0.1:0")); -/// b.connect(&(addr, port)) -/// }); -/// ``` -impl NetworkConnector for F where F: Fn(&str, u16, &str) -> io::Result { - type Stream = HttpStream; - - fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result { - Ok(HttpStream(try!((*self)(host, port, scheme)))) - } -} - -/// An abstraction to allow any SSL implementation to be used with client-side HttpsStreams. -pub trait SslClient { - /// The protected stream. - type Stream: NetworkStream + Send + Clone; - /// Wrap a client stream with SSL. - fn wrap_client(&self, stream: T, host: &str) -> ::Result; -} - -/// An abstraction to allow any SSL implementation to be used with server-side HttpsStreams. -pub trait SslServer { - /// The protected stream. - type Stream: NetworkStream + Send + Clone; - /// Wrap a server stream with SSL. - fn wrap_server(&self, stream: T) -> ::Result; -} - -/// A stream over the HTTP protocol, possibly protected by SSL. -#[derive(Debug, Clone)] -pub enum HttpsStream { - /// A plain text stream. 
- Http(HttpStream), - /// A stream protected by SSL. - Https(S) -} - -impl Read for HttpsStream { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match *self { - HttpsStream::Http(ref mut s) => s.read(buf), - HttpsStream::Https(ref mut s) => s.read(buf) - } - } -} - -impl Write for HttpsStream { - #[inline] - fn write(&mut self, msg: &[u8]) -> io::Result { - match *self { - HttpsStream::Http(ref mut s) => s.write(msg), - HttpsStream::Https(ref mut s) => s.write(msg) - } - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - match *self { - HttpsStream::Http(ref mut s) => s.flush(), - HttpsStream::Https(ref mut s) => s.flush() - } - } -} - -impl NetworkStream for HttpsStream { - #[inline] - fn peer_addr(&mut self) -> io::Result { - match *self { - HttpsStream::Http(ref mut s) => s.peer_addr(), - HttpsStream::Https(ref mut s) => s.peer_addr() - } - } - - #[inline] - fn set_read_timeout(&self, dur: Option) -> io::Result<()> { - match *self { - HttpsStream::Http(ref inner) => inner.0.set_read_timeout(dur), - HttpsStream::Https(ref inner) => inner.set_read_timeout(dur) - } - } - - #[inline] - fn set_write_timeout(&self, dur: Option) -> io::Result<()> { - match *self { - HttpsStream::Http(ref inner) => inner.0.set_write_timeout(dur), - HttpsStream::Https(ref inner) => inner.set_write_timeout(dur) - } - } - - #[inline] - fn close(&mut self, how: Shutdown) -> io::Result<()> { - match *self { - HttpsStream::Http(ref mut s) => s.close(how), - HttpsStream::Https(ref mut s) => s.close(how) - } - } -} - -/// A Http Listener over SSL. -#[derive(Clone)] -pub struct HttpsListener { - listener: HttpListener, - ssl: S, -} - -impl HttpsListener { - /// Start listening to an address over HTTPS. - pub fn new(addr: To, ssl: S) -> ::Result> { - HttpListener::new(addr).map(|l| HttpsListener { - listener: l, - ssl: ssl - }) - } - - /// Construct an HttpsListener from a bound `TcpListener`. 
- pub fn with_listener(listener: HttpListener, ssl: S) -> HttpsListener { - HttpsListener { - listener: listener, - ssl: ssl - } - } -} - -impl NetworkListener for HttpsListener { - type Stream = S::Stream; - - #[inline] - fn accept(&mut self) -> ::Result { - self.listener.accept().and_then(|s| self.ssl.wrap_server(s)) - } - - #[inline] - fn local_addr(&mut self) -> io::Result { - self.listener.local_addr() - } - - fn set_read_timeout(&mut self, duration: Option) { - self.listener.set_read_timeout(duration) - } - - fn set_write_timeout(&mut self, duration: Option) { - self.listener.set_write_timeout(duration) - } -} - -/// A connector that can protect HTTP streams using SSL. -#[derive(Debug, Default)] -pub struct HttpsConnector { - ssl: S, - connector: C, -} - -impl HttpsConnector { - /// Create a new connector using the provided SSL implementation. - pub fn new(s: S) -> HttpsConnector { - HttpsConnector::with_connector(s, HttpConnector) - } -} - -impl HttpsConnector { - /// Create a new connector using the provided SSL implementation. 
- pub fn with_connector(s: S, connector: C) -> HttpsConnector { - HttpsConnector { ssl: s, connector: connector } - } -} - -impl> NetworkConnector for HttpsConnector { - type Stream = HttpsStream; - - fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result { - let stream = try!(self.connector.connect(host, port, "http")); - if scheme == "https" { - debug!("https scheme"); - self.ssl.wrap_client(stream, host).map(HttpsStream::Https) - } else { - Ok(HttpsStream::Http(stream)) - } - } -} - - -#[doc(hidden)] -pub type DefaultConnector = HttpConnector; - -#[cfg(test)] -mod tests { - use mock::MockStream; - use super::{NetworkStream}; - - #[test] - fn test_downcast_box_stream() { - // FIXME: Use Type ascription - let stream: Box = Box::new(MockStream::new()); - - let mock = stream.downcast::().ok().unwrap(); - assert_eq!(mock, Box::new(MockStream::new())); - } - - #[test] - fn test_downcast_unchecked_box_stream() { - // FIXME: Use Type ascription - let stream: Box = Box::new(MockStream::new()); - - let mock = unsafe { stream.downcast_unchecked::() }; - assert_eq!(mock, Box::new(MockStream::new())); - } -} - diff --git a/third_party/rust/hyper/src/proto/h1/conn.rs b/third_party/rust/hyper/src/proto/h1/conn.rs new file mode 100644 index 000000000000..358cdfb660bc --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/conn.rs @@ -0,0 +1,1200 @@ +use std::fmt; +use std::io::{self}; +use std::marker::PhantomData; + +use bytes::{Buf, Bytes}; +use futures::{Async, Poll}; +use http::{HeaderMap, Method, Version}; +use tokio_io::{AsyncRead, AsyncWrite}; + +use ::Chunk; +use proto::{BodyLength, DecodedLength, MessageHead}; +use super::io::{Buffered}; +use super::{EncodedBuf, Encode, Encoder, /*Decode,*/ Decoder, Http1Transaction, ParseContext}; + +const H2_PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + +/// This handles a connection, which will have been established over an +/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple 
+/// `Transaction`s over HTTP. +/// +/// The connection will determine when a message begins and ends as well as +/// determine if this connection can be kept alive after the message, +/// or if it is complete. +pub(crate) struct Conn { + io: Buffered>, + state: State, + _marker: PhantomData +} + +impl Conn +where I: AsyncRead + AsyncWrite, + B: Buf, + T: Http1Transaction, +{ + pub fn new(io: I) -> Conn { + Conn { + io: Buffered::new(io), + state: State { + cached_headers: None, + error: None, + keep_alive: KA::Busy, + method: None, + title_case_headers: false, + notify_read: false, + reading: Reading::Init, + writing: Writing::Init, + upgrade: None, + // We assume a modern world where the remote speaks HTTP/1.1. + // If they tell us otherwise, we'll downgrade in `read_head`. + version: Version::HTTP_11, + }, + _marker: PhantomData, + } + } + + pub fn set_flush_pipeline(&mut self, enabled: bool) { + self.io.set_flush_pipeline(enabled); + } + + pub fn set_max_buf_size(&mut self, max: usize) { + self.io.set_max_buf_size(max); + } + + pub fn set_write_strategy_flatten(&mut self) { + self.io.set_write_strategy_flatten(); + } + + pub fn set_title_case_headers(&mut self) { + self.state.title_case_headers = true; + } + + pub fn into_inner(self) -> (I, Bytes) { + self.io.into_inner() + } + + pub fn pending_upgrade(&mut self) -> Option<::upgrade::Pending> { + self.state.upgrade.take() + } + + pub fn is_read_closed(&self) -> bool { + self.state.is_read_closed() + } + + pub fn is_write_closed(&self) -> bool { + self.state.is_write_closed() + } + + pub fn can_read_head(&self) -> bool { + match self.state.reading { + //Reading::Init => true, + Reading::Init => { + if T::should_read_first() { + true + } else { + match self.state.writing { + Writing::Init => false, + _ => true, + } + } + }, + _ => false, + } + } + + pub fn can_read_body(&self) -> bool { + match self.state.reading { + Reading::Body(..) 
=> true, + _ => false, + } + } + + fn should_error_on_eof(&self) -> bool { + // If we're idle, it's probably just the connection closing gracefully. + T::should_error_on_parse_eof() && !self.state.is_idle() + } + + fn has_h2_prefix(&self) -> bool { + let read_buf = self.io.read_buf(); + read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE + } + + pub fn read_head(&mut self) -> Poll, DecodedLength, bool)>, ::Error> { + debug_assert!(self.can_read_head()); + trace!("Conn::read_head"); + + let msg = match self.io.parse::(ParseContext { + cached_headers: &mut self.state.cached_headers, + req_method: &mut self.state.method, + }) { + Ok(Async::Ready(msg)) => msg, + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => return self.on_read_head_error(e), + }; + + // Note: don't deconstruct `msg` into local variables, it appears + // the optimizer doesn't remove the extra copies. + + debug!("incoming body is {}", msg.decode); + + self.state.busy(); + self.state.keep_alive &= msg.keep_alive; + self.state.version = msg.head.version; + + if msg.decode == DecodedLength::ZERO { + debug_assert!(!msg.expect_continue, "expect-continue needs a body"); + self.state.reading = Reading::KeepAlive; + if !T::should_read_first() { + self.try_keep_alive(); + } + } else { + if msg.expect_continue { + let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; + self.io.headers_buf().extend_from_slice(cont); + } + self.state.reading = Reading::Body(Decoder::new(msg.decode)); + }; + + Ok(Async::Ready(Some((msg.head, msg.decode, msg.wants_upgrade)))) + } + + fn on_read_head_error(&mut self, e: ::Error) -> Poll, ::Error> { + // If we are currently waiting on a message, then an empty + // message should be reported as an error. If not, it is just + // the connection closing gracefully. 
+ let must_error = self.should_error_on_eof(); + self.state.close_read(); + self.io.consume_leading_lines(); + let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty(); + if was_mid_parse || must_error { + // We check if the buf contains the h2 Preface + debug!("parse error ({}) with {} bytes", e, self.io.read_buf().len()); + self.on_parse_error(e) + .map(|()| Async::NotReady) + } else { + debug!("read eof"); + Ok(Async::Ready(None)) + } + } + + pub fn read_body(&mut self) -> Poll, io::Error> { + debug_assert!(self.can_read_body()); + + let (reading, ret) = match self.state.reading { + Reading::Body(ref mut decoder) => { + match decoder.decode(&mut self.io) { + Ok(Async::Ready(slice)) => { + let (reading, chunk) = if decoder.is_eof() { + debug!("incoming body completed"); + (Reading::KeepAlive, if !slice.is_empty() { + Some(Chunk::from(slice)) + } else { + None + }) + } else if slice.is_empty() { + error!("decode stream unexpectedly ended"); + // This should be unreachable, since all 3 decoders + // either set eof=true or return an Err when reading + // an empty slice... 
+ (Reading::Closed, None) + } else { + return Ok(Async::Ready(Some(Chunk::from(slice)))); + }; + (reading, Ok(Async::Ready(chunk))) + }, + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + debug!("decode stream error: {}", e); + (Reading::Closed, Err(e)) + }, + } + }, + _ => unreachable!("read_body invalid state: {:?}", self.state.reading), + }; + + self.state.reading = reading; + self.try_keep_alive(); + ret + } + + pub fn read_keep_alive(&mut self) -> Result<(), ::Error> { + debug_assert!(!self.can_read_head() && !self.can_read_body()); + + trace!("read_keep_alive; is_mid_message={}", self.is_mid_message()); + + if !self.is_mid_message() { + self.require_empty_read().map_err(::Error::new_io)?; + } + Ok(()) + } + + fn is_mid_message(&self) -> bool { + match (&self.state.reading, &self.state.writing) { + (&Reading::Init, &Writing::Init) => false, + _ => true, + } + } + + pub fn wants_read_again(&mut self) -> bool { + let ret = self.state.notify_read; + self.state.notify_read = false; + ret + } + + // This will check to make sure the io object read is empty. + // + // This should only be called for Clients wanting to enter the idle + // state. + fn require_empty_read(&mut self) -> io::Result<()> { + assert!(!self.can_read_head() && !self.can_read_body()); + + if !self.io.read_buf().is_empty() { + debug!("received an unexpected {} bytes", self.io.read_buf().len()); + Err(io::Error::new(io::ErrorKind::InvalidData, "unexpected bytes after message ended")) + } else { + match self.try_io_read()? 
{ + Async::Ready(0) => { + // case handled in try_io_read + Ok(()) + }, + Async::Ready(n) => { + debug!("received {} bytes on an idle connection", n); + let desc = if self.state.is_idle() { + "unexpected bytes after message ended" + } else { + "unexpected bytes before writing message" + }; + Err(io::Error::new(io::ErrorKind::InvalidData, desc)) + }, + Async::NotReady => { + Ok(()) + }, + } + } + } + + fn try_io_read(&mut self) -> Poll { + match self.io.read_from_io() { + Ok(Async::Ready(0)) => { + trace!("try_io_read; found EOF on connection: {:?}", self.state); + let must_error = self.should_error_on_eof(); + let ret = if must_error { + let desc = if self.is_mid_message() { + "unexpected EOF waiting for response" + } else { + "unexpected EOF before writing message" + }; + Err(io::Error::new(io::ErrorKind::UnexpectedEof, desc)) + } else { + Ok(Async::Ready(0)) + }; + + // order is important: must_error needs state BEFORE close_read + self.state.close_read(); + ret + }, + Ok(Async::Ready(n)) => { + Ok(Async::Ready(n)) + }, + Ok(Async::NotReady) => { + Ok(Async::NotReady) + }, + Err(e) => { + trace!("try_io_read; error = {}", e); + self.state.close(); + Err(e) + } + } + } + + + fn maybe_notify(&mut self) { + // its possible that we returned NotReady from poll() without having + // exhausted the underlying Io. We would have done this when we + // determined we couldn't keep reading until we knew how writing + // would finish. + + + + match self.state.reading { + Reading::Body(..) | + Reading::KeepAlive | + Reading::Closed => return, + Reading::Init => (), + }; + + match self.state.writing { + Writing::Body(..) 
=> return, + Writing::Init | + Writing::KeepAlive | + Writing::Closed => (), + } + + if !self.io.is_read_blocked() { + if self.io.read_buf().is_empty() { + match self.io.read_from_io() { + Ok(Async::Ready(_)) => (), + Ok(Async::NotReady) => { + trace!("maybe_notify; read_from_io blocked"); + return + }, + Err(e) => { + trace!("maybe_notify; read_from_io error: {}", e); + self.state.close(); + } + } + } + self.state.notify_read = true; + } + } + + fn try_keep_alive(&mut self) { + self.state.try_keep_alive::(); + self.maybe_notify(); + } + + pub fn can_write_head(&self) -> bool { + if !T::should_read_first() { + match self.state.reading { + Reading::Closed => return false, + _ => {}, + } + } + match self.state.writing { + Writing::Init => true, + _ => false + } + } + + pub fn can_write_body(&self) -> bool { + match self.state.writing { + Writing::Body(..) => true, + Writing::Init | + Writing::KeepAlive | + Writing::Closed => false, + } + } + + pub fn can_buffer_body(&self) -> bool { + self.io.can_buffer() + } + + pub fn write_head(&mut self, head: MessageHead, body: Option) { + if let Some(encoder) = self.encode_head(head, body) { + self.state.writing = if !encoder.is_eof() { + Writing::Body(encoder) + } else if encoder.is_last() { + Writing::Closed + } else { + Writing::KeepAlive + }; + } + } + + pub fn write_full_msg(&mut self, head: MessageHead, body: B) { + if let Some(encoder) = self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64))) { + let is_last = encoder.is_last(); + // Make sure we don't write a body if we weren't actually allowed + // to do so, like because its a HEAD request. 
+ if !encoder.is_eof() { + encoder.danger_full_buf(body, self.io.write_buf()); + } + self.state.writing = if is_last { + Writing::Closed + } else { + Writing::KeepAlive + } + } + } + + fn encode_head(&mut self, mut head: MessageHead, body: Option) -> Option { + debug_assert!(self.can_write_head()); + + if !T::should_read_first() { + self.state.busy(); + } + + self.enforce_version(&mut head); + + let buf = self.io.headers_buf(); + match T::encode(Encode { + head: &mut head, + body, + keep_alive: self.state.wants_keep_alive(), + req_method: &mut self.state.method, + title_case_headers: self.state.title_case_headers, + }, buf) { + Ok(encoder) => { + debug_assert!(self.state.cached_headers.is_none()); + debug_assert!(head.headers.is_empty()); + self.state.cached_headers = Some(head.headers); + Some(encoder) + }, + Err(err) => { + self.state.error = Some(err); + self.state.writing = Writing::Closed; + None + }, + } + } + + // If we know the remote speaks an older version, we try to fix up any messages + // to work with our older peer. + fn enforce_version(&mut self, head: &mut MessageHead) { + + match self.state.version { + Version::HTTP_10 => { + // If the remote only knows HTTP/1.0, we should force ourselves + // to do only speak HTTP/1.0 as well. + head.version = Version::HTTP_10; + }, + _ => { + // If the remote speaks HTTP/1.1, then it *should* be fine with + // both HTTP/1.0 and HTTP/1.1 from us. So again, we just let + // the user's headers be. 
+ } + } + } + + pub fn write_body(&mut self, chunk: B) { + debug_assert!(self.can_write_body() && self.can_buffer_body()); + // empty chunks should be discarded at Dispatcher level + debug_assert!(chunk.remaining() != 0); + + let state = match self.state.writing { + Writing::Body(ref mut encoder) => { + self.io.buffer(encoder.encode(chunk)); + + if encoder.is_eof() { + if encoder.is_last() { + Writing::Closed + } else { + Writing::KeepAlive + } + } else { + return; + } + }, + _ => unreachable!("write_body invalid state: {:?}", self.state.writing), + }; + + self.state.writing = state; + } + + pub fn write_body_and_end(&mut self, chunk: B) { + debug_assert!(self.can_write_body() && self.can_buffer_body()); + // empty chunks should be discarded at Dispatcher level + debug_assert!(chunk.remaining() != 0); + + let state = match self.state.writing { + Writing::Body(ref encoder) => { + let can_keep_alive = encoder.encode_and_end(chunk, self.io.write_buf()); + if can_keep_alive { + Writing::KeepAlive + } else { + Writing::Closed + } + }, + _ => unreachable!("write_body invalid state: {:?}", self.state.writing), + }; + + self.state.writing = state; + } + + pub fn end_body(&mut self) { + debug_assert!(self.can_write_body()); + + let state = match self.state.writing { + Writing::Body(ref mut encoder) => { + // end of stream, that means we should try to eof + match encoder.end() { + Ok(end) => { + if let Some(end) = end { + self.io.buffer(end); + } + if encoder.is_last() { + Writing::Closed + } else { + Writing::KeepAlive + } + }, + Err(_not_eof) => Writing::Closed, + } + }, + _ => return, + }; + + self.state.writing = state; + + } + + // When we get a parse error, depending on what side we are, we might be able + // to write a response before closing the connection. 
+ // + // - Client: there is nothing we can do + // - Server: if Response hasn't been written yet, we can send a 4xx response + fn on_parse_error(&mut self, err: ::Error) -> ::Result<()> { + + match self.state.writing { + Writing::Init => { + if self.has_h2_prefix() { + return Err(::Error::new_version_h2()) + } + if let Some(msg) = T::on_error(&err) { + // Drop the cached headers so as to not trigger a debug + // assert in `write_head`... + self.state.cached_headers.take(); + self.write_head(msg, None); + self.state.error = Some(err); + return Ok(()); + } + } + _ => (), + } + + // fallback is pass the error back up + Err(err) + } + + pub fn flush(&mut self) -> Poll<(), io::Error> { + try_ready!(self.io.flush()); + self.try_keep_alive(); + trace!("flushed({}): {:?}", T::LOG, self.state); + Ok(Async::Ready(())) + } + + pub fn shutdown(&mut self) -> Poll<(), io::Error> { + match self.io.io_mut().shutdown() { + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(())) => { + trace!("shut down IO complete"); + Ok(Async::Ready(())) + } + Err(e) => { + debug!("error shutting down IO: {}", e); + Err(e) + } + } + } + + pub fn close_read(&mut self) { + self.state.close_read(); + } + + pub fn close_write(&mut self) { + self.state.close_write(); + } + + pub fn disable_keep_alive(&mut self) { + if self.state.is_idle() { + self.state.close_read(); + } else { + self.state.disable_keep_alive(); + } + } + + pub fn take_error(&mut self) -> ::Result<()> { + if let Some(err) = self.state.error.take() { + Err(err) + } else { + Ok(()) + } + } + + pub(super) fn on_upgrade(&mut self) -> ::upgrade::OnUpgrade { + trace!("{}: prepare possible HTTP upgrade", T::LOG); + self.state.prepare_upgrade() + } + + // Used in h1::dispatch tests + #[cfg(test)] + pub(super) fn io_mut(&mut self) -> &mut I { + self.io.io_mut() + } +} + +impl fmt::Debug for Conn { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Conn") + .field("state", &self.state) + .field("io", 
&self.io) + .finish() + } +} + +struct State { + /// Re-usable HeaderMap to reduce allocating new ones. + cached_headers: Option, + /// If an error occurs when there wasn't a direct way to return it + /// back to the user, this is set. + error: Option<::Error>, + /// Current keep-alive status. + keep_alive: KA, + /// If mid-message, the HTTP Method that started it. + /// + /// This is used to know things such as if the message can include + /// a body or not. + method: Option, + title_case_headers: bool, + /// Set to true when the Dispatcher should poll read operations + /// again. See the `maybe_notify` method for more. + notify_read: bool, + /// State of allowed reads + reading: Reading, + /// State of allowed writes + writing: Writing, + /// An expected pending HTTP upgrade. + upgrade: Option<::upgrade::Pending>, + /// Either HTTP/1.0 or 1.1 connection + version: Version, +} + +#[derive(Debug)] +enum Reading { + Init, + Body(Decoder), + KeepAlive, + Closed, +} + +enum Writing { + Init, + Body(Encoder), + KeepAlive, + Closed, +} + +impl fmt::Debug for State { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("State") + .field("reading", &self.reading) + .field("writing", &self.writing) + .field("keep_alive", &self.keep_alive) + .field("error", &self.error) + //.field("method", &self.method) + //.field("title_case_headers", &self.title_case_headers) + .finish() + } +} + +impl fmt::Debug for Writing { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Writing::Init => f.write_str("Init"), + Writing::Body(ref enc) => f.debug_tuple("Body") + .field(enc) + .finish(), + Writing::KeepAlive => f.write_str("KeepAlive"), + Writing::Closed => f.write_str("Closed"), + } + } +} + +impl ::std::ops::BitAndAssign for KA { + fn bitand_assign(&mut self, enabled: bool) { + if !enabled { + trace!("remote disabling keep-alive"); + *self = KA::Disabled; + } + } +} + +#[derive(Clone, Copy, Debug)] +enum KA { + Idle, + Busy, + Disabled, +} 
+ +impl Default for KA { + fn default() -> KA { + KA::Busy + } +} + +impl KA { + fn idle(&mut self) { + *self = KA::Idle; + } + + fn busy(&mut self) { + *self = KA::Busy; + } + + fn disable(&mut self) { + *self = KA::Disabled; + } + + fn status(&self) -> KA { + *self + } +} + +impl State { + fn close(&mut self) { + trace!("State::close()"); + self.reading = Reading::Closed; + self.writing = Writing::Closed; + self.keep_alive.disable(); + } + + fn close_read(&mut self) { + trace!("State::close_read()"); + self.reading = Reading::Closed; + self.keep_alive.disable(); + } + + fn close_write(&mut self) { + trace!("State::close_write()"); + self.writing = Writing::Closed; + self.keep_alive.disable(); + } + + fn wants_keep_alive(&self) -> bool { + if let KA::Disabled = self.keep_alive.status() { + false + } else { + true + } + } + + fn try_keep_alive(&mut self) { + match (&self.reading, &self.writing) { + (&Reading::KeepAlive, &Writing::KeepAlive) => { + if let KA::Busy = self.keep_alive.status() { + self.idle(); + } else { + trace!("try_keep_alive({}): could keep-alive, but status = {:?}", T::LOG, self.keep_alive); + self.close(); + } + }, + (&Reading::Closed, &Writing::KeepAlive) | + (&Reading::KeepAlive, &Writing::Closed) => { + self.close() + } + _ => () + } + } + + fn disable_keep_alive(&mut self) { + self.keep_alive.disable() + } + + fn busy(&mut self) { + if let KA::Disabled = self.keep_alive.status() { + return; + } + self.keep_alive.busy(); + } + + fn idle(&mut self) { + self.method = None; + self.keep_alive.idle(); + if self.is_idle() { + self.reading = Reading::Init; + self.writing = Writing::Init; + } else { + self.close(); + } + } + + fn is_idle(&self) -> bool { + if let KA::Idle = self.keep_alive.status() { + true + } else { + false + } + } + + fn is_read_closed(&self) -> bool { + match self.reading { + Reading::Closed => true, + _ => false + } + } + + fn is_write_closed(&self) -> bool { + match self.writing { + Writing::Closed => true, + _ => false + } + } 
+ + fn prepare_upgrade(&mut self) -> ::upgrade::OnUpgrade { + debug_assert!(self.upgrade.is_none()); + let (tx, rx) = ::upgrade::pending(); + self.upgrade = Some(tx); + rx + } +} + +#[cfg(test)] +//TODO: rewrite these using dispatch +mod tests { + + #[cfg(feature = "nightly")] + #[bench] + fn bench_read_head_short(b: &mut ::test::Bencher) { + use super::*; + let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"; + let len = s.len(); + b.bytes = len as u64; + + let mut io = ::mock::AsyncIo::new_buf(Vec::new(), 0); + io.panic(); + let mut conn = Conn::<_, ::Chunk, ::proto::h1::ServerTransaction>::new(io); + *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]); + conn.state.cached_headers = Some(HeaderMap::with_capacity(2)); + + b.iter(|| { + match conn.read_head().unwrap() { + Async::Ready(Some(x)) => { + ::test::black_box(&x); + let mut headers = x.0.headers; + headers.clear(); + conn.state.cached_headers = Some(headers); + }, + f => panic!("expected Ready(Some(..)): {:?}", f) + } + + + conn.io.read_buf_mut().reserve(1); + unsafe { + conn.io.read_buf_mut().set_len(len); + } + conn.state.reading = Reading::Init; + }); + } + /* + use futures::{Async, Future, Stream, Sink}; + use futures::future; + + use proto::{self, ClientTransaction, MessageHead, ServerTransaction}; + use super::super::Encoder; + use mock::AsyncIo; + + use super::{Conn, Decoder, Reading, Writing}; + use ::uri::Uri; + + use std::str::FromStr; + + #[test] + fn test_conn_init_read() { + let good_message = b"GET / HTTP/1.1\r\n\r\n".to_vec(); + let len = good_message.len(); + let io = AsyncIo::new_buf(good_message, len); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + + match conn.poll().unwrap() { + Async::Ready(Some(Frame::Message { message, body: false })) => { + assert_eq!(message, MessageHead { + subject: ::proto::RequestLine(::Get, Uri::from_str("/").unwrap()), + .. 
MessageHead::default() + }) + }, + f => panic!("frame is not Frame::Message: {:?}", f) + } + } + + #[test] + fn test_conn_parse_partial() { + let _: Result<(), ()> = future::lazy(|| { + let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec(); + let io = AsyncIo::new_buf(good_message, 10); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + assert!(conn.poll().unwrap().is_not_ready()); + conn.io.io_mut().block_in(50); + let async = conn.poll().unwrap(); + assert!(async.is_ready()); + match async { + Async::Ready(Some(Frame::Message { .. })) => (), + f => panic!("frame is not Message: {:?}", f), + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_init_read_eof_idle() { + let io = AsyncIo::new_buf(vec![], 1); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.idle(); + + match conn.poll().unwrap() { + Async::Ready(None) => {}, + other => panic!("frame is not None: {:?}", other) + } + } + + #[test] + fn test_conn_init_read_eof_idle_partial_parse() { + let io = AsyncIo::new_buf(b"GET / HTTP/1.1".to_vec(), 100); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.idle(); + + match conn.poll() { + Err(ref err) if err.kind() == ::std::io::ErrorKind::UnexpectedEof => {}, + other => panic!("unexpected frame: {:?}", other) + } + } + + #[test] + fn test_conn_init_read_eof_busy() { + let _: Result<(), ()> = future::lazy(|| { + // server ignores + let io = AsyncIo::new_eof(); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.busy(); + + match conn.poll().unwrap() { + Async::Ready(None) => {}, + other => panic!("unexpected frame: {:?}", other) + } + + // client + let io = AsyncIo::new_eof(); + let mut conn = Conn::<_, proto::Chunk, ClientTransaction>::new(io); + conn.state.busy(); + + match conn.poll() { + Err(ref err) if err.kind() == ::std::io::ErrorKind::UnexpectedEof => {}, + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + 
}).wait(); + } + + #[test] + fn test_conn_body_finish_read_eof() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_eof(); + let mut conn = Conn::<_, proto::Chunk, ClientTransaction>::new(io); + conn.state.busy(); + conn.state.writing = Writing::KeepAlive; + conn.state.reading = Reading::Body(Decoder::length(0)); + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + // conn eofs, but tokio-proto will call poll() again, before calling flush() + // the conn eof in this case is perfectly fine + + match conn.poll() { + Ok(Async::Ready(None)) => (), + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_message_empty_body_read_eof() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec(), 1024); + let mut conn = Conn::<_, proto::Chunk, ClientTransaction>::new(io); + conn.state.busy(); + conn.state.writing = Writing::KeepAlive; + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Message { body: false, .. }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + // conn eofs, but tokio-proto will call poll() again, before calling flush() + // the conn eof in this case is perfectly fine + + match conn.poll() { + Ok(Async::Ready(None)) => (), + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_read_body_end() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(b"POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\n12345".to_vec(), 1024); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.busy(); + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Message { body: true, .. 
}))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + match conn.poll() { + Ok(Async::Ready(Some(Frame::Body { chunk: Some(_) }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + // When the body is done, `poll` MUST return a `Body` frame with chunk set to `None` + match conn.poll() { + Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (), + other => panic!("unexpected frame: {:?}", other) + } + + match conn.poll() { + Ok(Async::NotReady) => (), + other => panic!("unexpected frame: {:?}", other) + } + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_closed_read() { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.close(); + + match conn.poll().unwrap() { + Async::Ready(None) => {}, + other => panic!("frame is not None: {:?}", other) + } + } + + #[test] + fn test_conn_body_write_length() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + let max = super::super::io::DEFAULT_MAX_BUFFER_SIZE + 4096; + conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64)); + + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; max].into()) }).unwrap().is_ready()); + assert!(!conn.can_buffer_body()); + + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; 1024 * 8].into()) }).unwrap().is_not_ready()); + + conn.io.io_mut().block_in(1024 * 3); + assert!(conn.poll_complete().unwrap().is_not_ready()); + conn.io.io_mut().block_in(1024 * 3); + assert!(conn.poll_complete().unwrap().is_not_ready()); + conn.io.io_mut().block_in(max * 2); + assert!(conn.poll_complete().unwrap().is_ready()); + + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'c'; 1024 * 8].into()) }).unwrap().is_ready()); + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_body_write_chunked() { 
+ let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.writing = Writing::Body(Encoder::chunked()); + + assert!(conn.start_send(Frame::Body { chunk: Some("headers".into()) }).unwrap().is_ready()); + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'x'; 8192].into()) }).unwrap().is_ready()); + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_body_flush() { + let _: Result<(), ()> = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.writing = Writing::Body(Encoder::length(1024 * 1024)); + assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 1024].into()) }).unwrap().is_ready()); + assert!(!conn.can_buffer_body()); + conn.io.io_mut().block_in(1024 * 1024 * 5); + assert!(conn.poll_complete().unwrap().is_ready()); + assert!(conn.can_buffer_body()); + assert!(conn.io.io_mut().flushed()); + + Ok(()) + }).wait(); + } + + #[test] + fn test_conn_parking() { + use std::sync::Arc; + use futures::executor::Notify; + use futures::executor::NotifyHandle; + + struct Car { + permit: bool, + } + impl Notify for Car { + fn notify(&self, _id: usize) { + assert!(self.permit, "unparked without permit"); + } + } + + fn car(permit: bool) -> NotifyHandle { + Arc::new(Car { + permit: permit, + }).into() + } + + // test that once writing is done, unparks + let f = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.reading = Reading::KeepAlive; + assert!(conn.poll().unwrap().is_not_ready()); + + conn.state.writing = Writing::KeepAlive; + assert!(conn.poll_complete().unwrap().is_ready()); + Ok::<(), ()>(()) + }); + ::futures::executor::spawn(f).poll_future_notify(&car(true), 0).unwrap(); + + + // test that flushing when not waiting on read doesn't unpark + let f = 
future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.writing = Writing::KeepAlive; + assert!(conn.poll_complete().unwrap().is_ready()); + Ok::<(), ()>(()) + }); + ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap(); + + + // test that flushing and writing isn't done doesn't unpark + let f = future::lazy(|| { + let io = AsyncIo::new_buf(vec![], 4096); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.reading = Reading::KeepAlive; + assert!(conn.poll().unwrap().is_not_ready()); + conn.state.writing = Writing::Body(Encoder::length(5_000)); + assert!(conn.poll_complete().unwrap().is_ready()); + Ok::<(), ()>(()) + }); + ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap(); + } + + #[test] + fn test_conn_closed_write() { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.close(); + + match conn.start_send(Frame::Body { chunk: Some(b"foobar".to_vec().into()) }) { + Err(_e) => {}, + other => panic!("did not return Err: {:?}", other) + } + + assert!(conn.state.is_write_closed()); + } + + #[test] + fn test_conn_write_empty_chunk() { + let io = AsyncIo::new_buf(vec![], 0); + let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io); + conn.state.writing = Writing::KeepAlive; + + assert!(conn.start_send(Frame::Body { chunk: None }).unwrap().is_ready()); + assert!(conn.start_send(Frame::Body { chunk: Some(Vec::new().into()) }).unwrap().is_ready()); + conn.start_send(Frame::Body { chunk: Some(vec![b'a'].into()) }).unwrap_err(); + } + */ +} diff --git a/third_party/rust/hyper/src/proto/h1/date.rs b/third_party/rust/hyper/src/proto/h1/date.rs new file mode 100644 index 000000000000..48bcdfcd6b87 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/date.rs @@ -0,0 +1,73 @@ +use std::cell::RefCell; +use std::fmt::{self, Write}; +use 
std::str; + +use time::{self, Duration}; + +// "Sun, 06 Nov 1994 08:49:37 GMT".len() +pub const DATE_VALUE_LENGTH: usize = 29; + +pub fn extend(dst: &mut Vec) { + CACHED.with(|cache| { + dst.extend_from_slice(cache.borrow().buffer()); + }) +} + +pub fn update() { + CACHED.with(|cache| { + cache.borrow_mut().check(); + }) +} + +struct CachedDate { + bytes: [u8; DATE_VALUE_LENGTH], + pos: usize, + next_update: time::Timespec, +} + +thread_local!(static CACHED: RefCell = RefCell::new(CachedDate::new())); + +impl CachedDate { + fn new() -> Self { + let mut cache = CachedDate { + bytes: [0; DATE_VALUE_LENGTH], + pos: 0, + next_update: time::Timespec::new(0, 0), + }; + cache.update(time::get_time()); + cache + } + + fn buffer(&self) -> &[u8] { + &self.bytes[..] + } + + fn check(&mut self) { + let now = time::get_time(); + if now > self.next_update { + self.update(now); + } + } + + fn update(&mut self, now: time::Timespec) { + self.pos = 0; + let _ = write!(self, "{}", time::at_utc(now).rfc822()); + debug_assert!(self.pos == DATE_VALUE_LENGTH); + self.next_update = now + Duration::seconds(1); + self.next_update.nsec = 0; + } +} + +impl fmt::Write for CachedDate { + fn write_str(&mut self, s: &str) -> fmt::Result { + let len = s.len(); + self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); + self.pos += len; + Ok(()) + } +} + +#[test] +fn test_date_len() { + assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len()); +} diff --git a/third_party/rust/hyper/src/proto/h1/decode.rs b/third_party/rust/hyper/src/proto/h1/decode.rs new file mode 100644 index 000000000000..b8e2cac7d5a9 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/decode.rs @@ -0,0 +1,536 @@ +use std::error::Error as StdError; +use std::fmt; +use std::usize; +use std::io; + +use futures::{Async, Poll}; +use bytes::Bytes; + +use super::io::MemRead; +use super::{DecodedLength}; + +use self::Kind::{Length, Chunked, Eof}; + +/// Decoders to handle different Transfer-Encodings. 
+/// +/// If a message body does not include a Transfer-Encoding, it *should* +/// include a Content-Length header. +#[derive(Clone, PartialEq)] +pub struct Decoder { + kind: Kind, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +enum Kind { + /// A Reader used when a Content-Length header is passed with a positive integer. + Length(u64), + /// A Reader used when Transfer-Encoding is `chunked`. + Chunked(ChunkedState, u64), + /// A Reader used for responses that don't indicate a length or chunked. + /// + /// The bool tracks when EOF is seen on the transport. + /// + /// Note: This should only used for `Response`s. It is illegal for a + /// `Request` to be made with both `Content-Length` and + /// `Transfer-Encoding: chunked` missing, as explained from the spec: + /// + /// > If a Transfer-Encoding header field is present in a response and + /// > the chunked transfer coding is not the final encoding, the + /// > message body length is determined by reading the connection until + /// > it is closed by the server. If a Transfer-Encoding header field + /// > is present in a request and the chunked transfer coding is not + /// > the final encoding, the message body length cannot be determined + /// > reliably; the server MUST respond with the 400 (Bad Request) + /// > status code and then close the connection. 
+ Eof(bool), +} + +#[derive(Debug, PartialEq, Clone, Copy)] +enum ChunkedState { + Size, + SizeLws, + Extension, + SizeLf, + Body, + BodyCr, + BodyLf, + EndCr, + EndLf, + End, +} + +impl Decoder { + // constructors + + pub fn length(x: u64) -> Decoder { + Decoder { kind: Kind::Length(x) } + } + + pub fn chunked() -> Decoder { + Decoder { kind: Kind::Chunked(ChunkedState::Size, 0) } + } + + pub fn eof() -> Decoder { + Decoder { kind: Kind::Eof(false) } + } + + pub(super) fn new(len: DecodedLength) -> Self { + match len { + DecodedLength::CHUNKED => Decoder::chunked(), + DecodedLength::CLOSE_DELIMITED => Decoder::eof(), + length => Decoder::length(length.danger_len()), + } + } + + // methods + + pub fn is_eof(&self) -> bool { + match self.kind { + Length(0) | + Chunked(ChunkedState::End, _) | + Eof(true) => true, + _ => false, + } + } + + pub fn decode(&mut self, body: &mut R) -> Poll { + trace!("decode; state={:?}", self.kind); + match self.kind { + Length(ref mut remaining) => { + if *remaining == 0 { + Ok(Async::Ready(Bytes::new())) + } else { + let to_read = *remaining as usize; + let buf = try_ready!(body.read_mem(to_read)); + let num = buf.as_ref().len() as u64; + if num > *remaining { + *remaining = 0; + } else if num == 0 { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, IncompleteBody)); + } else { + *remaining -= num; + } + Ok(Async::Ready(buf)) + } + } + Chunked(ref mut state, ref mut size) => { + loop { + let mut buf = None; + // advances the chunked state + *state = try_ready!(state.step(body, size, &mut buf)); + if *state == ChunkedState::End { + trace!("end of chunked"); + return Ok(Async::Ready(Bytes::new())); + } + if let Some(buf) = buf { + return Ok(Async::Ready(buf)); + } + } + } + Eof(ref mut is_eof) => { + if *is_eof { + Ok(Async::Ready(Bytes::new())) + } else { + // 8192 chosen because its about 2 packets, there probably + // won't be that much available, so don't have MemReaders + // allocate buffers to big + let slice = 
try_ready!(body.read_mem(8192)); + *is_eof = slice.is_empty(); + Ok(Async::Ready(slice)) + } + } + } + } +} + + +impl fmt::Debug for Decoder { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.kind, f) + } +} + +macro_rules! byte ( + ($rdr:ident) => ({ + let buf = try_ready!($rdr.read_mem(1)); + if !buf.is_empty() { + buf[0] + } else { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, + "Unexpected eof during chunk size line")); + } + }) +); + +impl ChunkedState { + fn step(&self, + body: &mut R, + size: &mut u64, + buf: &mut Option) + -> Poll { + use self::ChunkedState::*; + match *self { + Size => ChunkedState::read_size(body, size), + SizeLws => ChunkedState::read_size_lws(body), + Extension => ChunkedState::read_extension(body), + SizeLf => ChunkedState::read_size_lf(body, *size), + Body => ChunkedState::read_body(body, size, buf), + BodyCr => ChunkedState::read_body_cr(body), + BodyLf => ChunkedState::read_body_lf(body), + EndCr => ChunkedState::read_end_cr(body), + EndLf => ChunkedState::read_end_lf(body), + End => Ok(Async::Ready(ChunkedState::End)), + } + } + fn read_size(rdr: &mut R, size: &mut u64) -> Poll { + trace!("Read chunk hex size"); + let radix = 16; + match byte!(rdr) { + b @ b'0'...b'9' => { + *size *= radix; + *size += (b - b'0') as u64; + } + b @ b'a'...b'f' => { + *size *= radix; + *size += (b + 10 - b'a') as u64; + } + b @ b'A'...b'F' => { + *size *= radix; + *size += (b + 10 - b'A') as u64; + } + b'\t' | b' ' => return Ok(Async::Ready(ChunkedState::SizeLws)), + b';' => return Ok(Async::Ready(ChunkedState::Extension)), + b'\r' => return Ok(Async::Ready(ChunkedState::SizeLf)), + _ => { + return Err(io::Error::new(io::ErrorKind::InvalidInput, + "Invalid chunk size line: Invalid Size")); + } + } + Ok(Async::Ready(ChunkedState::Size)) + } + fn read_size_lws(rdr: &mut R) -> Poll { + trace!("read_size_lws"); + match byte!(rdr) { + // LWS can follow the chunk size, but no more digits can come + b'\t' | b' ' => 
Ok(Async::Ready(ChunkedState::SizeLws)), + b';' => Ok(Async::Ready(ChunkedState::Extension)), + b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)), + _ => { + Err(io::Error::new(io::ErrorKind::InvalidInput, + "Invalid chunk size linear white space")) + } + } + } + fn read_extension(rdr: &mut R) -> Poll { + trace!("read_extension"); + match byte!(rdr) { + b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)), + _ => Ok(Async::Ready(ChunkedState::Extension)), // no supported extensions + } + } + fn read_size_lf(rdr: &mut R, size: u64) -> Poll { + trace!("Chunk size is {:?}", size); + match byte!(rdr) { + b'\n' => { + if size == 0 { + Ok(Async::Ready(ChunkedState::EndCr)) + } else { + debug!("incoming chunked header: {0:#X} ({0} bytes)", size); + Ok(Async::Ready(ChunkedState::Body)) + } + }, + _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk size LF")), + } + } + + fn read_body(rdr: &mut R, + rem: &mut u64, + buf: &mut Option) + -> Poll { + trace!("Chunked read, remaining={:?}", rem); + + // cap remaining bytes at the max capacity of usize + let rem_cap = match *rem { + r if r > usize::MAX as u64 => usize::MAX, + r => r as usize, + }; + + let to_read = rem_cap; + let slice = try_ready!(rdr.read_mem(to_read)); + let count = slice.len(); + + if count == 0 { + *rem = 0; + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, IncompleteBody)); + } + *buf = Some(slice); + *rem -= count as u64; + + if *rem > 0 { + Ok(Async::Ready(ChunkedState::Body)) + } else { + Ok(Async::Ready(ChunkedState::BodyCr)) + } + } + fn read_body_cr(rdr: &mut R) -> Poll { + match byte!(rdr) { + b'\r' => Ok(Async::Ready(ChunkedState::BodyLf)), + _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body CR")), + } + } + fn read_body_lf(rdr: &mut R) -> Poll { + match byte!(rdr) { + b'\n' => Ok(Async::Ready(ChunkedState::Size)), + _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body LF")), + } + } + + fn read_end_cr(rdr: &mut R) -> Poll { + match 
byte!(rdr) { + b'\r' => Ok(Async::Ready(ChunkedState::EndLf)), + _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end CR")), + } + } + fn read_end_lf(rdr: &mut R) -> Poll { + match byte!(rdr) { + b'\n' => Ok(Async::Ready(ChunkedState::End)), + _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end LF")), + } + } +} + +#[derive(Debug)] +struct IncompleteBody; + +impl fmt::Display for IncompleteBody { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.description()) + } +} + +impl StdError for IncompleteBody { + fn description(&self) -> &str { + "end of file before message length reached" + } +} + +#[cfg(test)] +mod tests { + use std::io; + use std::io::Write; + use super::Decoder; + use super::ChunkedState; + use super::super::io::MemRead; + use futures::{Async, Poll}; + use bytes::{BytesMut, Bytes}; + use mock::AsyncIo; + + impl<'a> MemRead for &'a [u8] { + fn read_mem(&mut self, len: usize) -> Poll { + let n = ::std::cmp::min(len, self.len()); + if n > 0 { + let (a, b) = self.split_at(n); + let mut buf = BytesMut::from(a); + *self = b; + Ok(Async::Ready(buf.split_to(n).freeze())) + } else { + Ok(Async::Ready(Bytes::new())) + } + } + } + + trait HelpUnwrap { + fn unwrap(self) -> T; + } + impl HelpUnwrap for Async { + fn unwrap(self) -> Bytes { + match self { + Async::Ready(bytes) => bytes, + Async::NotReady => panic!(), + } + } + } + impl HelpUnwrap for Async { + fn unwrap(self) -> ChunkedState { + match self { + Async::Ready(state) => state, + Async::NotReady => panic!(), + } + } + } + + #[test] + fn test_read_chunk_size() { + use std::io::ErrorKind::{UnexpectedEof, InvalidInput}; + + fn read(s: &str) -> u64 { + let mut state = ChunkedState::Size; + let rdr = &mut s.as_bytes(); + let mut size = 0; + loop { + let result = state.step(rdr, &mut size, &mut None); + let desc = format!("read_size failed for {:?}", s); + state = result.expect(desc.as_str()).unwrap(); + if state == ChunkedState::Body || state == 
ChunkedState::EndCr { + break; + } + } + size + } + + fn read_err(s: &str, expected_err: io::ErrorKind) { + let mut state = ChunkedState::Size; + let rdr = &mut s.as_bytes(); + let mut size = 0; + loop { + let result = state.step(rdr, &mut size, &mut None); + state = match result { + Ok(s) => s.unwrap(), + Err(e) => { + assert!(expected_err == e.kind(), "Reading {:?}, expected {:?}, but got {:?}", + s, expected_err, e.kind()); + return; + } + }; + if state == ChunkedState::Body || state == ChunkedState::End { + panic!(format!("Was Ok. Expected Err for {:?}", s)); + } + } + } + + assert_eq!(1, read("1\r\n")); + assert_eq!(1, read("01\r\n")); + assert_eq!(0, read("0\r\n")); + assert_eq!(0, read("00\r\n")); + assert_eq!(10, read("A\r\n")); + assert_eq!(10, read("a\r\n")); + assert_eq!(255, read("Ff\r\n")); + assert_eq!(255, read("Ff \r\n")); + // Missing LF or CRLF + read_err("F\rF", InvalidInput); + read_err("F", UnexpectedEof); + // Invalid hex digit + read_err("X\r\n", InvalidInput); + read_err("1X\r\n", InvalidInput); + read_err("-\r\n", InvalidInput); + read_err("-1\r\n", InvalidInput); + // Acceptable (if not fully valid) extensions do not influence the size + assert_eq!(1, read("1;extension\r\n")); + assert_eq!(10, read("a;ext name=value\r\n")); + assert_eq!(1, read("1;extension;extension2\r\n")); + assert_eq!(1, read("1;;; ;\r\n")); + assert_eq!(2, read("2; extension...\r\n")); + assert_eq!(3, read("3 ; extension=123\r\n")); + assert_eq!(3, read("3 ;\r\n")); + assert_eq!(3, read("3 ; \r\n")); + // Invalid extensions cause an error + read_err("1 invalid extension\r\n", InvalidInput); + read_err("1 A\r\n", InvalidInput); + read_err("1;no CRLF", UnexpectedEof); + } + + #[test] + fn test_read_sized_early_eof() { + let mut bytes = &b"foo bar"[..]; + let mut decoder = Decoder::length(10); + assert_eq!(decoder.decode(&mut bytes).unwrap().unwrap().len(), 7); + let e = decoder.decode(&mut bytes).unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); + } + 
+ #[test] + fn test_read_chunked_early_eof() { + let mut bytes = &b"\ + 9\r\n\ + foo bar\ + "[..]; + let mut decoder = Decoder::chunked(); + assert_eq!(decoder.decode(&mut bytes).unwrap().unwrap().len(), 7); + let e = decoder.decode(&mut bytes).unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); + } + + #[test] + fn test_read_chunked_single_read() { + let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..]; + let buf = Decoder::chunked().decode(&mut mock_buf).expect("decode").unwrap(); + assert_eq!(16, buf.len()); + let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); + assert_eq!("1234567890abcdef", &result); + } + + #[test] + fn test_read_chunked_after_eof() { + let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..]; + let mut decoder = Decoder::chunked(); + + // normal read + let buf = decoder.decode(&mut mock_buf).expect("decode").unwrap(); + assert_eq!(16, buf.len()); + let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); + assert_eq!("1234567890abcdef", &result); + + // eof read + let buf = decoder.decode(&mut mock_buf).expect("decode").unwrap(); + assert_eq!(0, buf.len()); + + // ensure read after eof also returns eof + let buf = decoder.decode(&mut mock_buf).expect("decode").unwrap(); + assert_eq!(0, buf.len()); + } + + // perform an async read using a custom buffer size and causing a blocking + // read at the specified byte + fn read_async(mut decoder: Decoder, + content: &[u8], + block_at: usize) + -> String { + let content_len = content.len(); + let mut ins = AsyncIo::new(content, block_at); + let mut outs = Vec::new(); + loop { + match decoder.decode(&mut ins).expect("unexpected decode error: {}") { + Async::Ready(buf) => { + if buf.is_empty() { + break; // eof + } + outs.write(buf.as_ref()).expect("write buffer"); + }, + Async::NotReady => { + ins.block_in(content_len); // we only block once + } + }; + } + String::from_utf8(outs).expect("decode String") + } + + // iterate 
over the different ways that this async read could go. + // tests blocking a read at each byte along the content - The shotgun approach + fn all_async_cases(content: &str, expected: &str, decoder: Decoder) { + let content_len = content.len(); + for block_at in 0..content_len { + let actual = read_async(decoder.clone(), content.as_bytes(), block_at); + assert_eq!(expected, &actual) //, "Failed async. Blocking at {}", block_at); + } + } + + #[test] + fn test_read_length_async() { + let content = "foobar"; + all_async_cases(content, content, Decoder::length(content.len() as u64)); + } + + #[test] + fn test_read_chunked_async() { + let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n"; + let expected = "foobar"; + all_async_cases(content, expected, Decoder::chunked()); + } + + #[test] + fn test_read_eof_async() { + let content = "foobar"; + all_async_cases(content, content, Decoder::eof()); + } + +} diff --git a/third_party/rust/hyper/src/proto/h1/dispatch.rs b/third_party/rust/hyper/src/proto/h1/dispatch.rs new file mode 100644 index 000000000000..174a8cb95118 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/dispatch.rs @@ -0,0 +1,584 @@ +use bytes::{Buf, Bytes}; +use futures::{Async, Future, Poll, Stream}; +use http::{Request, Response, StatusCode}; +use tokio_io::{AsyncRead, AsyncWrite}; + +use body::{Body, Payload}; +use body::internal::FullDataArg; +use common::Never; +use proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead}; +use super::Http1Transaction; +use service::Service; + +pub(crate) struct Dispatcher { + conn: Conn, + dispatch: D, + body_tx: Option<::body::Sender>, + body_rx: Option, + is_closing: bool, +} + +pub(crate) trait Dispatch { + type PollItem; + type PollBody; + type PollError; + type RecvItem; + fn poll_msg(&mut self) -> Poll, Self::PollError>; + fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()>; + fn poll_ready(&mut self) -> Poll<(), ()>; + fn 
should_poll(&self) -> bool; +} + +pub struct Server { + in_flight: Option, + pub(crate) service: S, +} + +pub struct Client { + callback: Option<::client::dispatch::Callback, Response>>, + rx: ClientRx, +} + +type ClientRx = ::client::dispatch::Receiver, Response>; + +impl Dispatcher +where + D: Dispatch, PollBody=Bs, RecvItem=MessageHead>, + D::PollError: Into>, + I: AsyncRead + AsyncWrite, + T: Http1Transaction, + Bs: Payload, +{ + pub fn new(dispatch: D, conn: Conn) -> Self { + Dispatcher { + conn: conn, + dispatch: dispatch, + body_tx: None, + body_rx: None, + is_closing: false, + } + } + + pub fn disable_keep_alive(&mut self) { + self.conn.disable_keep_alive() + } + + pub fn into_inner(self) -> (I, Bytes, D) { + let (io, buf) = self.conn.into_inner(); + (io, buf, self.dispatch) + } + + /// Run this dispatcher until HTTP says this connection is done, + /// but don't call `AsyncWrite::shutdown` on the underlying IO. + /// + /// This is useful for old-style HTTP upgrades, but ignores + /// newer-style upgrade API. + pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> { + self.poll_catch(false) + .map(|x| { + x.map(|ds| if let Dispatched::Upgrade(pending) = ds { + pending.manual(); + }) + }) + } + + fn poll_catch(&mut self, should_shutdown: bool) -> Poll { + self.poll_inner(should_shutdown).or_else(|e| { + // An error means we're shutting down either way. + // We just try to give the error to the user, + // and close the connection with an Ok. If we + // cannot give it to the user, then return the Err. + self.dispatch.recv_msg(Err(e))?; + Ok(Async::Ready(Dispatched::Shutdown)) + }) + } + + fn poll_inner(&mut self, should_shutdown: bool) -> Poll { + T::update_date(); + + loop { + self.poll_read()?; + self.poll_write()?; + self.poll_flush()?; + + // This could happen if reading paused before blocking on IO, + // such as getting to the end of a framed message, but then + // writing/flushing set the state back to Init. 
In that case, + // if the read buffer still had bytes, we'd want to try poll_read + // again, or else we wouldn't ever be woken up again. + // + // Using this instead of task::current() and notify() inside + // the Conn is noticeably faster in pipelined benchmarks. + if !self.conn.wants_read_again() { + break; + } + } + + if self.is_done() { + if let Some(pending) = self.conn.pending_upgrade() { + self.conn.take_error()?; + return Ok(Async::Ready(Dispatched::Upgrade(pending))); + } else if should_shutdown { + try_ready!(self.conn.shutdown().map_err(::Error::new_shutdown)); + } + self.conn.take_error()?; + Ok(Async::Ready(Dispatched::Shutdown)) + } else { + Ok(Async::NotReady) + } + } + + fn poll_read(&mut self) -> Poll<(), ::Error> { + loop { + if self.is_closing { + return Ok(Async::Ready(())); + } else if self.conn.can_read_head() { + try_ready!(self.poll_read_head()); + } else if let Some(mut body) = self.body_tx.take() { + if self.conn.can_read_body() { + match body.poll_ready() { + Ok(Async::Ready(())) => (), + Ok(Async::NotReady) => { + self.body_tx = Some(body); + return Ok(Async::NotReady); + }, + Err(_canceled) => { + // user doesn't care about the body + // so we should stop reading + trace!("body receiver dropped before eof, closing"); + self.conn.close_read(); + return Ok(Async::Ready(())); + } + } + match self.conn.read_body() { + Ok(Async::Ready(Some(chunk))) => { + match body.send_data(chunk) { + Ok(()) => { + self.body_tx = Some(body); + }, + Err(_canceled) => { + if self.conn.can_read_body() { + trace!("body receiver dropped before eof, closing"); + self.conn.close_read(); + } + } + + } + }, + Ok(Async::Ready(None)) => { + // just drop, the body will close automatically + }, + Ok(Async::NotReady) => { + self.body_tx = Some(body); + return Ok(Async::NotReady); + } + Err(e) => { + body.send_error(::Error::new_body(e)); + } + } + } else { + // just drop, the body will close automatically + } + } else { + return 
self.conn.read_keep_alive().map(Async::Ready); + } + } + } + + fn poll_read_head(&mut self) -> Poll<(), ::Error> { + // can dispatch receive, or does it still care about, an incoming message? + match self.dispatch.poll_ready() { + Ok(Async::Ready(())) => (), + Ok(Async::NotReady) => unreachable!("dispatch not ready when conn is"), + Err(()) => { + trace!("dispatch no longer receiving messages"); + self.close(); + return Ok(Async::Ready(())); + } + } + // dispatch is ready for a message, try to read one + match self.conn.read_head() { + Ok(Async::Ready(Some((head, body_len, wants_upgrade)))) => { + let mut body = match body_len { + DecodedLength::ZERO => Body::empty(), + other => { + let (tx, rx) = Body::new_channel(other.into_opt()); + self.body_tx = Some(tx); + rx + }, + }; + if wants_upgrade { + body.set_on_upgrade(self.conn.on_upgrade()); + } + self.dispatch.recv_msg(Ok((head, body)))?; + Ok(Async::Ready(())) + } + Ok(Async::Ready(None)) => { + // read eof, conn will start to shutdown automatically + Ok(Async::Ready(())) + } + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(err) => { + debug!("read_head error: {}", err); + self.dispatch.recv_msg(Err(err))?; + // if here, the dispatcher gave the user the error + // somewhere else. we still need to shutdown, but + // not as a second error. + Ok(Async::Ready(())) + } + } + } + + fn poll_write(&mut self) -> Poll<(), ::Error> { + loop { + if self.is_closing { + return Ok(Async::Ready(())); + } else if self.body_rx.is_none() && self.conn.can_write_head() && self.dispatch.should_poll() { + if let Some((head, mut body)) = try_ready!(self.dispatch.poll_msg().map_err(::Error::new_user_service)) { + // Check if the body knows its full data immediately. + // + // If so, we can skip a bit of bookkeeping that streaming + // bodies need to do. 
+ if let Some(full) = body.__hyper_full_data(FullDataArg(())).0 { + self.conn.write_full_msg(head, full); + return Ok(Async::Ready(())); + } + let body_type = if body.is_end_stream() { + self.body_rx = None; + None + } else { + let btype = body.content_length() + .map(BodyLength::Known) + .or_else(|| Some(BodyLength::Unknown)); + self.body_rx = Some(body); + btype + }; + self.conn.write_head(head, body_type); + } else { + self.close(); + return Ok(Async::Ready(())); + } + } else if !self.conn.can_buffer_body() { + try_ready!(self.poll_flush()); + } else if let Some(mut body) = self.body_rx.take() { + if !self.conn.can_write_body() { + trace!( + "no more write body allowed, user body is_end_stream = {}", + body.is_end_stream(), + ); + continue; + } + match body.poll_data().map_err(::Error::new_user_body)? { + Async::Ready(Some(chunk)) => { + let eos = body.is_end_stream(); + if eos { + if chunk.remaining() == 0 { + trace!("discarding empty chunk"); + self.conn.end_body(); + } else { + self.conn.write_body_and_end(chunk); + } + } else { + self.body_rx = Some(body); + if chunk.remaining() == 0 { + trace!("discarding empty chunk"); + continue; + } + self.conn.write_body(chunk); + } + }, + Async::Ready(None) => { + self.conn.end_body(); + }, + Async::NotReady => { + self.body_rx = Some(body); + return Ok(Async::NotReady); + } + } + } else { + return Ok(Async::NotReady); + } + } + } + + fn poll_flush(&mut self) -> Poll<(), ::Error> { + self.conn.flush().map_err(|err| { + debug!("error writing: {}", err); + ::Error::new_body_write(err) + }) + } + + fn close(&mut self) { + self.is_closing = true; + self.conn.close_read(); + self.conn.close_write(); + } + + fn is_done(&self) -> bool { + if self.is_closing { + return true; + } + + let read_done = self.conn.is_read_closed(); + + if !T::should_read_first() && read_done { + // a client that cannot read may was well be done. 
+ true + } else { + let write_done = self.conn.is_write_closed() || + (!self.dispatch.should_poll() && self.body_rx.is_none()); + read_done && write_done + } + } +} + +impl Future for Dispatcher +where + D: Dispatch, PollBody=Bs, RecvItem=MessageHead>, + D::PollError: Into>, + I: AsyncRead + AsyncWrite, + T: Http1Transaction, + Bs: Payload, +{ + type Item = Dispatched; + type Error = ::Error; + + #[inline] + fn poll(&mut self) -> Poll { + self.poll_catch(true) + } +} + +// ===== impl Server ===== + +impl Server where S: Service { + pub fn new(service: S) -> Server { + Server { + in_flight: None, + service: service, + } + } + pub fn into_service(self) -> S { + self.service + } +} + +impl Dispatch for Server +where + S: Service, + S::Error: Into>, + Bs: Payload, +{ + type PollItem = MessageHead; + type PollBody = Bs; + type PollError = S::Error; + type RecvItem = RequestHead; + + fn poll_msg(&mut self) -> Poll, Self::PollError> { + if let Some(mut fut) = self.in_flight.take() { + let resp = match fut.poll()? 
{ + Async::Ready(res) => res, + Async::NotReady => { + self.in_flight = Some(fut); + return Ok(Async::NotReady); + } + }; + let (parts, body) = resp.into_parts(); + let head = MessageHead { + version: parts.version, + subject: parts.status, + headers: parts.headers, + }; + Ok(Async::Ready(Some((head, body)))) + } else { + unreachable!("poll_msg shouldn't be called if no inflight"); + } + } + + fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()> { + let (msg, body) = msg?; + let mut req = Request::new(body); + *req.method_mut() = msg.subject.0; + *req.uri_mut() = msg.subject.1; + *req.headers_mut() = msg.headers; + *req.version_mut() = msg.version; + self.in_flight = Some(self.service.call(req)); + Ok(()) + } + + fn poll_ready(&mut self) -> Poll<(), ()> { + if self.in_flight.is_some() { + Ok(Async::NotReady) + } else { + Ok(Async::Ready(())) + } + } + + fn should_poll(&self) -> bool { + self.in_flight.is_some() + } +} + +// ===== impl Client ===== + + +impl Client { + pub fn new(rx: ClientRx) -> Client { + Client { + callback: None, + rx: rx, + } + } +} + +impl Dispatch for Client +where + B: Payload, +{ + type PollItem = RequestHead; + type PollBody = B; + type PollError = Never; + type RecvItem = ResponseHead; + + fn poll_msg(&mut self) -> Poll, Never> { + match self.rx.poll() { + Ok(Async::Ready(Some((req, mut cb)))) => { + // check that future hasn't been canceled already + match cb.poll_cancel().expect("poll_cancel cannot error") { + Async::Ready(()) => { + trace!("request canceled"); + Ok(Async::Ready(None)) + }, + Async::NotReady => { + let (parts, body) = req.into_parts(); + let head = RequestHead { + version: parts.version, + subject: RequestLine(parts.method, parts.uri), + headers: parts.headers, + }; + self.callback = Some(cb); + Ok(Async::Ready(Some((head, body)))) + } + } + }, + Ok(Async::Ready(None)) => { + trace!("client tx closed"); + // user has dropped sender handle + Ok(Async::Ready(None)) + }, + Ok(Async::NotReady) => 
return Ok(Async::NotReady), + Err(never) => match never {}, + } + } + + fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()> { + match msg { + Ok((msg, body)) => { + if let Some(cb) = self.callback.take() { + let mut res = Response::new(body); + *res.status_mut() = msg.subject; + *res.headers_mut() = msg.headers; + *res.version_mut() = msg.version; + let _ = cb.send(Ok(res)); + Ok(()) + } else { + Err(::Error::new_mismatched_response()) + } + }, + Err(err) => { + if let Some(cb) = self.callback.take() { + let _ = cb.send(Err((err, None))); + Ok(()) + } else if let Ok(Async::Ready(Some((req, cb)))) = self.rx.poll() { + trace!("canceling queued request with connection error: {}", err); + // in this case, the message was never even started, so it's safe to tell + // the user that the request was completely canceled + let _ = cb.send(Err((::Error::new_canceled(Some(err)), Some(req)))); + Ok(()) + } else { + Err(err) + } + } + } + } + + fn poll_ready(&mut self) -> Poll<(), ()> { + match self.callback { + Some(ref mut cb) => match cb.poll_cancel() { + Ok(Async::Ready(())) => { + trace!("callback receiver has dropped"); + Err(()) + }, + Ok(Async::NotReady) => Ok(Async::Ready(())), + Err(_) => unreachable!("oneshot poll_cancel cannot error"), + }, + None => Err(()), + } + } + + fn should_poll(&self) -> bool { + self.callback.is_none() + } +} + +#[cfg(test)] +mod tests { + extern crate pretty_env_logger; + + use super::*; + use mock::AsyncIo; + use proto::h1::ClientTransaction; + + #[test] + fn client_read_bytes_before_writing_request() { + let _ = pretty_env_logger::try_init(); + ::futures::lazy(|| { + // Block at 0 for now, but we will release this response before + // the request is ready to write later... 
+ let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0); + let (mut tx, rx) = ::client::dispatch::channel(); + let conn = Conn::<_, ::Chunk, ClientTransaction>::new(io); + let mut dispatcher = Dispatcher::new(Client::new(rx), conn); + + // First poll is needed to allow tx to send... + assert!(dispatcher.poll().expect("nothing is ready").is_not_ready()); + // Unblock our IO, which has a response before we've sent request! + dispatcher.conn.io_mut().block_in(100); + + let res_rx = tx.try_send(::Request::new(::Body::empty())).unwrap(); + + let a1 = dispatcher.poll().expect("error should be sent on channel"); + assert!(a1.is_ready(), "dispatcher should be closed"); + let err = res_rx.wait() + .expect("callback poll") + .expect_err("callback response"); + + match (err.0.kind(), err.1) { + (&::error::Kind::Canceled, Some(_)) => (), + other => panic!("expected Canceled, got {:?}", other), + } + Ok::<(), ()>(()) + }).wait().unwrap(); + } + + #[test] + fn body_empty_chunks_ignored() { + let _ = pretty_env_logger::try_init(); + ::futures::lazy(|| { + let io = AsyncIo::new_buf(vec![], 0); + let (mut tx, rx) = ::client::dispatch::channel(); + let conn = Conn::<_, ::Chunk, ClientTransaction>::new(io); + let mut dispatcher = Dispatcher::new(Client::new(rx), conn); + + // First poll is needed to allow tx to send... 
+ assert!(dispatcher.poll().expect("nothing is ready").is_not_ready()); + + let body = ::Body::wrap_stream(::futures::stream::once(Ok::<_, ::Error>(""))); + + let _res_rx = tx.try_send(::Request::new(body)).unwrap(); + + dispatcher.poll().expect("empty body shouldn't panic"); + Ok::<(), ()>(()) + }).wait().unwrap(); + } +} diff --git a/third_party/rust/hyper/src/proto/h1/encode.rs b/third_party/rust/hyper/src/proto/h1/encode.rs new file mode 100644 index 000000000000..fe7c00257bb7 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/encode.rs @@ -0,0 +1,421 @@ +use std::fmt; + +use bytes::{Buf, IntoBuf}; +use bytes::buf::{Chain, Take}; +use iovec::IoVec; + +use common::StaticBuf; +use super::io::WriteBuf; + +/// Encoders to handle different Transfer-Encodings. +#[derive(Debug, Clone, PartialEq)] +pub struct Encoder { + kind: Kind, + is_last: bool, +} + +#[derive(Debug)] +pub struct EncodedBuf { + kind: BufKind, +} + +#[derive(Debug)] +pub struct NotEof; + +#[derive(Debug, PartialEq, Clone)] +enum Kind { + /// An Encoder for when Transfer-Encoding includes `chunked`. + Chunked, + /// An Encoder for when Content-Length is set. + /// + /// Enforces that the body is not longer than the Content-Length header. + Length(u64), + /// An Encoder for when neither Content-Length nore Chunked encoding is set. + /// + /// This is mostly only used with HTTP/1.0 with a length. This kind requires + /// the connection to be closed when the body is finished. 
+ CloseDelimited, +} + +#[derive(Debug)] +enum BufKind { + Exact(B), + Limited(Take), + Chunked(Chain, StaticBuf>), + ChunkedEnd(StaticBuf), +} + +impl Encoder { + fn new(kind: Kind) -> Encoder { + Encoder { + kind: kind, + is_last: false, + } + } + pub fn chunked() -> Encoder { + Encoder::new(Kind::Chunked) + } + + pub fn length(len: u64) -> Encoder { + Encoder::new(Kind::Length(len)) + } + + pub fn close_delimited() -> Encoder { + Encoder::new(Kind::CloseDelimited) + } + + pub fn is_eof(&self) -> bool { + match self.kind { + Kind::Length(0) => true, + _ => false + } + } + + pub fn set_last(mut self, is_last: bool) -> Self { + self.is_last = is_last; + self + } + + pub fn is_last(&self) -> bool { + self.is_last + } + + pub fn end(&self) -> Result>, NotEof> { + match self.kind { + Kind::Length(0) => Ok(None), + Kind::Chunked => Ok(Some(EncodedBuf { + kind: BufKind::ChunkedEnd(StaticBuf(b"0\r\n\r\n")), + })), + _ => Err(NotEof), + } + } + + pub fn encode(&mut self, msg: B) -> EncodedBuf + where + B: IntoBuf, + { + let msg = msg.into_buf(); + let len = msg.remaining(); + debug_assert!(len > 0, "encode() called with empty buf"); + + let kind = match self.kind { + Kind::Chunked => { + trace!("encoding chunked {}B", len); + let buf = ChunkSize::new(len) + .chain(msg) + .chain(StaticBuf(b"\r\n")); + BufKind::Chunked(buf) + }, + Kind::Length(ref mut remaining) => { + trace!("sized write, len = {}", len); + if len as u64 > *remaining { + let limit = *remaining as usize; + *remaining = 0; + BufKind::Limited(msg.take(limit)) + } else { + *remaining -= len as u64; + BufKind::Exact(msg) + } + }, + Kind::CloseDelimited => { + trace!("close delimited write {}B", len); + BufKind::Exact(msg) + } + }; + EncodedBuf { + kind, + } + } + + pub(super) fn encode_and_end(&self, msg: B, dst: &mut WriteBuf>) -> bool + where + B: IntoBuf, + { + let msg = msg.into_buf(); + let len = msg.remaining(); + debug_assert!(len > 0, "encode() called with empty buf"); + + match self.kind { + 
Kind::Chunked => { + trace!("encoding chunked {}B", len); + let buf = ChunkSize::new(len) + .chain(msg) + .chain(StaticBuf(b"\r\n0\r\n\r\n")); + dst.buffer(buf); + !self.is_last + }, + Kind::Length(remaining) => { + use std::cmp::Ordering; + + trace!("sized write, len = {}", len); + match (len as u64).cmp(&remaining) { + Ordering::Equal => { + dst.buffer(msg); + !self.is_last + }, + Ordering::Greater => { + dst.buffer(msg.take(remaining as usize)); + !self.is_last + }, + Ordering::Less => { + dst.buffer(msg); + false + } + } + }, + Kind::CloseDelimited => { + trace!("close delimited write {}B", len); + dst.buffer(msg); + false + } + } + } + + /// Encodes the full body, without verifying the remaining length matches. + /// + /// This is used in conjunction with Payload::__hyper_full_data(), which + /// means we can trust that the buf has the correct size (the buf itself + /// was checked to make the headers). + pub(super) fn danger_full_buf(self, msg: B, dst: &mut WriteBuf>) + where + B: IntoBuf, + { + let msg = msg.into_buf(); + debug_assert!(msg.remaining() > 0, "encode() called with empty buf"); + debug_assert!(match self.kind { + Kind::Length(len) => len == msg.remaining() as u64, + _ => true, + }, "danger_full_buf length mismatches"); + + match self.kind { + Kind::Chunked => { + let len = msg.remaining(); + trace!("encoding chunked {}B", len); + let buf = ChunkSize::new(len) + .chain(msg) + .chain(StaticBuf(b"\r\n0\r\n\r\n")); + dst.buffer(buf); + }, + _ => { + dst.buffer(msg); + }, + } + } +} + +impl Buf for EncodedBuf +where + B: Buf, +{ + #[inline] + fn remaining(&self) -> usize { + match self.kind { + BufKind::Exact(ref b) => b.remaining(), + BufKind::Limited(ref b) => b.remaining(), + BufKind::Chunked(ref b) => b.remaining(), + BufKind::ChunkedEnd(ref b) => b.remaining(), + } + } + + #[inline] + fn bytes(&self) -> &[u8] { + match self.kind { + BufKind::Exact(ref b) => b.bytes(), + BufKind::Limited(ref b) => b.bytes(), + BufKind::Chunked(ref b) => 
b.bytes(), + BufKind::ChunkedEnd(ref b) => b.bytes(), + } + } + + #[inline] + fn advance(&mut self, cnt: usize) { + match self.kind { + BufKind::Exact(ref mut b) => b.advance(cnt), + BufKind::Limited(ref mut b) => b.advance(cnt), + BufKind::Chunked(ref mut b) => b.advance(cnt), + BufKind::ChunkedEnd(ref mut b) => b.advance(cnt), + } + } + + #[inline] + fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize { + match self.kind { + BufKind::Exact(ref b) => b.bytes_vec(dst), + BufKind::Limited(ref b) => b.bytes_vec(dst), + BufKind::Chunked(ref b) => b.bytes_vec(dst), + BufKind::ChunkedEnd(ref b) => b.bytes_vec(dst), + } + } +} + + +#[cfg(target_pointer_width = "32")] +const USIZE_BYTES: usize = 4; + +#[cfg(target_pointer_width = "64")] +const USIZE_BYTES: usize = 8; + +// each byte will become 2 hex +const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2; + +#[derive(Clone, Copy)] +struct ChunkSize { + bytes: [u8; CHUNK_SIZE_MAX_BYTES + 2], + pos: u8, + len: u8, +} + +impl ChunkSize { + fn new(len: usize) -> ChunkSize { + use std::fmt::Write; + let mut size = ChunkSize { + bytes: [0; CHUNK_SIZE_MAX_BYTES + 2], + pos: 0, + len: 0, + }; + write!(&mut size, "{:X}\r\n", len) + .expect("CHUNK_SIZE_MAX_BYTES should fit any usize"); + size + } +} + +impl Buf for ChunkSize { + #[inline] + fn remaining(&self) -> usize { + (self.len - self.pos).into() + } + + #[inline] + fn bytes(&self) -> &[u8] { + &self.bytes[self.pos.into() .. 
self.len.into()] + } + + #[inline] + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.remaining()); + self.pos += cnt as u8; // just asserted cnt fits in u8 + } +} + +impl fmt::Debug for ChunkSize { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ChunkSize") + .field("bytes", &&self.bytes[..self.len.into()]) + .field("pos", &self.pos) + .finish() + } +} + +impl fmt::Write for ChunkSize { + fn write_str(&mut self, num: &str) -> fmt::Result { + use std::io::Write; + (&mut self.bytes[self.len.into()..]).write(num.as_bytes()) + .expect("&mut [u8].write() cannot error"); + self.len += num.len() as u8; // safe because bytes is never bigger than 256 + Ok(()) + } +} + +impl From for EncodedBuf { + fn from(buf: B) -> Self { + EncodedBuf { + kind: BufKind::Exact(buf), + } + } +} + +impl From> for EncodedBuf { + fn from(buf: Take) -> Self { + EncodedBuf { + kind: BufKind::Limited(buf), + } + } +} + +impl From, StaticBuf>> for EncodedBuf { + fn from(buf: Chain, StaticBuf>) -> Self { + EncodedBuf { + kind: BufKind::Chunked(buf), + } + } +} + +#[cfg(test)] +mod tests { + use bytes::{BufMut}; + + use super::super::io::Cursor; + use super::Encoder; + + #[test] + fn chunked() { + let mut encoder = Encoder::chunked(); + let mut dst = Vec::new(); + + let msg1 = b"foo bar".as_ref(); + let buf1 = encoder.encode(msg1); + dst.put(buf1); + assert_eq!(dst, b"7\r\nfoo bar\r\n"); + + let msg2 = b"baz quux herp".as_ref(); + let buf2 = encoder.encode(msg2); + dst.put(buf2); + + assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n"); + + let end = encoder.end::>>().unwrap().unwrap(); + dst.put(end); + + assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref()); + } + + #[test] + fn length() { + let max_len = 8; + let mut encoder = Encoder::length(max_len as u64); + let mut dst = Vec::new(); + + + let msg1 = b"foo bar".as_ref(); + let buf1 = encoder.encode(msg1); + dst.put(buf1); + + + assert_eq!(dst, b"foo bar"); + 
assert!(!encoder.is_eof()); + encoder.end::<()>().unwrap_err(); + + let msg2 = b"baz".as_ref(); + let buf2 = encoder.encode(msg2); + dst.put(buf2); + + assert_eq!(dst.len(), max_len); + assert_eq!(dst, b"foo barb"); + assert!(encoder.is_eof()); + assert!(encoder.end::<()>().unwrap().is_none()); + } + + #[test] + fn eof() { + let mut encoder = Encoder::close_delimited(); + let mut dst = Vec::new(); + + + let msg1 = b"foo bar".as_ref(); + let buf1 = encoder.encode(msg1); + dst.put(buf1); + + + assert_eq!(dst, b"foo bar"); + assert!(!encoder.is_eof()); + encoder.end::<()>().unwrap_err(); + + let msg2 = b"baz".as_ref(); + let buf2 = encoder.encode(msg2); + dst.put(buf2); + + assert_eq!(dst, b"foo barbaz"); + assert!(!encoder.is_eof()); + encoder.end::<()>().unwrap_err(); + } +} diff --git a/third_party/rust/hyper/src/proto/h1/io.rs b/third_party/rust/hyper/src/proto/h1/io.rs new file mode 100644 index 000000000000..441dec74f47e --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/io.rs @@ -0,0 +1,725 @@ +use std::cell::Cell; +use std::collections::VecDeque; +use std::fmt; +use std::io; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use futures::{Async, Poll}; +use iovec::IoVec; +use tokio_io::{AsyncRead, AsyncWrite}; + +use super::{Http1Transaction, ParseContext, ParsedMessage}; + +/// The initial buffer size allocated before trying to read from IO. +pub(crate) const INIT_BUFFER_SIZE: usize = 8192; + +/// The minimum value that can be set to max buffer size. +pub const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; + +/// The default maximum read buffer size. If the buffer gets this big and +/// a message is still not complete, a `TooLarge` error is triggered. +// Note: if this changes, update server::conn::Http::max_buf_size docs. +pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; + +/// The maximum number of distinct `Buf`s to hold in a list before requiring +/// a flush. Only affects when the buffer strategy is to queue buffers. 
+/// +/// Note that a flush can happen before reaching the maximum. This simply +/// forces a flush if the queue gets this big. +const MAX_BUF_LIST_BUFFERS: usize = 16; + +pub struct Buffered { + flush_pipeline: bool, + io: T, + max_buf_size: usize, + read_blocked: bool, + read_buf: BytesMut, + write_buf: WriteBuf, +} + +impl fmt::Debug for Buffered +where + B: Buf, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Buffered") + .field("read_buf", &self.read_buf) + .field("write_buf", &self.write_buf) + .finish() + } +} + +impl Buffered +where + T: AsyncRead + AsyncWrite, + B: Buf, +{ + pub fn new(io: T) -> Buffered { + Buffered { + flush_pipeline: false, + io: io, + max_buf_size: DEFAULT_MAX_BUFFER_SIZE, + read_buf: BytesMut::with_capacity(0), + write_buf: WriteBuf::new(), + read_blocked: false, + } + } + + pub fn set_flush_pipeline(&mut self, enabled: bool) { + debug_assert!(!self.write_buf.has_remaining()); + self.flush_pipeline = enabled; + if enabled { + self.set_write_strategy_flatten(); + } + } + + pub fn set_max_buf_size(&mut self, max: usize) { + assert!( + max >= MINIMUM_MAX_BUFFER_SIZE, + "The max_buf_size cannot be smaller than the initial buffer size." 
+ ); + self.max_buf_size = max; + self.write_buf.max_buf_size = max; + } + + pub fn set_write_strategy_flatten(&mut self) { + // this should always be called only at construction time, + // so this assert is here to catch myself + debug_assert!(self.write_buf.queue.bufs.is_empty()); + self.write_buf.set_strategy(Strategy::Flatten); + } + + pub fn read_buf(&self) -> &[u8] { + self.read_buf.as_ref() + } + + #[cfg(test)] + #[cfg(feature = "nightly")] + pub(super) fn read_buf_mut(&mut self) -> &mut BytesMut { + &mut self.read_buf + } + + pub fn headers_buf(&mut self) -> &mut Vec { + let buf = self.write_buf.headers_mut(); + &mut buf.bytes + } + + pub(super) fn write_buf(&mut self) -> &mut WriteBuf { + &mut self.write_buf + } + + pub fn buffer>(&mut self, buf: BB) { + self.write_buf.buffer(buf) + } + + pub fn can_buffer(&self) -> bool { + self.flush_pipeline || self.write_buf.can_buffer() + } + + pub fn consume_leading_lines(&mut self) { + if !self.read_buf.is_empty() { + let mut i = 0; + while i < self.read_buf.len() { + match self.read_buf[i] { + b'\r' | b'\n' => i += 1, + _ => break, + } + } + self.read_buf.split_to(i); + } + } + + pub(super) fn parse(&mut self, ctx: ParseContext) + -> Poll, ::Error> + where + S: Http1Transaction, + { + loop { + match try!(S::parse(&mut self.read_buf, ParseContext { cached_headers: ctx.cached_headers, req_method: ctx.req_method, })) { + Some(msg) => { + debug!("parsed {} headers", msg.head.headers.len()); + return Ok(Async::Ready(msg)) + }, + None => { + if self.read_buf.capacity() >= self.max_buf_size { + debug!("max_buf_size ({}) reached, closing", self.max_buf_size); + return Err(::Error::new_too_large()); + } + }, + } + match try_ready!(self.read_from_io().map_err(::Error::new_io)) { + 0 => { + trace!("parse eof"); + return Err(::Error::new_incomplete()); + } + _ => {}, + } + } + } + + pub fn read_from_io(&mut self) -> Poll { + use bytes::BufMut; + self.read_blocked = false; + if self.read_buf.remaining_mut() < INIT_BUFFER_SIZE { 
+ self.read_buf.reserve(INIT_BUFFER_SIZE); + } + self.io.read_buf(&mut self.read_buf).map(|ok| { + match ok { + Async::Ready(n) => { + debug!("read {} bytes", n); + Async::Ready(n) + }, + Async::NotReady => { + self.read_blocked = true; + Async::NotReady + } + } + }) + } + + pub fn into_inner(self) -> (T, Bytes) { + (self.io, self.read_buf.freeze()) + } + + pub fn io_mut(&mut self) -> &mut T { + &mut self.io + } + + pub fn is_read_blocked(&self) -> bool { + self.read_blocked + } + + pub fn flush(&mut self) -> Poll<(), io::Error> { + if self.flush_pipeline && !self.read_buf.is_empty() { + //Ok(()) + } else if self.write_buf.remaining() == 0 { + try_nb!(self.io.flush()); + } else { + match self.write_buf.strategy { + Strategy::Flatten => return self.flush_flattened(), + _ => (), + } + loop { + let n = try_ready!(self.io.write_buf(&mut self.write_buf.auto())); + debug!("flushed {} bytes", n); + if self.write_buf.remaining() == 0 { + break; + } else if n == 0 { + trace!("write returned zero, but {} bytes remaining", self.write_buf.remaining()); + return Err(io::ErrorKind::WriteZero.into()) + } + } + try_nb!(self.io.flush()) + } + Ok(Async::Ready(())) + } + + /// Specialized version of `flush` when strategy is Flatten. + /// + /// Since all buffered bytes are flattened into the single headers buffer, + /// that skips some bookkeeping around using multiple buffers. 
+ fn flush_flattened(&mut self) -> Poll<(), io::Error> { + loop { + let n = try_nb!(self.io.write(self.write_buf.headers.bytes())); + debug!("flushed {} bytes", n); + self.write_buf.headers.advance(n); + if self.write_buf.headers.remaining() == 0 { + self.write_buf.headers.reset(); + break; + } else if n == 0 { + trace!("write returned zero, but {} bytes remaining", self.write_buf.remaining()); + return Err(io::ErrorKind::WriteZero.into()) + } + } + try_nb!(self.io.flush()); + Ok(Async::Ready(())) + } +} + +pub trait MemRead { + fn read_mem(&mut self, len: usize) -> Poll; +} + +impl MemRead for Buffered +where + T: AsyncRead + AsyncWrite, + B: Buf, +{ + fn read_mem(&mut self, len: usize) -> Poll { + if !self.read_buf.is_empty() { + let n = ::std::cmp::min(len, self.read_buf.len()); + Ok(Async::Ready(self.read_buf.split_to(n).freeze())) + } else { + let n = try_ready!(self.read_from_io()); + Ok(Async::Ready(self.read_buf.split_to(::std::cmp::min(len, n)).freeze())) + } + } +} + +#[derive(Clone)] +pub struct Cursor { + bytes: T, + pos: usize, +} + +impl> Cursor { + #[inline] + pub(crate) fn new(bytes: T) -> Cursor { + Cursor { + bytes: bytes, + pos: 0, + } + } +} + +impl Cursor> { + fn reset(&mut self) { + self.pos = 0; + unsafe { + self.bytes.set_len(0); + } + } +} + +impl> fmt::Debug for Cursor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Cursor") + .field("pos", &self.pos) + .field("len", &self.bytes.as_ref().len()) + .finish() + } +} + +impl> Buf for Cursor { + #[inline] + fn remaining(&self) -> usize { + self.bytes.as_ref().len() - self.pos + } + + #[inline] + fn bytes(&self) -> &[u8] { + &self.bytes.as_ref()[self.pos..] 
+ } + + #[inline] + fn advance(&mut self, cnt: usize) { + debug_assert!(self.pos + cnt <= self.bytes.as_ref().len()); + self.pos += cnt; + } +} + +// an internal buffer to collect writes before flushes +pub(super) struct WriteBuf { + /// Re-usable buffer that holds message headers + headers: Cursor>, + max_buf_size: usize, + /// Deque of user buffers if strategy is Queue + queue: BufDeque, + strategy: Strategy, +} + +impl WriteBuf { + fn new() -> WriteBuf { + WriteBuf { + headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)), + max_buf_size: DEFAULT_MAX_BUFFER_SIZE, + queue: BufDeque::new(), + strategy: Strategy::Auto, + } + } +} + + +impl WriteBuf +where + B: Buf, +{ + fn set_strategy(&mut self, strategy: Strategy) { + self.strategy = strategy; + } + + #[inline] + fn auto(&mut self) -> WriteBufAuto { + WriteBufAuto::new(self) + } + + pub(super) fn buffer>(&mut self, mut buf: BB) { + debug_assert!(buf.has_remaining()); + match self.strategy { + Strategy::Flatten => { + let head = self.headers_mut(); + //perf: This is a little faster than >::put, + //but accomplishes the same result. 
+ loop { + let adv = { + let slice = buf.bytes(); + if slice.is_empty() { + return; + } + head.bytes.extend_from_slice(slice); + slice.len() + }; + buf.advance(adv); + } + }, + Strategy::Auto | Strategy::Queue => { + self.queue.bufs.push_back(buf.into()); + }, + } + } + + fn can_buffer(&self) -> bool { + match self.strategy { + Strategy::Flatten => { + self.remaining() < self.max_buf_size + }, + Strategy::Auto | Strategy::Queue => { + self.queue.bufs.len() < MAX_BUF_LIST_BUFFERS + && self.remaining() < self.max_buf_size + }, + } + } + + fn headers_mut(&mut self) -> &mut Cursor> { + debug_assert!(!self.queue.has_remaining()); + &mut self.headers + } +} + +impl fmt::Debug for WriteBuf { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("WriteBuf") + .field("remaining", &self.remaining()) + .field("strategy", &self.strategy) + .finish() + } +} + +impl Buf for WriteBuf { + #[inline] + fn remaining(&self) -> usize { + self.headers.remaining() + self.queue.remaining() + } + + #[inline] + fn bytes(&self) -> &[u8] { + let headers = self.headers.bytes(); + if !headers.is_empty() { + headers + } else { + self.queue.bytes() + } + } + + #[inline] + fn advance(&mut self, cnt: usize) { + let hrem = self.headers.remaining(); + if hrem == cnt { + self.headers.reset(); + } else if hrem > cnt { + self.headers.advance(cnt); + } else { + let qcnt = cnt - hrem; + self.headers.reset(); + self.queue.advance(qcnt); + } + } + + #[inline] + fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize { + let n = self.headers.bytes_vec(dst); + self.queue.bytes_vec(&mut dst[n..]) + n + } +} + +/// Detects when wrapped `WriteBuf` is used for vectored IO, and +/// adjusts the `WriteBuf` strategy if not. 
+struct WriteBufAuto<'a, B: Buf + 'a> { + bytes_called: Cell, + bytes_vec_called: Cell, + inner: &'a mut WriteBuf, +} + +impl<'a, B: Buf> WriteBufAuto<'a, B> { + fn new(inner: &'a mut WriteBuf) -> WriteBufAuto<'a, B> { + WriteBufAuto { + bytes_called: Cell::new(false), + bytes_vec_called: Cell::new(false), + inner: inner, + } + } +} + +impl<'a, B: Buf> Buf for WriteBufAuto<'a, B> { + #[inline] + fn remaining(&self) -> usize { + self.inner.remaining() + } + + #[inline] + fn bytes(&self) -> &[u8] { + self.bytes_called.set(true); + self.inner.bytes() + } + + #[inline] + fn advance(&mut self, cnt: usize) { + self.inner.advance(cnt) + } + + #[inline] + fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize { + self.bytes_vec_called.set(true); + self.inner.bytes_vec(dst) + } +} + +impl<'a, B: Buf + 'a> Drop for WriteBufAuto<'a, B> { + fn drop(&mut self) { + if let Strategy::Auto = self.inner.strategy { + if self.bytes_vec_called.get() { + self.inner.strategy = Strategy::Queue; + } else if self.bytes_called.get() { + trace!("detected no usage of vectored write, flattening"); + self.inner.strategy = Strategy::Flatten; + self.inner.headers.bytes.put(&mut self.inner.queue); + } + } + } +} + + +#[derive(Debug)] +enum Strategy { + Auto, + Flatten, + Queue, +} + +struct BufDeque { + bufs: VecDeque, +} + + +impl BufDeque { + fn new() -> BufDeque { + BufDeque { + bufs: VecDeque::new(), + } + } +} + +impl Buf for BufDeque { + #[inline] + fn remaining(&self) -> usize { + self.bufs.iter() + .map(|buf| buf.remaining()) + .sum() + } + + #[inline] + fn bytes(&self) -> &[u8] { + for buf in &self.bufs { + return buf.bytes(); + } + &[] + } + + #[inline] + fn advance(&mut self, mut cnt: usize) { + while cnt > 0 { + { + let front = &mut self.bufs[0]; + let rem = front.remaining(); + if rem > cnt { + front.advance(cnt); + return; + } else { + front.advance(rem); + cnt -= rem; + } + } + self.bufs.pop_front(); + } + } + + #[inline] + fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> 
usize { + if dst.is_empty() { + return 0; + } + let mut vecs = 0; + for buf in &self.bufs { + vecs += buf.bytes_vec(&mut dst[vecs..]); + if vecs == dst.len() { + break; + } + } + vecs + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Read; + use mock::AsyncIo; + + #[cfg(feature = "nightly")] + use test::Bencher; + + #[cfg(test)] + impl MemRead for ::mock::AsyncIo { + fn read_mem(&mut self, len: usize) -> Poll { + let mut v = vec![0; len]; + let n = try_nb!(self.read(v.as_mut_slice())); + Ok(Async::Ready(BytesMut::from(&v[..n]).freeze())) + } + } + + #[test] + fn iobuf_write_empty_slice() { + let mut mock = AsyncIo::new_buf(vec![], 256); + mock.error(io::Error::new(io::ErrorKind::Other, "logic error")); + + let mut io_buf = Buffered::<_, Cursor>>::new(mock); + + // underlying io will return the logic error upon write, + // so we are testing that the io_buf does not trigger a write + // when there is nothing to flush + io_buf.flush().expect("should short-circuit flush"); + } + + #[test] + fn parse_reads_until_blocked() { + // missing last line ending + let raw = "HTTP/1.1 200 OK\r\n"; + + let mock = AsyncIo::new_buf(raw, raw.len()); + let mut buffered = Buffered::<_, Cursor>>::new(mock); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + }; + assert!(buffered.parse::<::proto::h1::ClientTransaction>(ctx).unwrap().is_not_ready()); + assert!(buffered.io.blocked()); + } + + #[test] + #[should_panic] + fn write_buf_requires_non_empty_bufs() { + let mock = AsyncIo::new_buf(vec![], 1024); + let mut buffered = Buffered::<_, Cursor>>::new(mock); + + buffered.buffer(Cursor::new(Vec::new())); + } + + #[test] + fn write_buf_queue() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); + + let mock = AsyncIo::new_buf(vec![], 1024); + let mut buffered = Buffered::<_, Cursor>>::new(mock); + + + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + 
buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + assert_eq!(buffered.write_buf.queue.bufs.len(), 3); + buffered.flush().unwrap(); + + assert_eq!(buffered.io, b"hello world, it's hyper!"); + assert_eq!(buffered.io.num_writes(), 1); + assert_eq!(buffered.write_buf.queue.bufs.len(), 0); + } + + #[test] + fn write_buf_flatten() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); + + let mock = AsyncIo::new_buf(vec![], 1024); + let mut buffered = Buffered::<_, Cursor>>::new(mock); + buffered.write_buf.set_strategy(Strategy::Flatten); + + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + assert_eq!(buffered.write_buf.queue.bufs.len(), 0); + + buffered.flush().unwrap(); + + assert_eq!(buffered.io, b"hello world, it's hyper!"); + assert_eq!(buffered.io.num_writes(), 1); + } + + #[test] + fn write_buf_auto_flatten() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); + + let mut mock = AsyncIo::new_buf(vec![], 1024); + mock.max_read_vecs(0); // disable vectored IO + let mut buffered = Buffered::<_, Cursor>>::new(mock); + + // we have 4 buffers, but hope to detect that vectored IO isn't + // being used, and switch to flattening automatically, + // resulting in only 2 writes + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + assert_eq!(buffered.write_buf.queue.bufs.len(), 3); + buffered.flush().unwrap(); + + assert_eq!(buffered.io, b"hello world, it's hyper!"); + assert_eq!(buffered.io.num_writes(), 2); + assert_eq!(buffered.write_buf.queue.bufs.len(), 0); + } + + #[test] + fn write_buf_queue_disable_auto() { + extern crate pretty_env_logger; + let _ = 
pretty_env_logger::try_init(); + + let mut mock = AsyncIo::new_buf(vec![], 1024); + mock.max_read_vecs(0); // disable vectored IO + let mut buffered = Buffered::<_, Cursor>>::new(mock); + buffered.write_buf.set_strategy(Strategy::Queue); + + // we have 4 buffers, and vec IO disabled, but explicitly said + // don't try to auto detect (via setting strategy above) + + buffered.headers_buf().extend(b"hello "); + buffered.buffer(Cursor::new(b"world, ".to_vec())); + buffered.buffer(Cursor::new(b"it's ".to_vec())); + buffered.buffer(Cursor::new(b"hyper!".to_vec())); + assert_eq!(buffered.write_buf.queue.bufs.len(), 3); + buffered.flush().unwrap(); + + assert_eq!(buffered.io, b"hello world, it's hyper!"); + assert_eq!(buffered.io.num_writes(), 4); + assert_eq!(buffered.write_buf.queue.bufs.len(), 0); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) { + let s = "Hello, World!"; + b.bytes = s.len() as u64; + + let mut write_buf = WriteBuf::<::Chunk>::new(); + write_buf.set_strategy(Strategy::Flatten); + b.iter(|| { + let chunk = ::Chunk::from(s); + write_buf.buffer(chunk); + ::test::black_box(&write_buf); + unsafe { + write_buf.headers.bytes.set_len(0); + } + }) + } +} diff --git a/third_party/rust/hyper/src/proto/h1/mod.rs b/third_party/rust/hyper/src/proto/h1/mod.rs new file mode 100644 index 000000000000..15faa2135f38 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/mod.rs @@ -0,0 +1,65 @@ +use bytes::BytesMut; +use http::{HeaderMap, Method}; + +use proto::{MessageHead, BodyLength, DecodedLength}; + +pub(crate) use self::conn::Conn; +pub(crate) use self::dispatch::Dispatcher; +pub use self::decode::Decoder; +pub use self::encode::{EncodedBuf, Encoder}; +pub use self::io::Cursor; //TODO: move out of h1::io +pub use self::io::MINIMUM_MAX_BUFFER_SIZE; + +mod conn; +mod date; +mod decode; +pub(crate) mod dispatch; +mod encode; +mod io; +mod role; + + +pub(crate) type ServerTransaction = role::Server; +pub(crate) 
type ClientTransaction = role::Client; + +pub(crate) trait Http1Transaction { + type Incoming; + type Outgoing: Default; + const LOG: &'static str; + fn parse(bytes: &mut BytesMut, ctx: ParseContext) -> ParseResult; + fn encode(enc: Encode, dst: &mut Vec) -> ::Result; + + fn on_error(err: &::Error) -> Option>; + + fn should_error_on_parse_eof() -> bool; + fn should_read_first() -> bool; + + fn update_date() {} +} + +/// Result newtype for Http1Transaction::parse. +pub(crate) type ParseResult = Result>, ::error::Parse>; + +#[derive(Debug)] +pub(crate) struct ParsedMessage { + head: MessageHead, + decode: DecodedLength, + expect_continue: bool, + keep_alive: bool, + wants_upgrade: bool, +} + +pub(crate) struct ParseContext<'a> { + cached_headers: &'a mut Option, + req_method: &'a mut Option, +} + +/// Passed to Http1Transaction::encode +pub(crate) struct Encode<'a, T: 'a> { + head: &'a mut MessageHead, + body: Option, + keep_alive: bool, + req_method: &'a mut Option, + title_case_headers: bool, +} + diff --git a/third_party/rust/hyper/src/proto/h1/role.rs b/third_party/rust/hyper/src/proto/h1/role.rs new file mode 100644 index 000000000000..61cfdd09dae4 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h1/role.rs @@ -0,0 +1,1535 @@ +use std::fmt::{self, Write}; +use std::mem; + +use bytes::{BytesMut}; +use http::header::{self, Entry, HeaderName, HeaderValue}; +use http::{HeaderMap, Method, StatusCode, Version}; +use httparse; + +use error::Parse; +use headers; +use proto::{BodyLength, DecodedLength, MessageHead, RequestLine, RequestHead}; +use proto::h1::{Encode, Encoder, Http1Transaction, ParseResult, ParseContext, ParsedMessage, date}; + +const MAX_HEADERS: usize = 100; +const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific + +macro_rules! 
header_name { + ($bytes:expr) => ({ + #[cfg(debug_assertions)] + { + match HeaderName::from_bytes($bytes) { + Ok(name) => name, + Err(_) => panic!("illegal header name from httparse: {:?}", ::bytes::Bytes::from($bytes)), + } + } + + #[cfg(not(debug_assertions))] + { + HeaderName::from_bytes($bytes) + .expect("header name validated by httparse") + } + }); +} + +macro_rules! header_value { + ($bytes:expr) => ({ + #[cfg(debug_assertions)] + { + let __hvb: ::bytes::Bytes = $bytes; + match HeaderValue::from_shared(__hvb.clone()) { + Ok(name) => name, + Err(_) => panic!("illegal header value from httparse: {:?}", __hvb), + } + } + + #[cfg(not(debug_assertions))] + { + // Unsafe: httparse already validated header value + unsafe { + HeaderValue::from_shared_unchecked($bytes) + } + } + }); +} + +// There are 2 main roles, Client and Server. + +pub(crate) enum Client {} + +pub(crate) enum Server {} + +impl Http1Transaction for Server { + type Incoming = RequestLine; + type Outgoing = StatusCode; + const LOG: &'static str = "{role=server}"; + + fn parse(buf: &mut BytesMut, ctx: ParseContext) -> ParseResult { + if buf.len() == 0 { + return Ok(None); + } + + let mut keep_alive; + let is_http_11; + let subject; + let version; + let len; + let headers_len; + + // Unsafe: both headers_indices and headers are using unitialized memory, + // but we *never* read any of it until after httparse has assigned + // values into it. By not zeroing out the stack memory, this saves + // a good ~5% on pipeline benchmarks. + let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; + { + let mut headers: [httparse::Header; MAX_HEADERS] = unsafe { mem::uninitialized() }; + trace!("Request.parse([Header; {}], [u8; {}])", headers.len(), buf.len()); + let mut req = httparse::Request::new(&mut headers); + let bytes = buf.as_ref(); + match req.parse(bytes)? 
{ + httparse::Status::Complete(parsed_len) => { + trace!("Request.parse Complete({})", parsed_len); + len = parsed_len; + subject = RequestLine( + Method::from_bytes(req.method.unwrap().as_bytes())?, + req.path.unwrap().parse()? + ); + version = if req.version.unwrap() == 1 { + keep_alive = true; + is_http_11 = true; + Version::HTTP_11 + } else { + keep_alive = false; + is_http_11 = false; + Version::HTTP_10 + }; + + record_header_indices(bytes, &req.headers, &mut headers_indices); + headers_len = req.headers.len(); + //(len, subject, version, headers_len) + } + httparse::Status::Partial => return Ok(None), + } + }; + + let slice = buf.split_to(len).freeze(); + + // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 + // 1. (irrelevant to Request) + // 2. (irrelevant to Request) + // 3. Transfer-Encoding: chunked has a chunked body. + // 4. If multiple differing Content-Length headers or invalid, close connection. + // 5. Content-Length header has a sized body. + // 6. Length 0. + // 7. (irrelevant to Request) + + + let mut decoder = DecodedLength::ZERO; + let mut expect_continue = false; + let mut con_len = None; + let mut is_te = false; + let mut is_te_chunked = false; + let mut wants_upgrade = subject.0 == Method::CONNECT; + + let mut headers = ctx.cached_headers + .take() + .unwrap_or_else(HeaderMap::new); + + headers.reserve(headers_len); + + for header in &headers_indices[..headers_len] { + let name = header_name!(&slice[header.name.0..header.name.1]); + let value = header_value!(slice.slice(header.value.0, header.value.1)); + + match name { + header::TRANSFER_ENCODING => { + // https://tools.ietf.org/html/rfc7230#section-3.3.3 + // If Transfer-Encoding header is present, and 'chunked' is + // not the final encoding, and this is a Request, then it is + // mal-formed. A server should respond with 400 Bad Request. 
+ if !is_http_11 { + debug!("HTTP/1.0 cannot have Transfer-Encoding header"); + return Err(Parse::Header); + } + is_te = true; + if headers::is_chunked_(&value) { + is_te_chunked = true; + decoder = DecodedLength::CHUNKED; + } + }, + header::CONTENT_LENGTH => { + if is_te { + continue; + } + let len = value.to_str() + .map_err(|_| Parse::Header) + .and_then(|s| s.parse().map_err(|_| Parse::Header))?; + if let Some(prev) = con_len { + if prev != len { + debug!( + "multiple Content-Length headers with different values: [{}, {}]", + prev, + len, + ); + return Err(Parse::Header); + } + // we don't need to append this secondary length + continue; + } + decoder = DecodedLength::checked_new(len)?; + con_len = Some(len); + }, + header::CONNECTION => { + // keep_alive was previously set to default for Version + if keep_alive { + // HTTP/1.1 + keep_alive = !headers::connection_close(&value); + + } else { + // HTTP/1.0 + keep_alive = headers::connection_keep_alive(&value); + } + }, + header::EXPECT => { + expect_continue = value.as_bytes() == b"100-continue"; + }, + header::UPGRADE => { + // Upgrades are only allowed with HTTP/1.1 + wants_upgrade = is_http_11; + }, + + _ => (), + } + + headers.append(name, value); + } + + if is_te && !is_te_chunked { + debug!("request with transfer-encoding header, but not chunked, bad request"); + return Err(Parse::Header); + } + + *ctx.req_method = Some(subject.0.clone()); + + Ok(Some(ParsedMessage { + head: MessageHead { + version, + subject, + headers, + }, + decode: decoder, + expect_continue, + keep_alive, + wants_upgrade, + })) + } + + fn encode(mut msg: Encode, mut dst: &mut Vec) -> ::Result { + trace!( + "Server::encode status={:?}, body={:?}, req_method={:?}", + msg.head.subject, + msg.body, + msg.req_method + ); + debug_assert!(!msg.title_case_headers, "no server config for title case headers"); + + // hyper currently doesn't support returning 1xx status codes as a Response + // This is because Service only allows returning a 
single Response, and + // so if you try to reply with a e.g. 100 Continue, you have no way of + // replying with the latter status code response. + let is_upgrade = msg.head.subject == StatusCode::SWITCHING_PROTOCOLS + || (msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success()); + let (ret, mut is_last) = if is_upgrade { + (Ok(()), true) + } else if msg.head.subject.is_informational() { + error!("response with 1xx status code not supported"); + *msg.head = MessageHead::default(); + msg.head.subject = StatusCode::INTERNAL_SERVER_ERROR; + msg.body = None; + (Err(::Error::new_user_unsupported_status_code()), true) + } else { + (Ok(()), !msg.keep_alive) + }; + + // In some error cases, we don't know about the invalid message until already + // pushing some bytes onto the `dst`. In those cases, we don't want to send + // the half-pushed message, so rewind to before. + let orig_len = dst.len(); + let rewind = |dst: &mut Vec| { + dst.truncate(orig_len); + }; + + let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; + dst.reserve(init_cap); + if msg.head.version == Version::HTTP_11 && msg.head.subject == StatusCode::OK { + extend(dst, b"HTTP/1.1 200 OK\r\n"); + } else { + match msg.head.version { + Version::HTTP_10 => extend(dst, b"HTTP/1.0 "), + Version::HTTP_11 => extend(dst, b"HTTP/1.1 "), + _ => unreachable!(), + } + + extend(dst, msg.head.subject.as_str().as_bytes()); + extend(dst, b" "); + // a reason MUST be written, as many parsers will expect it. 
+ extend(dst, msg.head.subject.canonical_reason().unwrap_or("").as_bytes()); + extend(dst, b"\r\n"); + } + + let mut encoder = Encoder::length(0); + let mut wrote_len = false; + let mut wrote_date = false; + 'headers: for (name, mut values) in msg.head.headers.drain() { + match name { + header::CONTENT_LENGTH => { + if wrote_len { + warn!("transfer-encoding and content-length both found, canceling"); + rewind(dst); + return Err(::Error::new_header()); + } + match msg.body { + Some(BodyLength::Known(known_len)) => { + // The Payload claims to know a length, and + // the headers are already set. For performance + // reasons, we are just going to trust that + // the values match. + // + // In debug builds, we'll assert they are the + // same to help developers find bugs. + encoder = Encoder::length(known_len); + + #[cfg(debug_assertions)] + { + let mut folded = None::<(u64, HeaderValue)>; + for value in values { + if let Some(len) = headers::content_length_parse(&value) { + if let Some(fold) = folded { + if fold.0 != len { + panic!("multiple Content-Length values found: [{}, {}]", fold.0, len); + } + folded = Some(fold); + } else { + folded = Some((len, value)); + } + } else { + panic!("illegal Content-Length value: {:?}", value); + } + } + if let Some((len, value)) = folded { + assert!( + len == known_len, + "payload claims content-length of {}, custom content-length header claims {}", + known_len, + len, + ); + extend(dst, b"content-length: "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + wrote_len = true; + continue 'headers; + } else { + // No values in content-length... ignore? + continue 'headers; + } + } + }, + Some(BodyLength::Unknown) => { + // The Payload impl didn't know how long the + // body is, but a length header was included. + // We have to parse the value to return our + // Encoder... 
+ let mut folded = None::<(u64, HeaderValue)>; + for value in values { + if let Some(len) = headers::content_length_parse(&value) { + if let Some(fold) = folded { + if fold.0 != len { + warn!("multiple Content-Length values found: [{}, {}]", fold.0, len); + rewind(dst); + return Err(::Error::new_header()); + } + folded = Some(fold); + } else { + folded = Some((len, value)); + } + } else { + warn!("illegal Content-Length value: {:?}", value); + rewind(dst); + return Err(::Error::new_header()); + } + } + if let Some((len, value)) = folded { + encoder = Encoder::length(len); + extend(dst, b"content-length: "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + wrote_len = true; + continue 'headers; + } else { + // No values in content-length... ignore? + continue 'headers; + } + }, + None => { + // We have no body to actually send, + // but the headers claim a content-length. + // There's only 2 ways this makes sense: + // + // - The header says the length is `0`. + // - This is a response to a `HEAD` request. + if msg.req_method == &Some(Method::HEAD) { + debug_assert_eq!(encoder, Encoder::length(0)); + } else { + for value in values { + if value.as_bytes() != b"0" { + warn!("content-length value found, but empty body provided: {:?}", value); + } + } + continue 'headers; + } + } + } + wrote_len = true; + }, + header::TRANSFER_ENCODING => { + if wrote_len { + warn!("transfer-encoding and content-length both found, canceling"); + rewind(dst); + return Err(::Error::new_header()); + } + // check that we actually can send a chunked body... 
+ if msg.head.version == Version::HTTP_10 || !Server::can_chunked(msg.req_method, msg.head.subject) { + continue; + } + wrote_len = true; + encoder = Encoder::chunked(); + + extend(dst, b"transfer-encoding: "); + + let mut saw_chunked; + if let Some(te) = values.next() { + extend(dst, te.as_bytes()); + saw_chunked = headers::is_chunked_(&te); + for value in values { + extend(dst, b", "); + extend(dst, value.as_bytes()); + saw_chunked = headers::is_chunked_(&value); + } + if !saw_chunked { + extend(dst, b", chunked\r\n"); + } else { + extend(dst, b"\r\n"); + } + } else { + // zero lines? add a chunked line then + extend(dst, b"chunked\r\n"); + } + continue 'headers; + }, + header::CONNECTION => { + if !is_last { + for value in values { + extend(dst, name.as_str().as_bytes()); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + + if headers::connection_close(&value) { + is_last = true; + } + } + continue 'headers; + } + }, + header::DATE => { + wrote_date = true; + }, + _ => (), + } + //TODO: this should perhaps instead combine them into + //single lines, as RFC7230 suggests is preferable. 
+ for value in values { + extend(dst, name.as_str().as_bytes()); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } + } + + if !wrote_len { + encoder = match msg.body { + Some(BodyLength::Unknown) => { + if msg.head.version == Version::HTTP_10 || !Server::can_chunked(msg.req_method, msg.head.subject) { + Encoder::close_delimited() + } else { + extend(dst, b"transfer-encoding: chunked\r\n"); + Encoder::chunked() + } + }, + None | + Some(BodyLength::Known(0)) => { + extend(dst, b"content-length: 0\r\n"); + Encoder::length(0) + }, + Some(BodyLength::Known(len)) => { + extend(dst, b"content-length: "); + let _ = ::itoa::write(&mut dst, len); + extend(dst, b"\r\n"); + Encoder::length(len) + }, + }; + } + + if !Server::can_have_body(msg.req_method, msg.head.subject) { + trace!( + "server body forced to 0; method={:?}, status={:?}", + msg.req_method, + msg.head.subject + ); + encoder = Encoder::length(0); + } + + // cached date is much faster than formatting every request + if !wrote_date { + dst.reserve(date::DATE_VALUE_LENGTH + 8); + extend(dst, b"date: "); + date::extend(dst); + extend(dst, b"\r\n\r\n"); + } else { + extend(dst, b"\r\n"); + } + + ret.map(|()| encoder.set_last(is_last)) + } + + fn on_error(err: &::Error) -> Option> { + use ::error::{Kind, Parse}; + let status = match *err.kind() { + Kind::Parse(Parse::Method) | + Kind::Parse(Parse::Header) | + Kind::Parse(Parse::Uri) | + Kind::Parse(Parse::Version) => { + StatusCode::BAD_REQUEST + }, + Kind::Parse(Parse::TooLarge) => { + StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE + }, + _ => return None, + }; + + debug!("sending automatic response ({}) for parse error", status); + let mut msg = MessageHead::default(); + msg.subject = status; + Some(msg) + } + + fn should_error_on_parse_eof() -> bool { + false + } + + fn should_read_first() -> bool { + true + } + + fn update_date() { + date::update(); + } +} + +impl Server { + fn can_have_body(method: &Option, status: StatusCode) -> bool 
{ + Server::can_chunked(method, status) + } + + fn can_chunked(method: &Option, status: StatusCode) -> bool { + if method == &Some(Method::HEAD) { + false + } else if method == &Some(Method::CONNECT) && status.is_success() { + false + } else { + match status { + // TODO: support for 1xx codes needs improvement everywhere + // would be 100...199 => false + StatusCode::SWITCHING_PROTOCOLS | + StatusCode::NO_CONTENT | + StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } +} + +impl Http1Transaction for Client { + type Incoming = StatusCode; + type Outgoing = RequestLine; + const LOG: &'static str = "{role=client}"; + + fn parse(buf: &mut BytesMut, ctx: ParseContext) -> ParseResult { + // Loop to skip information status code headers (100 Continue, etc). + loop { + if buf.len() == 0 { + return Ok(None); + } + // Unsafe: see comment in Server Http1Transaction, above. + let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; + let (len, status, version, headers_len) = { + let mut headers: [httparse::Header; MAX_HEADERS] = unsafe { mem::uninitialized() }; + trace!("Response.parse([Header; {}], [u8; {}])", headers.len(), buf.len()); + let mut res = httparse::Response::new(&mut headers); + let bytes = buf.as_ref(); + match res.parse(bytes)? 
{ + httparse::Status::Complete(len) => { + trace!("Response.parse Complete({})", len); + let status = StatusCode::from_u16(res.code.unwrap())?; + let version = if res.version.unwrap() == 1 { + Version::HTTP_11 + } else { + Version::HTTP_10 + }; + record_header_indices(bytes, &res.headers, &mut headers_indices); + let headers_len = res.headers.len(); + (len, status, version, headers_len) + }, + httparse::Status::Partial => return Ok(None), + } + }; + + let slice = buf.split_to(len).freeze(); + + let mut headers = ctx.cached_headers + .take() + .unwrap_or_else(HeaderMap::new); + + let mut keep_alive = version == Version::HTTP_11; + + headers.reserve(headers_len); + for header in &headers_indices[..headers_len] { + let name = header_name!(&slice[header.name.0..header.name.1]); + let value = header_value!(slice.slice(header.value.0, header.value.1)); + + match name { + header::CONNECTION => { + // keep_alive was previously set to default for Version + if keep_alive { + // HTTP/1.1 + keep_alive = !headers::connection_close(&value); + + } else { + // HTTP/1.0 + keep_alive = headers::connection_keep_alive(&value); + } + }, + _ => (), + } + headers.append(name, value); + } + + let head = MessageHead { + version, + subject: status, + headers, + }; + if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? { + return Ok(Some(ParsedMessage { + head, + decode, + expect_continue: false, + // a client upgrade means the connection can't be used + // again, as it is definitely upgrading. 
+ keep_alive: keep_alive && !is_upgrade, + wants_upgrade: is_upgrade, + })); + } + + } + } + + fn encode(msg: Encode, dst: &mut Vec) -> ::Result { + trace!("Client::encode method={:?}, body={:?}", msg.head.subject.0, msg.body); + + *msg.req_method = Some(msg.head.subject.0.clone()); + + let body = Client::set_length(msg.head, msg.body); + + let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; + dst.reserve(init_cap); + + + extend(dst, msg.head.subject.0.as_str().as_bytes()); + extend(dst, b" "); + //TODO: add API to http::Uri to encode without std::fmt + let _ = write!(FastWrite(dst), "{} ", msg.head.subject.1); + + match msg.head.version { + Version::HTTP_10 => extend(dst, b"HTTP/1.0"), + Version::HTTP_11 => extend(dst, b"HTTP/1.1"), + _ => unreachable!(), + } + extend(dst, b"\r\n"); + + if msg.title_case_headers { + write_headers_title_case(&msg.head.headers, dst); + } else { + write_headers(&msg.head.headers, dst); + } + extend(dst, b"\r\n"); + msg.head.headers.clear(); //TODO: remove when switching to drain() + + Ok(body) + } + + fn on_error(_err: &::Error) -> Option> { + // we can't tell the server about any errors it creates + None + } + + fn should_error_on_parse_eof() -> bool { + true + } + + fn should_read_first() -> bool { + false + } +} + +impl Client { + /// Returns Some(length, wants_upgrade) if successful. + /// + /// Returns None if this message head should be skipped (like a 100 status). + fn decoder(inc: &MessageHead, method: &mut Option) -> Result, Parse> { + // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 + // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body. + // 2. Status 2xx to a CONNECT cannot have a body. + // 3. Transfer-Encoding: chunked has a chunked body. + // 4. If multiple differing Content-Length headers or invalid, close connection. + // 5. Content-Length header has a sized body. + // 6. (irrelevant to Response) + // 7. Read till EOF. 
+ + match inc.subject.as_u16() { + 101 => { + return Ok(Some((DecodedLength::ZERO, true))); + }, + 100...199 => { + trace!("ignoring informational response: {}", inc.subject.as_u16()); + return Ok(None); + }, + 204 | + 304 => return Ok(Some((DecodedLength::ZERO, false))), + _ => (), + } + match *method { + Some(Method::HEAD) => { + return Ok(Some((DecodedLength::ZERO, false))); + } + Some(Method::CONNECT) => match inc.subject.as_u16() { + 200...299 => { + return Ok(Some((DecodedLength::ZERO, true))); + }, + _ => {}, + }, + Some(_) => {}, + None => { + trace!("Client::decoder is missing the Method"); + } + } + + if inc.headers.contains_key(header::TRANSFER_ENCODING) { + // https://tools.ietf.org/html/rfc7230#section-3.3.3 + // If Transfer-Encoding header is present, and 'chunked' is + // not the final encoding, and this is a Request, then it is + // mal-formed. A server should respond with 400 Bad Request. + if inc.version == Version::HTTP_10 { + debug!("HTTP/1.0 cannot have Transfer-Encoding header"); + Err(Parse::Header) + } else if headers::transfer_encoding_is_chunked(&inc.headers) { + Ok(Some((DecodedLength::CHUNKED, false))) + } else { + trace!("not chunked, read till eof"); + Ok(Some((DecodedLength::CHUNKED, false))) + } + } else if let Some(len) = headers::content_length_parse_all(&inc.headers) { + Ok(Some((DecodedLength::checked_new(len)?, false))) + } else if inc.headers.contains_key(header::CONTENT_LENGTH) { + debug!("illegal Content-Length header"); + Err(Parse::Header) + } else { + trace!("neither Transfer-Encoding nor Content-Length"); + Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) + } + } +} + +impl Client { + fn set_length(head: &mut RequestHead, body: Option) -> Encoder { + if let Some(body) = body { + let can_chunked = head.version == Version::HTTP_11 + && (head.subject.0 != Method::HEAD) + && (head.subject.0 != Method::GET) + && (head.subject.0 != Method::CONNECT); + set_length(&mut head.headers, body, can_chunked) + } else { + 
head.headers.remove(header::TRANSFER_ENCODING); + Encoder::length(0) + } + } +} + +fn set_length(headers: &mut HeaderMap, body: BodyLength, can_chunked: bool) -> Encoder { + // If the user already set specific headers, we should respect them, regardless + // of what the Payload knows about itself. They set them for a reason. + + // Because of the borrow checker, we can't check the for an existing + // Content-Length header while holding an `Entry` for the Transfer-Encoding + // header, so unfortunately, we must do the check here, first. + + let existing_con_len = headers::content_length_parse_all(headers); + let mut should_remove_con_len = false; + + if can_chunked { + // If the user set a transfer-encoding, respect that. Let's just + // make sure `chunked` is the final encoding. + let encoder = match headers.entry(header::TRANSFER_ENCODING) + .expect("TRANSFER_ENCODING is valid HeaderName") { + Entry::Occupied(te) => { + should_remove_con_len = true; + if headers::is_chunked(te.iter()) { + Some(Encoder::chunked()) + } else { + warn!("user provided transfer-encoding does not end in 'chunked'"); + + // There's a Transfer-Encoding, but it doesn't end in 'chunked'! + // An example that could trigger this: + // + // Transfer-Encoding: gzip + // + // This can be bad, depending on if this is a request or a + // response. + // + // - A request is illegal if there is a `Transfer-Encoding` + // but it doesn't end in `chunked`. + // - A response that has `Transfer-Encoding` but doesn't + // end in `chunked` isn't illegal, it just forces this + // to be close-delimited. + // + // We can try to repair this, by adding `chunked` ourselves. 
+ + headers::add_chunked(te); + Some(Encoder::chunked()) + } + }, + Entry::Vacant(te) => { + if let Some(len) = existing_con_len { + Some(Encoder::length(len)) + } else if let BodyLength::Unknown = body { + should_remove_con_len = true; + te.insert(HeaderValue::from_static("chunked")); + Some(Encoder::chunked()) + } else { + None + } + }, + }; + + // This is because we need a second mutable borrow to remove + // content-length header. + if let Some(encoder) = encoder { + if should_remove_con_len && existing_con_len.is_some() { + headers.remove(header::CONTENT_LENGTH); + } + return encoder; + } + + // User didn't set transfer-encoding, AND we know body length, + // so we can just set the Content-Length automatically. + + let len = if let BodyLength::Known(len) = body { + len + } else { + unreachable!("BodyLength::Unknown would set chunked"); + }; + + set_content_length(headers, len) + } else { + // Chunked isn't legal, so if it is set, we need to remove it. + // Also, if it *is* set, then we shouldn't replace with a length, + // since the user tried to imply there isn't a length. + let encoder = if headers.remove(header::TRANSFER_ENCODING).is_some() { + trace!("removing illegal transfer-encoding header"); + should_remove_con_len = true; + Encoder::close_delimited() + } else if let Some(len) = existing_con_len { + Encoder::length(len) + } else if let BodyLength::Known(len) = body { + set_content_length(headers, len) + } else { + Encoder::close_delimited() + }; + + if should_remove_con_len && existing_con_len.is_some() { + headers.remove(header::CONTENT_LENGTH); + } + + encoder + } +} + +fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder { + // At this point, there should not be a valid Content-Length + // header. However, since we'll be indexing in anyways, we can + // warn the user if there was an existing illegal header. + // + // Or at least, we can in theory. 
It's actually a little bit slower, + // so perhaps only do that while the user is developing/testing. + + if cfg!(debug_assertions) { + match headers.entry(header::CONTENT_LENGTH) + .expect("CONTENT_LENGTH is valid HeaderName") { + Entry::Occupied(mut cl) => { + // Internal sanity check, we should have already determined + // that the header was illegal before calling this function. + debug_assert!(headers::content_length_parse_all_values(cl.iter()).is_none()); + // Uh oh, the user set `Content-Length` headers, but set bad ones. + // This would be an illegal message anyways, so let's try to repair + // with our known good length. + error!("user provided content-length header was invalid"); + + cl.insert(HeaderValue::from(len)); + Encoder::length(len) + }, + Entry::Vacant(cl) => { + cl.insert(HeaderValue::from(len)); + Encoder::length(len) + } + } + } else { + headers.insert(header::CONTENT_LENGTH, HeaderValue::from(len)); + Encoder::length(len) + } +} + +#[derive(Clone, Copy)] +struct HeaderIndices { + name: (usize, usize), + value: (usize, usize), +} + +fn record_header_indices(bytes: &[u8], headers: &[httparse::Header], indices: &mut [HeaderIndices]) { + let bytes_ptr = bytes.as_ptr() as usize; + for (header, indices) in headers.iter().zip(indices.iter_mut()) { + let name_start = header.name.as_ptr() as usize - bytes_ptr; + let name_end = name_start + header.name.len(); + indices.name = (name_start, name_end); + let value_start = header.value.as_ptr() as usize - bytes_ptr; + let value_end = value_start + header.value.len(); + indices.value = (value_start, value_end); + } +} + +// Write header names as title case. The header name is assumed to be ASCII, +// therefore it is trivial to convert an ASCII character from lowercase to +// uppercase. It is as simple as XORing the lowercase character byte with +// space. 
+fn title_case(dst: &mut Vec, name: &[u8]) { + dst.reserve(name.len()); + + let mut iter = name.iter(); + + // Uppercase the first character + if let Some(c) = iter.next() { + if *c >= b'a' && *c <= b'z' { + dst.push(*c ^ b' '); + } + } + + while let Some(c) = iter.next() { + dst.push(*c); + + if *c == b'-' { + if let Some(c) = iter.next() { + if *c >= b'a' && *c <= b'z' { + dst.push(*c ^ b' '); + } + } + } + } +} + +fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec) { + for (name, value) in headers { + title_case(dst, name.as_str().as_bytes()); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } +} + +fn write_headers(headers: &HeaderMap, dst: &mut Vec) { + for (name, value) in headers { + extend(dst, name.as_str().as_bytes()); + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } +} + +struct FastWrite<'a>(&'a mut Vec); + +impl<'a> fmt::Write for FastWrite<'a> { + #[inline] + fn write_str(&mut self, s: &str) -> fmt::Result { + extend(self.0, s.as_bytes()); + Ok(()) + } + + #[inline] + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } +} + +#[inline] +fn extend(dst: &mut Vec, data: &[u8]) { + dst.extend_from_slice(data); +} + +#[cfg(test)] +mod tests { + use bytes::BytesMut; + + use super::*; + + #[test] + fn test_parse_request() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n".to_vec()); + let mut method = None; + let msg = Server::parse(&mut raw, ParseContext { + cached_headers: &mut None, + req_method: &mut method, + }).unwrap().unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject.0, ::Method::GET); + assert_eq!(msg.head.subject.1, "/echo"); + assert_eq!(msg.head.version, ::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Host"], "hyper.rs"); + assert_eq!(method, Some(::Method::GET)); + 
} + + + #[test] + fn test_parse_response() { + extern crate pretty_env_logger; + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec()); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(::Method::GET), + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject, ::StatusCode::OK); + assert_eq!(msg.head.version, ::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Content-Length"], "0"); + } + + #[test] + fn test_parse_request_errors() { + let mut raw = BytesMut::from(b"GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n".to_vec()); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + }; + Server::parse(&mut raw, ctx).unwrap_err(); + } + + + #[test] + fn test_decoder_request() { + fn parse(s: &str) -> ParsedMessage { + let mut bytes = BytesMut::from(s); + Server::parse(&mut bytes, ParseContext { + cached_headers: &mut None, + req_method: &mut None, + }) + .expect("parse ok") + .expect("parse complete") + } + + fn parse_err(s: &str, comment: &str) -> ::error::Parse { + let mut bytes = BytesMut::from(s); + Server::parse(&mut bytes, ParseContext { + cached_headers: &mut None, + req_method: &mut None, + }) + .expect_err(comment) + } + + // no length or transfer-encoding means 0-length body + assert_eq!(parse("\ + GET / HTTP/1.1\r\n\ + \r\n\ + ").decode, DecodedLength::ZERO); + + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + \r\n\ + ").decode, DecodedLength::ZERO); + + // transfer-encoding: chunked + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip, chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip\r\n\ + 
transfer-encoding: chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + // content-length + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + \r\n\ + ").decode, DecodedLength::new(10)); + + // transfer-encoding and content-length = chunked + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + content-length: 10\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip\r\n\ + content-length: 10\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + + // multiple content-lengths of same value are fine + assert_eq!(parse("\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + content-length: 10\r\n\ + \r\n\ + ").decode, DecodedLength::new(10)); + + + // multiple content-lengths with different values is an error + parse_err("\ + POST / HTTP/1.1\r\n\ + content-length: 10\r\n\ + content-length: 11\r\n\ + \r\n\ + ", "multiple content-lengths"); + + // transfer-encoding that isn't chunked is an error + parse_err("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: gzip\r\n\ + \r\n\ + ", "transfer-encoding but not chunked"); + + parse_err("\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked, gzip\r\n\ + \r\n\ + ", "transfer-encoding doesn't end in chunked"); + + + // http/1.0 + + assert_eq!(parse("\ + POST / HTTP/1.0\r\n\ + content-length: 10\r\n\ + \r\n\ + ").decode, DecodedLength::new(10)); + + + // 1.0 doesn't understand chunked, so its an error + parse_err("\ + POST / HTTP/1.0\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ", "1.0 chunked"); + } + + #[test] + fn test_decoder_response() { + + fn parse(s: &str) -> ParsedMessage { + parse_with_method(s, Method::GET) + } + + fn parse_ignores(s: &str) { + let mut bytes = BytesMut::from(s); + assert!(Client::parse(&mut 
bytes, ParseContext { + cached_headers: &mut None, + req_method: &mut Some(Method::GET), + }) + .expect("parse ok") + .is_none()) + } + + fn parse_with_method(s: &str, m: Method) -> ParsedMessage { + let mut bytes = BytesMut::from(s); + Client::parse(&mut bytes, ParseContext { + cached_headers: &mut None, + req_method: &mut Some(m), + }) + .expect("parse ok") + .expect("parse complete") + } + + fn parse_err(s: &str) -> ::error::Parse { + let mut bytes = BytesMut::from(s); + Client::parse(&mut bytes, ParseContext { + cached_headers: &mut None, + req_method: &mut Some(Method::GET), + }) + .expect_err("parse should err") + } + + + // no content-length or transfer-encoding means close-delimited + assert_eq!(parse("\ + HTTP/1.1 200 OK\r\n\ + \r\n\ + ").decode, DecodedLength::CLOSE_DELIMITED); + + // 204 and 304 never have a body + assert_eq!(parse("\ + HTTP/1.1 204 No Content\r\n\ + \r\n\ + ").decode, DecodedLength::ZERO); + + assert_eq!(parse("\ + HTTP/1.1 304 Not Modified\r\n\ + \r\n\ + ").decode, DecodedLength::ZERO); + + // content-length + assert_eq!(parse("\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + \r\n\ + ").decode, DecodedLength::new(8)); + + assert_eq!(parse("\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + content-length: 8\r\n\ + \r\n\ + ").decode, DecodedLength::new(8)); + + parse_err("\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + content-length: 9\r\n\ + \r\n\ + "); + + + // transfer-encoding + assert_eq!(parse("\ + HTTP/1.1 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + // transfer-encoding and content-length = chunked + assert_eq!(parse("\ + HTTP/1.1 200 OK\r\n\ + content-length: 10\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + ").decode, DecodedLength::CHUNKED); + + + // HEAD can have content-length, but not body + assert_eq!(parse_with_method("\ + HTTP/1.1 200 OK\r\n\ + content-length: 8\r\n\ + \r\n\ + ", Method::HEAD).decode, DecodedLength::ZERO); + + // CONNECT with 200 never has 
body + { + let msg = parse_with_method("\ + HTTP/1.1 200 OK\r\n\ + \r\n\ + ", Method::CONNECT); + assert_eq!(msg.decode, DecodedLength::ZERO); + assert!(!msg.keep_alive, "should be upgrade"); + assert!(msg.wants_upgrade, "should be upgrade"); + } + + // CONNECT receiving non 200 can have a body + assert_eq!(parse_with_method("\ + HTTP/1.1 400 Bad Request\r\n\ + \r\n\ + ", Method::CONNECT).decode, DecodedLength::CLOSE_DELIMITED); + + + // 1xx status codes + parse_ignores("\ + HTTP/1.1 100 Continue\r\n\ + \r\n\ + "); + + parse_ignores("\ + HTTP/1.1 103 Early Hints\r\n\ + \r\n\ + "); + + // 101 upgrade not supported yet + { + let msg = parse("\ + HTTP/1.1 101 Switching Protocols\r\n\ + \r\n\ + "); + assert_eq!(msg.decode, DecodedLength::ZERO); + assert!(!msg.keep_alive, "should be last"); + assert!(msg.wants_upgrade, "should be upgrade"); + } + + + // http/1.0 + assert_eq!(parse("\ + HTTP/1.0 200 OK\r\n\ + \r\n\ + ").decode, DecodedLength::CLOSE_DELIMITED); + + // 1.0 doesn't understand chunked + parse_err("\ + HTTP/1.0 200 OK\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + "); + + // keep-alive + assert!(parse("\ + HTTP/1.1 200 OK\r\n\ + content-length: 0\r\n\ + \r\n\ + ").keep_alive, "HTTP/1.1 keep-alive is default"); + + assert!(!parse("\ + HTTP/1.1 200 OK\r\n\ + content-length: 0\r\n\ + connection: foo, close, bar\r\n\ + \r\n\ + ").keep_alive, "connection close is always close"); + + assert!(!parse("\ + HTTP/1.0 200 OK\r\n\ + content-length: 0\r\n\ + \r\n\ + ").keep_alive, "HTTP/1.0 close is default"); + + assert!(parse("\ + HTTP/1.0 200 OK\r\n\ + content-length: 0\r\n\ + connection: foo, keep-alive, bar\r\n\ + \r\n\ + ").keep_alive, "connection keep-alive is always keep-alive"); + } + + #[test] + fn test_client_request_encode_title_case() { + use http::header::HeaderValue; + use proto::BodyLength; + + let mut head = MessageHead::default(); + head.headers.insert("content-length", HeaderValue::from_static("10")); + head.headers.insert("content-type", 
HeaderValue::from_static("application/json")); + + let mut vec = Vec::new(); + Client::encode(Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, &mut vec).unwrap(); + + assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n\r\n".to_vec()); + } + + #[test] + fn test_server_encode_connect_method() { + let mut head = MessageHead::default(); + + let mut vec = Vec::new(); + let encoder = Server::encode(Encode { + head: &mut head, + body: None, + keep_alive: true, + req_method: &mut Some(Method::CONNECT), + title_case_headers: false, + }, &mut vec).unwrap(); + + assert!(encoder.is_last()); + } + + #[cfg(feature = "nightly")] + use test::Bencher; + + #[cfg(feature = "nightly")] + #[bench] + fn bench_parse_incoming(b: &mut Bencher) { + let mut raw = BytesMut::from( + b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\ + I_wonder/Hard_to_write_in_an_uri_after_all/you_have_to_make\ + _up_the_punctuation_yourself/how_fun_is_that?test=foo&test1=\ + foo1&test2=foo2&test3=foo3&test4=foo4 HTTP/1.1\r\nHost: \ + hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \ + utf8\r\nAccept-Encoding: *\r\nAccess-Control-Allow-\ + Credentials: None\r\nAccess-Control-Allow-Origin: None\r\n\ + Access-Control-Allow-Methods: None\r\nAccess-Control-Allow-\ + Headers: None\r\nContent-Encoding: utf8\r\nContent-Security-\ + Policy: None\r\nContent-Type: text/html\r\nOrigin: hyper\ + \r\nSec-Websocket-Extensions: It looks super important!\r\n\ + Sec-Websocket-Origin: hyper\r\nSec-Websocket-Version: 4.3\r\ + \nStrict-Transport-Security: None\r\nUser-Agent: hyper\r\n\ + X-Content-Duration: None\r\nX-Content-Security-Policy: None\ + \r\nX-DNSPrefetch-Control: None\r\nX-Frame-Options: \ + Something important obviously\r\nX-Requested-With: Nothing\ + \r\n\r\n".to_vec() + ); + let len = raw.len(); + let mut headers = Some(HeaderMap::new()); + + b.bytes = len as 
u64; + b.iter(|| { + let mut msg = Server::parse(&mut raw, ParseContext { + cached_headers: &mut headers, + req_method: &mut None, + }).unwrap().unwrap(); + ::test::black_box(&msg); + msg.head.headers.clear(); + headers = Some(msg.head.headers); + restart(&mut raw, len); + }); + + + fn restart(b: &mut BytesMut, len: usize) { + b.reserve(1); + unsafe { + b.set_len(len); + } + } + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_parse_short(b: &mut Bencher) { + let s = &b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"[..]; + let mut raw = BytesMut::from(s.to_vec()); + let len = raw.len(); + let mut headers = Some(HeaderMap::new()); + + b.bytes = len as u64; + b.iter(|| { + let mut msg = Server::parse(&mut raw, ParseContext { + cached_headers: &mut headers, + req_method: &mut None, + }).unwrap().unwrap(); + ::test::black_box(&msg); + msg.head.headers.clear(); + headers = Some(msg.head.headers); + restart(&mut raw, len); + }); + + + fn restart(b: &mut BytesMut, len: usize) { + b.reserve(1); + unsafe { + b.set_len(len); + } + } + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_server_encode_headers_preset(b: &mut Bencher) { + use http::header::HeaderValue; + use proto::BodyLength; + + let len = 108; + b.bytes = len as u64; + + let mut head = MessageHead::default(); + let mut headers = HeaderMap::new(); + headers.insert("content-length", HeaderValue::from_static("10")); + headers.insert("content-type", HeaderValue::from_static("application/json")); + + b.iter(|| { + let mut vec = Vec::new(); + head.headers = headers.clone(); + Server::encode(Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut Some(Method::GET), + title_case_headers: false, + }, &mut vec).unwrap(); + assert_eq!(vec.len(), len); + ::test::black_box(vec); + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_server_encode_no_headers(b: &mut Bencher) { + use proto::BodyLength; + + let len = 76; + b.bytes = len as u64; + + let mut 
head = MessageHead::default(); + let mut vec = Vec::with_capacity(128); + + b.iter(|| { + Server::encode(Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut Some(Method::GET), + title_case_headers: false, + }, &mut vec).unwrap(); + assert_eq!(vec.len(), len); + ::test::black_box(&vec); + + // reset Vec to 0 (always safe) + unsafe { + vec.set_len(0); + } + }) + } +} + diff --git a/third_party/rust/hyper/src/proto/h2/client.rs b/third_party/rust/hyper/src/proto/h2/client.rs new file mode 100644 index 000000000000..1570d2ee0aa5 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h2/client.rs @@ -0,0 +1,168 @@ +use bytes::IntoBuf; +use futures::{Async, Future, Poll, Stream}; +use futures::future::{self, Either}; +use futures::sync::mpsc; +use h2::client::{Builder, Handshake, SendRequest}; +use tokio_io::{AsyncRead, AsyncWrite}; + +use headers::content_length_parse_all; +use body::Payload; +use ::common::{Exec, Never}; +use headers; +use ::proto::Dispatched; +use super::{PipeToSendStream, SendBuf}; +use ::{Body, Request, Response}; + +type ClientRx = ::client::dispatch::Receiver, Response>; +/// An mpsc channel is used to help notify the `Connection` task when *all* +/// other handles to it have been dropped, so that it can shutdown. 
+type ConnDropRef = mpsc::Sender; + +pub(crate) struct Client +where + B: Payload, +{ + executor: Exec, + rx: ClientRx, + state: State>, +} + +enum State where B: IntoBuf { + Handshaking(Handshake), + Ready(SendRequest, ConnDropRef), +} + +impl Client +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload, +{ + pub(crate) fn new(io: T, rx: ClientRx, exec: Exec) -> Client { + let handshake = Builder::new() + // we don't expose PUSH promises yet + .enable_push(false) + .handshake(io); + + Client { + executor: exec, + rx: rx, + state: State::Handshaking(handshake), + } + } +} + +impl Future for Client +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, +{ + type Item = Dispatched; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + let next = match self.state { + State::Handshaking(ref mut h) => { + let (request_tx, conn) = try_ready!(h.poll().map_err(::Error::new_h2)); + // An mpsc channel is used entirely to detect when the + // 'Client' has been dropped. This is to get around a bug + // in h2 where dropping all SendRequests won't notify a + // parked Connection. 
+ let (tx, rx) = mpsc::channel(0); + let rx = rx.into_future() + .map(|(msg, _)| match msg { + Some(never) => match never {}, + None => (), + }) + .map_err(|_| -> Never { unreachable!("mpsc cannot error") }); + let fut = conn + .inspect(|_| trace!("connection complete")) + .map_err(|e| debug!("connection error: {}", e)) + .select2(rx) + .then(|res| match res { + Ok(Either::A(((), _))) | + Err(Either::A(((), _))) => { + // conn has finished either way + Either::A(future::ok(())) + }, + Ok(Either::B(((), conn))) => { + // mpsc has been dropped, hopefully polling + // the connection some more should start shutdown + // and then close + trace!("send_request dropped, starting conn shutdown"); + Either::B(conn) + } + Err(Either::B((never, _))) => match never {}, + }); + self.executor.execute(fut)?; + State::Ready(request_tx, tx) + }, + State::Ready(ref mut tx, ref conn_dropper) => { + try_ready!(tx.poll_ready().map_err(::Error::new_h2)); + match self.rx.poll() { + Ok(Async::Ready(Some((req, mut cb)))) => { + // check that future hasn't been canceled already + if let Async::Ready(()) = cb.poll_cancel().expect("poll_cancel cannot error") { + trace!("request canceled"); + continue; + } + let (head, body) = req.into_parts(); + let mut req = ::http::Request::from_parts(head, ()); + super::strip_connection_headers(req.headers_mut()); + if let Some(len) = body.content_length() { + headers::set_content_length_if_missing(req.headers_mut(), len); + } + let eos = body.is_end_stream(); + let (fut, body_tx) = match tx.send_request(req, eos) { + Ok(ok) => ok, + Err(err) => { + debug!("client send request error: {}", err); + let _ = cb.send(Err((::Error::new_h2(err), None))); + continue; + } + }; + if !eos { + let conn_drop_ref = conn_dropper.clone(); + let pipe = PipeToSendStream::new(body, body_tx) + .map_err(|e| debug!("client request body error: {}", e)) + .then(move |x| { + drop(conn_drop_ref); + x + }); + self.executor.execute(pipe)?; + } + + let fut = fut + .then(move |result| { 
+ match result { + Ok(res) => { + let content_length = content_length_parse_all(res.headers()); + let res = res.map(|stream| + ::Body::h2(stream, content_length)); + let _ = cb.send(Ok(res)); + }, + Err(err) => { + debug!("client response error: {}", err); + let _ = cb.send(Err((::Error::new_h2(err), None))); + } + } + Ok(()) + }); + self.executor.execute(fut)?; + continue; + }, + + Ok(Async::NotReady) => return Ok(Async::NotReady), + + Ok(Async::Ready(None)) | + Err(_) => { + trace!("client::dispatch::Sender dropped"); + return Ok(Async::Ready(Dispatched::Shutdown)); + } + } + }, + }; + self.state = next; + } + } +} diff --git a/third_party/rust/hyper/src/proto/h2/mod.rs b/third_party/rust/hyper/src/proto/h2/mod.rs new file mode 100644 index 000000000000..de877f0bc703 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h2/mod.rs @@ -0,0 +1,199 @@ +use bytes::Buf; +use futures::{Async, Future, Poll}; +use h2::{Reason, SendStream}; +use http::header::{ + HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER, + TRANSFER_ENCODING, UPGRADE, +}; +use http::HeaderMap; + +use body::Payload; + +mod client; +mod server; + +pub(crate) use self::client::Client; +pub(crate) use self::server::Server; + +fn strip_connection_headers(headers: &mut HeaderMap) { + // List of connection headers from: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection + let connection_headers = [ + HeaderName::from_lowercase(b"keep-alive").unwrap(), + HeaderName::from_lowercase(b"proxy-connection").unwrap(), + PROXY_AUTHENTICATE, + PROXY_AUTHORIZATION, + TE, + TRAILER, + TRANSFER_ENCODING, + UPGRADE, + ]; + + for header in connection_headers.iter() { + if headers.remove(header).is_some() { + warn!("Connection header illegal in HTTP/2: {}", header.as_str()); + } + } + + if let Some(header) = headers.remove(CONNECTION) { + warn!( + "Connection header illegal in HTTP/2: {}", + CONNECTION.as_str() + ); + let header_contents = header.to_str().unwrap(); + + // 
A `Connection` header may have a comma-separated list of names of other headers that + // are meant for only this specific connection. + // + // Iterate these names and remove them as headers. Connection-specific headers are + // forbidden in HTTP2, as that information has been moved into frame types of the h2 + // protocol. + for name in header_contents.split(',') { + let name = name.trim(); + headers.remove(name); + } + } +} + +// body adapters used by both Client and Server + +struct PipeToSendStream +where + S: Payload, +{ + body_tx: SendStream>, + data_done: bool, + stream: S, +} + +impl PipeToSendStream +where + S: Payload, +{ + fn new(stream: S, tx: SendStream>) -> PipeToSendStream { + PipeToSendStream { + body_tx: tx, + data_done: false, + stream: stream, + } + } + + fn on_err(&mut self, err: S::Error) -> ::Error { + let err = ::Error::new_user_body(err); + trace!("send body user stream error: {}", err); + self.body_tx.send_reset(Reason::INTERNAL_ERROR); + err + } + + fn send_eos_frame(&mut self) -> ::Result<()> { + trace!("send body eos"); + self.body_tx + .send_data(SendBuf(None), true) + .map_err(::Error::new_body_write) + } +} + +impl Future for PipeToSendStream +where + S: Payload, +{ + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + if !self.data_done { + // we don't have the next chunk of data yet, so just reserve 1 byte to make + // sure there's some capacity available. h2 will handle the capacity management + // for the actual body chunk. + self.body_tx.reserve_capacity(1); + + if self.body_tx.capacity() == 0 { + loop { + match try_ready!(self.body_tx.poll_capacity().map_err(::Error::new_h2)) { + Some(0) => {} + Some(_) => break, + None => return Err(::Error::new_canceled(None::<::Error>)), + } + } + } else { + if let Async::Ready(reason) = + self.body_tx.poll_reset().map_err(|e| ::Error::new_h2(e))? 
+ { + debug!("stream received RST_STREAM: {:?}", reason); + return Err(::Error::new_h2(reason.into())); + } + } + + match try_ready!(self.stream.poll_data().map_err(|e| self.on_err(e))) { + Some(chunk) => { + let is_eos = self.stream.is_end_stream(); + trace!( + "send body chunk: {} bytes, eos={}", + chunk.remaining(), + is_eos, + ); + + let buf = SendBuf(Some(chunk)); + self.body_tx + .send_data(buf, is_eos) + .map_err(::Error::new_body_write)?; + + if is_eos { + return Ok(Async::Ready(())); + } + } + None => { + self.body_tx.reserve_capacity(0); + let is_eos = self.stream.is_end_stream(); + if is_eos { + return self.send_eos_frame().map(Async::Ready); + } else { + self.data_done = true; + // loop again to poll_trailers + } + } + } + } else { + if let Async::Ready(reason) = + self.body_tx.poll_reset().map_err(|e| ::Error::new_h2(e))? + { + debug!("stream received RST_STREAM: {:?}", reason); + return Err(::Error::new_h2(reason.into())); + } + + match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_err(e))) { + Some(trailers) => { + self.body_tx + .send_trailers(trailers) + .map_err(::Error::new_body_write)?; + return Ok(Async::Ready(())); + } + None => { + // There were no trailers, so send an empty DATA frame... 
+ return self.send_eos_frame().map(Async::Ready); + } + } + } + } + } +} + +struct SendBuf(Option); + +impl Buf for SendBuf { + #[inline] + fn remaining(&self) -> usize { + self.0.as_ref().map(|b| b.remaining()).unwrap_or(0) + } + + #[inline] + fn bytes(&self) -> &[u8] { + self.0.as_ref().map(|b| b.bytes()).unwrap_or(&[]) + } + + #[inline] + fn advance(&mut self, cnt: usize) { + self.0.as_mut().map(|b| b.advance(cnt)); + } +} diff --git a/third_party/rust/hyper/src/proto/h2/server.rs b/third_party/rust/hyper/src/proto/h2/server.rs new file mode 100644 index 000000000000..d78d4a254e52 --- /dev/null +++ b/third_party/rust/hyper/src/proto/h2/server.rs @@ -0,0 +1,242 @@ +use futures::{Async, Future, Poll, Stream}; +use h2::Reason; +use h2::server::{Builder, Connection, Handshake, SendResponse}; +use tokio_io::{AsyncRead, AsyncWrite}; + +use ::headers::content_length_parse_all; +use ::body::Payload; +use ::common::Exec; +use ::headers; +use ::service::Service; +use ::proto::Dispatched; +use super::{PipeToSendStream, SendBuf}; + +use ::{Body, Response}; + +pub(crate) struct Server +where + S: Service, + B: Payload, +{ + exec: Exec, + service: S, + state: State, +} + +enum State +where + B: Payload, +{ + Handshaking(Handshake>), + Serving(Serving), + Closed, +} + +struct Serving +where + B: Payload, +{ + conn: Connection>, +} + + +impl Server +where + T: AsyncRead + AsyncWrite, + S: Service, + S::Error: Into>, + S::Future: Send + 'static, + B: Payload, +{ + pub(crate) fn new(io: T, service: S, exec: Exec) -> Server { + let handshake = Builder::new() + .handshake(io); + Server { + exec, + state: State::Handshaking(handshake), + service, + } + } + + pub fn graceful_shutdown(&mut self) { + trace!("graceful_shutdown"); + match self.state { + State::Handshaking(..) 
=> { + // fall-through, to replace state with Closed + }, + State::Serving(ref mut srv) => { + srv.conn.graceful_shutdown(); + return; + }, + State::Closed => { + return; + } + } + self.state = State::Closed; + } +} + +impl Future for Server +where + T: AsyncRead + AsyncWrite, + S: Service, + S::Error: Into>, + S::Future: Send + 'static, + B: Payload, +{ + type Item = Dispatched; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + let next = match self.state { + State::Handshaking(ref mut h) => { + let conn = try_ready!(h.poll().map_err(::Error::new_h2)); + State::Serving(Serving { + conn: conn, + }) + }, + State::Serving(ref mut srv) => { + try_ready!(srv.poll_server(&mut self.service, &self.exec)); + return Ok(Async::Ready(Dispatched::Shutdown)); + } + State::Closed => { + // graceful_shutdown was called before handshaking finished, + // nothing to do here... + return Ok(Async::Ready(Dispatched::Shutdown)); + } + }; + self.state = next; + } + } +} + +impl Serving +where + T: AsyncRead + AsyncWrite, + B: Payload, +{ + fn poll_server(&mut self, service: &mut S, exec: &Exec) -> Poll<(), ::Error> + where + S: Service< + ReqBody=Body, + ResBody=B, + >, + S::Error: Into>, + S::Future: Send + 'static, + { + while let Some((req, respond)) = try_ready!(self.conn.poll().map_err(::Error::new_h2)) { + trace!("incoming request"); + let content_length = content_length_parse_all(req.headers()); + let req = req.map(|stream| { + ::Body::h2(stream, content_length) + }); + let fut = H2Stream::new(service.call(req), respond); + exec.execute(fut)?; + } + + // no more incoming streams... 
+ trace!("incoming connection complete"); + Ok(Async::Ready(())) + } +} + +struct H2Stream +where + B: Payload, +{ + reply: SendResponse>, + state: H2StreamState, +} + +enum H2StreamState +where + B: Payload, +{ + Service(F), + Body(PipeToSendStream), +} + +impl H2Stream +where + F: Future>, + F::Error: Into>, + B: Payload, +{ + fn new(fut: F, respond: SendResponse>) -> H2Stream { + H2Stream { + reply: respond, + state: H2StreamState::Service(fut), + } + } + + fn poll2(&mut self) -> Poll<(), ::Error> { + loop { + let next = match self.state { + H2StreamState::Service(ref mut h) => { + let res = match h.poll() { + Ok(Async::Ready(r)) => r, + Ok(Async::NotReady) => { + // Body is not yet ready, so we want to check if the client has sent a + // RST_STREAM frame which would cancel the current request. + if let Async::Ready(reason) = + self.reply.poll_reset().map_err(|e| ::Error::new_h2(e))? + { + debug!("stream received RST_STREAM: {:?}", reason); + return Err(::Error::new_h2(reason.into())); + } + return Ok(Async::NotReady); + } + Err(e) => return Err(::Error::new_user_service(e)), + }; + + let (head, body) = res.into_parts(); + let mut res = ::http::Response::from_parts(head, ()); + super::strip_connection_headers(res.headers_mut()); + if let Some(len) = body.content_length() { + headers::set_content_length_if_missing(res.headers_mut(), len); + } + macro_rules! 
reply { + ($eos:expr) => ({ + match self.reply.send_response(res, $eos) { + Ok(tx) => tx, + Err(e) => { + trace!("send response error: {}", e); + self.reply.send_reset(Reason::INTERNAL_ERROR); + return Err(::Error::new_h2(e)); + } + } + }) + } + if !body.is_end_stream() { + let body_tx = reply!(false); + H2StreamState::Body(PipeToSendStream::new(body, body_tx)) + } else { + reply!(true); + return Ok(Async::Ready(())); + } + }, + H2StreamState::Body(ref mut pipe) => { + return pipe.poll(); + } + }; + self.state = next; + } + } +} + +impl Future for H2Stream +where + F: Future>, + F::Error: Into>, + B: Payload, +{ + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll { + self.poll2() + .map_err(|e| debug!("stream error: {}", e)) + } +} + diff --git a/third_party/rust/hyper/src/proto/mod.rs b/third_party/rust/hyper/src/proto/mod.rs new file mode 100644 index 000000000000..0e0cd62c67c4 --- /dev/null +++ b/third_party/rust/hyper/src/proto/mod.rs @@ -0,0 +1,106 @@ +//! Pieces pertaining to the HTTP message protocol. +use http::{HeaderMap, Method, StatusCode, Uri, Version}; + +pub(crate) use self::h1::{dispatch, Conn, ServerTransaction}; +use self::body_length::DecodedLength; + +pub(crate) mod h1; +pub(crate) mod h2; + +/// An Incoming Message head. Includes request/status line, and headers. +#[derive(Clone, Debug, Default, PartialEq)] +pub struct MessageHead { + /// HTTP version of the message. + pub version: Version, + /// Subject (request line or status line) of Incoming message. + pub subject: S, + /// Headers of the Incoming message. + pub headers: HeaderMap, +} + +/// An incoming request message. +pub type RequestHead = MessageHead; + +#[derive(Debug, Default, PartialEq)] +pub struct RequestLine(pub Method, pub Uri); + +/// An incoming response message. 
+pub type ResponseHead = MessageHead; + +#[derive(Debug)] +pub enum BodyLength { + /// Content-Length + Known(u64), + /// Transfer-Encoding: chunked (if h1) + Unknown, +} + +/// Status of when a Disaptcher future completes. +pub(crate) enum Dispatched { + /// Dispatcher completely shutdown connection. + Shutdown, + /// Dispatcher has pending upgrade, and so did not shutdown. + Upgrade(::upgrade::Pending), +} + +/// A separate module to encapsulate the invariants of the DecodedLength type. +mod body_length { + use std::fmt; + + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub(crate) struct DecodedLength(u64); + + const MAX_LEN: u64 = ::std::u64::MAX - 2; + + impl DecodedLength { + pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX); + pub(crate) const CHUNKED: DecodedLength = DecodedLength(::std::u64::MAX - 1); + pub(crate) const ZERO: DecodedLength = DecodedLength(0); + + #[cfg(test)] + pub(crate) fn new(len: u64) -> Self { + debug_assert!(len <= MAX_LEN); + DecodedLength(len) + } + + /// Takes the length as a content-length without other checks. + /// + /// Should only be called if previously confirmed this isn't + /// CLOSE_DELIMITED or CHUNKED. + #[inline] + pub(crate) fn danger_len(self) -> u64 { + debug_assert!(self.0 < Self::CHUNKED.0); + self.0 + } + + /// Converts to an Option representing a Known or Unknown length. + pub(crate) fn into_opt(self) -> Option { + match self { + DecodedLength::CHUNKED | + DecodedLength::CLOSE_DELIMITED => None, + DecodedLength(known) => Some(known) + } + } + + /// Checks the `u64` is within the maximum allowed for content-length. 
+ pub(crate) fn checked_new(len: u64) -> Result { + if len <= MAX_LEN { + Ok(DecodedLength(len)) + } else { + warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN); + Err(::error::Parse::TooLarge) + } + } + } + + impl fmt::Display for DecodedLength { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"), + DecodedLength::CHUNKED => f.write_str("chunked encoding"), + DecodedLength::ZERO => f.write_str("empty"), + DecodedLength(n) => write!(f, "content-length ({} bytes)", n), + } + } + } +} diff --git a/third_party/rust/hyper/src/rt.rs b/third_party/rust/hyper/src/rt.rs new file mode 100644 index 000000000000..3334bb12a5c7 --- /dev/null +++ b/third_party/rust/hyper/src/rt.rs @@ -0,0 +1,55 @@ +//! Default runtime +//! +//! By default, hyper includes the [tokio](https://tokio.rs) runtime. To ease +//! using it, several types are re-exported here. +//! +//! The inclusion of a default runtime can be disabled by turning off hyper's +//! `runtime` Cargo feature. + +pub use futures::{Future, Stream}; +pub use futures::future::{lazy, poll_fn}; +use tokio; + +use self::inner::Spawn; + +/// Spawns a future on the default executor. +/// +/// # Panics +/// +/// This function will panic if the default executor is not set. +/// +/// # Note +/// +/// The `Spawn` return type is not currently meant for anything other than +/// to reserve adding new trait implementations to it later. It can be +/// ignored for now. +pub fn spawn(f: F) -> Spawn +where + F: Future + Send + 'static, +{ + tokio::spawn(f); + Spawn { + _inner: (), + } +} + +/// Start the Tokio runtime using the supplied future to bootstrap execution. +/// +/// # Example +/// +/// See the [server documentation](::server) for an example of its usage. 
+pub fn run(f: F) +where + F: Future + Send + 'static +{ + tokio::run(f); +} + +// Make the `Spawn` type an unnameable, so we can add +// methods or trait impls to it later without a breaking change. +mod inner { + #[allow(missing_debug_implementations)] + pub struct Spawn { + pub(super) _inner: (), + } +} diff --git a/third_party/rust/hyper/src/server/conn.rs b/third_party/rust/hyper/src/server/conn.rs new file mode 100644 index 000000000000..d66c3add0e68 --- /dev/null +++ b/third_party/rust/hyper/src/server/conn.rs @@ -0,0 +1,729 @@ +//! Lower-level Server connection API. +//! +//! The types in this module are to provide a lower-level API based around a +//! single connection. Accepting a connection and binding it with a service +//! are not handled at this level. This module provides the building blocks to +//! customize those things externally. +//! +//! If you don't have need to manage connections yourself, consider using the +//! higher-level [Server](super) API. + +use std::fmt; +use std::mem; +#[cfg(feature = "runtime")] use std::net::SocketAddr; +use std::sync::Arc; +#[cfg(feature = "runtime")] use std::time::Duration; + +use bytes::Bytes; +use futures::{Async, Future, Poll, Stream}; +use futures::future::{Either, Executor}; +use tokio_io::{AsyncRead, AsyncWrite}; +#[cfg(feature = "runtime")] use tokio_reactor::Handle; + +use body::{Body, Payload}; +use common::Exec; +use common::io::Rewind; +use error::{Kind, Parse}; +use proto; +use service::{NewService, Service}; +use upgrade::Upgraded; + +use self::upgrades::UpgradeableConnection; + +#[cfg(feature = "runtime")] pub use super::tcp::AddrIncoming; + +/// A lower-level configuration of the HTTP protocol. +/// +/// This structure is used to configure options for an HTTP server connection. +/// +/// If you don't have need to manage connections yourself, consider using the +/// higher-level [Server](super) API. 
+#[derive(Clone, Debug)] +pub struct Http { + exec: Exec, + h1_writev: bool, + mode: ConnectionMode, + keep_alive: bool, + max_buf_size: Option, + pipeline_flush: bool, +} + +/// The internal mode of HTTP protocol which indicates the behavior when a parse error occurs. +#[derive(Clone, Debug, PartialEq)] +enum ConnectionMode { + /// Always use HTTP/1 and do not upgrade when a parse error occurs. + H1Only, + /// Always use HTTP/2. + H2Only, + /// Use HTTP/1 and try to upgrade to h2 when a parse error occurs. + Fallback, +} + +/// A stream mapping incoming IOs to new services. +/// +/// Yields `Connecting`s that are futures that should be put on a reactor. +#[must_use = "streams do nothing unless polled"] +#[derive(Debug)] +pub struct Serve { + incoming: I, + new_service: S, + protocol: Http, +} + +/// A future building a new `Service` to a `Connection`. +/// +/// Wraps the future returned from `NewService` into one that returns +/// a `Connection`. +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct Connecting { + future: F, + io: Option, + protocol: Http, +} + +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub(super) struct SpawnAll { + serve: Serve, +} + +/// A future binding a connection with a Service. +/// +/// Polling this future will drive HTTP forward. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + S: Service, +{ + pub(super) conn: Option< + Either< + proto::h1::Dispatcher< + proto::h1::dispatch::Server, + S::ResBody, + T, + proto::ServerTransaction, + >, + proto::h2::Server< + Rewind, + S, + S::ResBody, + >, + >>, + fallback: bool, +} + + + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. 
+ pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// If the client sent additional bytes after its last request, and + /// this connection "ended" with an upgrade, the read buffer will contain + /// those bytes. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + /// The `Service` used to serve this connection. + pub service: S, + _inner: (), +} + +// ===== impl Http ===== + +impl Http { + /// Creates a new instance of the HTTP protocol, ready to spawn a server or + /// start accepting connections. + pub fn new() -> Http { + Http { + exec: Exec::Default, + h1_writev: true, + mode: ConnectionMode::Fallback, + keep_alive: true, + max_buf_size: None, + pipeline_flush: false, + } + } + + /// Sets whether HTTP1 is required. + /// + /// Default is false + pub fn http1_only(&mut self, val: bool) -> &mut Self { + if val { + self.mode = ConnectionMode::H1Only; + } else { + self.mode = ConnectionMode::Fallback; + } + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Default is `true`. + #[inline] + pub fn http1_writev(&mut self, val: bool) -> &mut Self { + self.h1_writev = val; + self + } + + /// Sets whether HTTP2 is required. + /// + /// Default is false + pub fn http2_only(&mut self, val: bool) -> &mut Self { + if val { + self.mode = ConnectionMode::H2Only; + } else { + self.mode = ConnectionMode::Fallback; + } + self + } + + /// Enables or disables HTTP keep-alive. + /// + /// Default is true. 
+ pub fn keep_alive(&mut self, val: bool) -> &mut Self { + self.keep_alive = val; + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + self.max_buf_size = Some(max); + self + } + + /// Aggregates flushes to better support pipelined responses. + /// + /// Experimental, may have bugs. + /// + /// Default is false. + pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { + self.pipeline_flush = enabled; + self + } + + /// Set the executor used to spawn background tasks. + /// + /// Default uses implicit default (like `tokio::spawn`). + pub fn executor(&mut self, exec: E) -> &mut Self + where + E: Executor + Send>> + Send + Sync + 'static + { + self.exec = Exec::Executor(Arc::new(exec)); + self + } + + /// Bind a connection together with a [`Service`](::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. 
+ /// + /// # Example + /// + /// ``` + /// # extern crate hyper; + /// # extern crate tokio_io; + /// # #[cfg(feature = "runtime")] + /// # extern crate tokio; + /// # use hyper::{Body, Request, Response}; + /// # use hyper::service::Service; + /// # use hyper::server::conn::Http; + /// # use tokio_io::{AsyncRead, AsyncWrite}; + /// # #[cfg(feature = "runtime")] + /// # fn run(some_io: I, some_service: S) + /// # where + /// # I: AsyncRead + AsyncWrite + Send + 'static, + /// # S: Service + Send + 'static, + /// # S::Future: Send + /// # { + /// # use hyper::rt::Future; + /// # use tokio::reactor::Handle; + /// let http = Http::new(); + /// let conn = http.serve_connection(some_io, some_service); + /// + /// let fut = conn.map_err(|e| { + /// eprintln!("server connection error: {}", e); + /// }); + /// + /// hyper::rt::spawn(fut); + /// # } + /// # fn main() {} + /// ``` + pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: Service, + S::Error: Into>, + S::Future: Send + 'static, + Bd: Payload, + I: AsyncRead + AsyncWrite, + { + let either = match self.mode { + ConnectionMode::H1Only | ConnectionMode::Fallback => { + let mut conn = proto::Conn::new(io); + if !self.keep_alive { + conn.disable_keep_alive(); + } + if !self.h1_writev { + conn.set_write_strategy_flatten(); + } + conn.set_flush_pipeline(self.pipeline_flush); + if let Some(max) = self.max_buf_size { + conn.set_max_buf_size(max); + } + let sd = proto::h1::dispatch::Server::new(service); + Either::A(proto::h1::Dispatcher::new(sd, conn)) + } + ConnectionMode::H2Only => { + let rewind_io = Rewind::new(io); + let h2 = proto::h2::Server::new(rewind_io, service, self.exec.clone()); + Either::B(h2) + } + }; + + Connection { + conn: Some(either), + fallback: self.mode == ConnectionMode::Fallback, + } + } + + /// Bind the provided `addr` with the default `Handle` and return [`Serve`](Serve). 
+ /// + /// This method will bind the `addr` provided with a new TCP listener ready + /// to accept connections. Each connection will be processed with the + /// `new_service` object provided, creating a new service per + /// connection. + #[cfg(feature = "runtime")] + pub fn serve_addr(&self, addr: &SocketAddr, new_service: S) -> ::Result> + where + S: NewService, + S::Error: Into>, + Bd: Payload, + { + let mut incoming = AddrIncoming::new(addr, None)?; + if self.keep_alive { + incoming.set_keepalive(Some(Duration::from_secs(90))); + } + Ok(self.serve_incoming(incoming, new_service)) + } + + /// Bind the provided `addr` with the `Handle` and return a [`Serve`](Serve) + /// + /// This method will bind the `addr` provided with a new TCP listener ready + /// to accept connections. Each connection will be processed with the + /// `new_service` object provided, creating a new service per + /// connection. + #[cfg(feature = "runtime")] + pub fn serve_addr_handle(&self, addr: &SocketAddr, handle: &Handle, new_service: S) -> ::Result> + where + S: NewService, + S::Error: Into>, + Bd: Payload, + { + let mut incoming = AddrIncoming::new(addr, Some(handle))?; + if self.keep_alive { + incoming.set_keepalive(Some(Duration::from_secs(90))); + } + Ok(self.serve_incoming(incoming, new_service)) + } + + /// Bind the provided stream of incoming IO objects with a `NewService`. + pub fn serve_incoming(&self, incoming: I, new_service: S) -> Serve + where + I: Stream, + I::Error: Into>, + I::Item: AsyncRead + AsyncWrite, + S: NewService, + S::Error: Into>, + Bd: Payload, + { + Serve { + incoming: incoming, + new_service: new_service, + protocol: self.clone(), + } + } +} + + +// ===== impl Connection ===== + +impl Connection +where + S: Service + 'static, + S::Error: Into>, + S::Future: Send, + I: AsyncRead + AsyncWrite + 'static, + B: Payload + 'static, +{ + /// Start a graceful shutdown process for this connection. 
+ /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + pub fn graceful_shutdown(&mut self) { + match *self.conn.as_mut().unwrap() { + Either::A(ref mut h1) => { + h1.disable_keep_alive(); + }, + Either::B(ref mut h2) => { + h2.graceful_shutdown(); + } + } + } + + /// Return the inner IO object, and additional information. + /// + /// If the IO object has been "rewound" the io will not contain those bytes rewound. + /// This should only be called after `poll_without_shutdown` signals + /// that the connection is "done". Otherwise, it may not have finished + /// flushing all necessary HTTP bytes. + /// + /// # Panics + /// This method will panic if this connection is using an h2 protocol. + pub fn into_parts(self) -> Parts { + self.try_into_parts().unwrap_or_else(|| panic!("h2 cannot into_inner")) + } + + /// Return the inner IO object, and additional information, if available. + /// + /// This method will return a `None` if this connection is using an h2 protocol. + pub fn try_into_parts(self) -> Option> { + match self.conn.unwrap() { + Either::A(h1) => { + let (io, read_buf, dispatch) = h1.into_inner(); + Some(Parts { + io: io, + read_buf: read_buf, + service: dispatch.into_service(), + _inner: (), + }) + }, + Either::B(_h2) => None, + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actally shutdown the IO object. Instead you + /// would take it back using `into_parts`. 
+ pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> { + loop { + let polled = match *self.conn.as_mut().unwrap() { + Either::A(ref mut h1) => h1.poll_without_shutdown(), + Either::B(ref mut h2) => return h2.poll().map(|x| x.map(|_| ())), + }; + match polled { + Ok(x) => return Ok(x), + Err(e) => { + debug!("error polling connection protocol without shutdown: {}", e); + match *e.kind() { + Kind::Parse(Parse::VersionH2) if self.fallback => { + self.upgrade_h2(); + continue; + } + _ => return Err(e), + } + } + } + } + } + + fn upgrade_h2(&mut self) { + trace!("Trying to upgrade connection to h2"); + let conn = self.conn.take(); + + let (io, read_buf, dispatch) = match conn.unwrap() { + Either::A(h1) => { + h1.into_inner() + }, + Either::B(_h2) => { + panic!("h2 cannot into_inner"); + } + }; + let mut rewind_io = Rewind::new(io); + rewind_io.rewind(read_buf); + let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), Exec::Default); + + debug_assert!(self.conn.is_none()); + self.conn = Some(Either::B(h2)); + } + + /// Enable this connection to support higher-level HTTP upgrades. + /// + /// See [the `upgrade` module](::upgrade) for more. + pub fn with_upgrades(self) -> UpgradeableConnection + where + I: Send, + { + UpgradeableConnection { + inner: self, + } + } +} + +impl Future for Connection +where + S: Service + 'static, + S::Error: Into>, + S::Future: Send, + I: AsyncRead + AsyncWrite + 'static, + B: Payload + 'static, +{ + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + match self.conn.poll() { + Ok(x) => return Ok(x.map(|opt| { + if let Some(proto::Dispatched::Upgrade(pending)) = opt { + // With no `Send` bound on `I`, we can't try to do + // upgrades here. In case a user was trying to use + // `Body::on_upgrade` with this API, send a special + // error letting them know about that. 
+ pending.manual(); + } + })), + Err(e) => { + debug!("error polling connection protocol: {}", e); + match *e.kind() { + Kind::Parse(Parse::VersionH2) if self.fallback => { + self.upgrade_h2(); + continue; + } + _ => return Err(e), + } + } + } + } + } +} + +impl fmt::Debug for Connection +where + S: Service, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Connection") + .finish() + } +} +// ===== impl Serve ===== + +impl Serve { + /// Spawn all incoming connections onto the executor in `Http`. + pub(super) fn spawn_all(self) -> SpawnAll { + SpawnAll { + serve: self, + } + } + + /// Get a reference to the incoming stream. + #[inline] + pub fn incoming_ref(&self) -> &I { + &self.incoming + } + + /// Get a mutable reference to the incoming stream. + #[inline] + pub fn incoming_mut(&mut self) -> &mut I { + &mut self.incoming + } +} + +impl Stream for Serve +where + I: Stream, + I::Item: AsyncRead + AsyncWrite, + I::Error: Into>, + S: NewService, + S::Error: Into>, + ::Future: Send + 'static, + B: Payload, +{ + type Item = Connecting; + type Error = ::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + if let Some(io) = try_ready!(self.incoming.poll().map_err(::Error::new_accept)) { + let new_fut = self.new_service.new_service(); + Ok(Async::Ready(Some(Connecting { + future: new_fut, + io: Some(io), + protocol: self.protocol.clone(), + }))) + } else { + Ok(Async::Ready(None)) + } + } +} + +// ===== impl Connecting ===== + +impl Future for Connecting +where + I: AsyncRead + AsyncWrite, + F: Future, + S: Service, + S::Future: Send + 'static, + B: Payload, +{ + type Item = Connection; + type Error = F::Error; + + fn poll(&mut self) -> Poll { + let service = try_ready!(self.future.poll()); + let io = self.io.take().expect("polled after complete"); + Ok(self.protocol.serve_connection(io, service).into()) + } +} + +// ===== impl SpawnAll ===== + +#[cfg(feature = "runtime")] +impl SpawnAll { + pub(super) fn local_addr(&self) -> SocketAddr { + 
self.serve.incoming.local_addr() + } +} + +impl SpawnAll { + pub(super) fn incoming_ref(&self) -> &I { + self.serve.incoming_ref() + } +} + +impl Future for SpawnAll +where + I: Stream, + I::Error: Into>, + I::Item: AsyncRead + AsyncWrite + Send + 'static, + S: NewService + Send + 'static, + S::Error: Into>, + S::Service: Send, + S::Future: Send + 'static, + ::Future: Send + 'static, + B: Payload, +{ + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + if let Some(connecting) = try_ready!(self.serve.poll()) { + let fut = connecting + .map_err(::Error::new_user_new_service) + // flatten basically + .and_then(|conn| conn.with_upgrades()) + .map_err(|err| debug!("conn error: {}", err)); + self.serve.protocol.exec.execute(fut)?; + } else { + return Ok(Async::Ready(())) + } + } + } +} + +mod upgrades { + use super::*; + + // A future binding a connection with a Service with Upgrade support. + // + // This type is unnameable outside the crate, and so basically just an + // `impl Future`, without requiring Rust 1.26. + #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct UpgradeableConnection + where + S: Service, + { + pub(super) inner: Connection, + } + + impl UpgradeableConnection + where + S: Service + 'static, + S::Error: Into>, + S::Future: Send, + I: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, + { + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. 
+ pub fn graceful_shutdown(&mut self) { + self.inner.graceful_shutdown() + } + } + + impl Future for UpgradeableConnection + where + S: Service + 'static, + S::Error: Into>, + S::Future: Send, + I: AsyncRead + AsyncWrite + Send + 'static, + B: Payload + 'static, + { + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + loop { + match self.inner.conn.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(Some(proto::Dispatched::Shutdown))) | + Ok(Async::Ready(None)) => { + return Ok(Async::Ready(())); + }, + Ok(Async::Ready(Some(proto::Dispatched::Upgrade(pending)))) => { + let h1 = match mem::replace(&mut self.inner.conn, None) { + Some(Either::A(h1)) => h1, + _ => unreachable!("Upgrade expects h1"), + }; + + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(Box::new(io), buf)); + return Ok(Async::Ready(())); + }, + Err(e) => { + debug!("error polling connection protocol: {}", e); + match *e.kind() { + Kind::Parse(Parse::VersionH2) if self.inner.fallback => { + self.inner.upgrade_h2(); + continue; + } + _ => return Err(e), + } + } + } + } + } + } +} + diff --git a/third_party/rust/hyper/src/server/listener.rs b/third_party/rust/hyper/src/server/listener.rs deleted file mode 100644 index 29db920e9cfa..000000000000 --- a/third_party/rust/hyper/src/server/listener.rs +++ /dev/null @@ -1,79 +0,0 @@ -use std::sync::{Arc, mpsc}; -use std::thread; - -use net::NetworkListener; - -pub struct ListenerPool { - acceptor: A -} - -impl ListenerPool { - /// Create a thread pool to manage the acceptor. - pub fn new(acceptor: A) -> ListenerPool { - ListenerPool { acceptor: acceptor } - } - - /// Runs the acceptor pool. Blocks until the acceptors are closed. - /// - /// ## Panics - /// - /// Panics if threads == 0. 
- pub fn accept(self, work: F, threads: usize) - where F: Fn(A::Stream) + Send + Sync + 'static { - assert!(threads != 0, "Can't accept on 0 threads."); - - let (super_tx, supervisor_rx) = mpsc::channel(); - - let work = Arc::new(work); - - // Begin work. - for _ in 0..threads { - spawn_with(super_tx.clone(), work.clone(), self.acceptor.clone()) - } - - // Monitor for panics. - // FIXME(reem): This won't ever exit since we still have a super_tx handle. - for _ in supervisor_rx.iter() { - spawn_with(super_tx.clone(), work.clone(), self.acceptor.clone()); - } - } -} - -fn spawn_with(supervisor: mpsc::Sender<()>, work: Arc, mut acceptor: A) -where A: NetworkListener + Send + 'static, - F: Fn(::Stream) + Send + Sync + 'static { - thread::spawn(move || { - let _sentinel = Sentinel::new(supervisor, ()); - - loop { - match acceptor.accept() { - Ok(stream) => work(stream), - Err(e) => { - info!("Connection failed: {}", e); - } - } - } - }); -} - -struct Sentinel { - value: Option, - supervisor: mpsc::Sender, -} - -impl Sentinel { - fn new(channel: mpsc::Sender, data: T) -> Sentinel { - Sentinel { - value: Some(data), - supervisor: channel, - } - } -} - -impl Drop for Sentinel { - fn drop(&mut self) { - // Respawn ourselves - let _ = self.supervisor.send(self.value.take().unwrap()); - } -} - diff --git a/third_party/rust/hyper/src/server/mod.rs b/third_party/rust/hyper/src/server/mod.rs index 12385134870d..f07ee03554a2 100644 --- a/third_party/rust/hyper/src/server/mod.rs +++ b/third_party/rust/hyper/src/server/mod.rs @@ -1,491 +1,278 @@ //! HTTP Server //! +//! A `Server` is created to listen on a port, parse HTTP requests, and hand +//! them off to a `Service`. +//! +//! There are two levels of APIs provide for constructing HTTP servers: +//! +//! - The higher-level [`Server`](Server) type. +//! - The lower-level [conn](conn) module. +//! //! # Server //! -//! A `Server` is created to listen on port, parse HTTP requests, and hand -//! them off to a `Handler`. 
By default, the Server will listen across multiple -//! threads, but that can be configured to a single thread if preferred. +//! The [`Server`](Server) is main way to start listening for HTTP requests. +//! It wraps a listener with a [`NewService`](::service), and then should +//! be executed to start serving requests. //! -//! # Handling requests -//! -//! You must pass a `Handler` to the Server that will handle requests. There is -//! a default implementation for `fn`s and closures, allowing you pass one of -//! those easily. +//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default. //! +//! ## Example //! //! ```no_run -//! use hyper::server::{Server, Request, Response}; +//! extern crate hyper; //! -//! fn hello(req: Request, res: Response) { -//! // handle things here +//! use hyper::{Body, Response, Server}; +//! use hyper::service::service_fn_ok; +//! +//! # #[cfg(feature = "runtime")] +//! fn main() { +//! # use hyper::rt::Future; +//! // Construct our SocketAddr to listen on... +//! let addr = ([127, 0, 0, 1], 3000).into(); +//! +//! // And a NewService to handle each connection... +//! let new_service = || { +//! service_fn_ok(|_req| { +//! Response::new(Body::from("Hello World")) +//! }) +//! }; +//! +//! // Then bind and serve... +//! let server = Server::bind(&addr) +//! .serve(new_service); +//! +//! // Finally, spawn `server` onto an Executor... +//! hyper::rt::run(server.map_err(|e| { +//! eprintln!("server error: {}", e); +//! })); //! } -//! -//! Server::http("0.0.0.0:0").unwrap().handle(hello).unwrap(); +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} //! ``` -//! -//! As with any trait, you can also define a struct and implement `Handler` -//! directly on your own type, and pass that to the `Server` instead. -//! -//! ```no_run -//! use std::sync::Mutex; -//! use std::sync::mpsc::{channel, Sender}; -//! use hyper::server::{Handler, Server, Request, Response}; -//! -//! struct SenderHandler { -//! 
sender: Mutex> -//! } -//! -//! impl Handler for SenderHandler { -//! fn handle(&self, req: Request, res: Response) { -//! self.sender.lock().unwrap().send("start").unwrap(); -//! } -//! } -//! -//! -//! let (tx, rx) = channel(); -//! Server::http("0.0.0.0:0").unwrap().handle(SenderHandler { -//! sender: Mutex::new(tx) -//! }).unwrap(); -//! ``` -//! -//! Since the `Server` will be listening on multiple threads, the `Handler` -//! must implement `Sync`: any mutable state must be synchronized. -//! -//! ```no_run -//! use std::sync::atomic::{AtomicUsize, Ordering}; -//! use hyper::server::{Server, Request, Response}; -//! -//! let counter = AtomicUsize::new(0); -//! Server::http("0.0.0.0:0").unwrap().handle(move |req: Request, res: Response| { -//! counter.fetch_add(1, Ordering::Relaxed); -//! }).unwrap(); -//! ``` -//! -//! # The `Request` and `Response` pair -//! -//! A `Handler` receives a pair of arguments, a `Request` and a `Response`. The -//! `Request` includes access to the `method`, `uri`, and `headers` of the -//! incoming HTTP request. It also implements `std::io::Read`, in order to -//! read any body, such as with `POST` or `PUT` messages. -//! -//! Likewise, the `Response` includes ways to set the `status` and `headers`, -//! and implements `std::io::Write` to allow writing the response body. -//! -//! ```no_run -//! use std::io; -//! use hyper::server::{Server, Request, Response}; -//! use hyper::status::StatusCode; -//! -//! Server::http("0.0.0.0:0").unwrap().handle(|mut req: Request, mut res: Response| { -//! match req.method { -//! hyper::Post => { -//! io::copy(&mut req, &mut res.start().unwrap()).unwrap(); -//! }, -//! _ => *res.status_mut() = StatusCode::MethodNotAllowed -//! } -//! }).unwrap(); -//! ``` -//! -//! ## An aside: Write Status -//! -//! The `Response` uses a phantom type parameter to determine its write status. -//! What does that mean? In short, it ensures you never write a body before -//! 
adding all headers, and never add a header after writing some of the body. -//! -//! This is often done in most implementations by include a boolean property -//! on the response, such as `headers_written`, checking that each time the -//! body has something to write, so as to make sure the headers are sent once, -//! and only once. But this has 2 downsides: -//! -//! 1. You are typically never notified that your late header is doing nothing. -//! 2. There's a runtime cost to checking on every write. -//! -//! Instead, hyper handles this statically, or at compile-time. A -//! `Response` includes a `headers_mut()` method, allowing you add more -//! headers. It also does not implement `Write`, so you can't accidentally -//! write early. Once the "head" of the response is correct, you can "send" it -//! out by calling `start` on the `Response`. This will return a new -//! `Response` object, that no longer has `headers_mut()`, but does -//! implement `Write`. + +pub mod conn; +#[cfg(feature = "runtime")] mod tcp; + use std::fmt; -use std::io::{self, ErrorKind, BufWriter, Write}; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::thread::{self, JoinHandle}; -use std::time::Duration; +#[cfg(feature = "runtime")] use std::net::SocketAddr; +#[cfg(feature = "runtime")] use std::time::Duration; -use num_cpus; +use futures::{Future, Stream, Poll}; +use tokio_io::{AsyncRead, AsyncWrite}; -pub use self::request::Request; -pub use self::response::Response; +use body::{Body, Payload}; +use service::{NewService, Service}; +// Renamed `Http` as `Http_` for now so that people upgrading don't see an +// error that `hyper::server::Http` is private... 
+use self::conn::{Http as Http_, SpawnAll}; +#[cfg(feature = "runtime")] use self::tcp::{AddrIncoming}; -pub use net::{Fresh, Streaming}; - -use Error; -use buffer::BufReader; -use header::{Headers, Expect, Connection}; -use http; -use method::Method; -use net::{NetworkListener, NetworkStream, HttpListener, HttpsListener, SslServer}; -use status::StatusCode; -use uri::RequestUri; -use version::HttpVersion::Http11; - -use self::listener::ListenerPool; - -pub mod request; -pub mod response; - -mod listener; - -/// A server can listen on a TCP socket. +/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. /// -/// Once listening, it will create a `Request`/`Response` pair for each -/// incoming connection, and hand them to the provided handler. +/// `Server` is a `Future` mapping a bound listener with a set of service +/// handlers. It is built using the [`Builder`](Builder), and the future +/// completes when the server has been shutdown. It should be run by an +/// `Executor`. +pub struct Server { + spawn_all: SpawnAll, +} + +/// A builder for a [`Server`](Server). #[derive(Debug)] -pub struct Server { - listener: L, - timeouts: Timeouts, +pub struct Builder { + incoming: I, + protocol: Http_, } -#[derive(Clone, Copy, Debug)] -struct Timeouts { - read: Option, - keep_alive: Option, -} +// ===== impl Server ===== -impl Default for Timeouts { - fn default() -> Timeouts { - Timeouts { - read: None, - keep_alive: Some(Duration::from_secs(5)) +impl Server { + /// Starts a [`Builder`](Builder) with the provided incoming stream. + pub fn builder(incoming: I) -> Builder { + Builder { + incoming, + protocol: Http_::new(), } } } -impl Server { - /// Creates a new server with the provided handler. - #[inline] - pub fn new(listener: L) -> Server { - Server { - listener: listener, - timeouts: Timeouts::default() - } - } - - /// Controls keep-alive for this server. 
+#[cfg(feature = "runtime")] +impl Server { + /// Binds to the provided address, and returns a [`Builder`](Builder). /// - /// The timeout duration passed will be used to determine how long - /// to keep the connection alive before dropping it. + /// # Panics /// - /// Passing `None` will disable keep-alive. - /// - /// Default is enabled with a 5 second timeout. - #[inline] - pub fn keep_alive(&mut self, timeout: Option) { - self.timeouts.keep_alive = timeout; + /// This method will panic if binding to the address fails. For a method + /// to bind to an address and return a `Result`, see `Server::try_bind`. + pub fn bind(addr: &SocketAddr) -> Builder { + let incoming = AddrIncoming::new(addr, None) + .unwrap_or_else(|e| { + panic!("error binding to {}: {}", addr, e); + }); + Server::builder(incoming) } - /// Sets the read timeout for all Request reads. - pub fn set_read_timeout(&mut self, dur: Option) { - self.listener.set_read_timeout(dur); - self.timeouts.read = dur; - } - - /// Sets the write timeout for all Response writes. - pub fn set_write_timeout(&mut self, dur: Option) { - self.listener.set_write_timeout(dur); - } - - /// Get the address that the server is listening on. - pub fn local_addr(&mut self) -> io::Result { - self.listener.local_addr() + /// Tries to bind to the provided address, and returns a [`Builder`](Builder). + pub fn try_bind(addr: &SocketAddr) -> ::Result> { + AddrIncoming::new(addr, None) + .map(Server::builder) } } -impl Server { - /// Creates a new server that will handle `HttpStream`s. - pub fn http(addr: To) -> ::Result> { - HttpListener::new(addr).map(Server::new) +#[cfg(feature = "runtime")] +impl Server { + /// Returns the local address that this server is bound to. + pub fn local_addr(&self) -> SocketAddr { + self.spawn_all.local_addr() } } -impl Server> { - /// Creates a new server that will handle `HttpStream`s over SSL. - /// - /// You can use any SSL implementation, as long as implements `hyper::net::Ssl`. 
- pub fn https(addr: A, ssl: S) -> ::Result>> { - HttpsListener::new(addr, ssl).map(Server::new) +impl Future for Server +where + I: Stream, + I::Error: Into>, + I::Item: AsyncRead + AsyncWrite + Send + 'static, + S: NewService + Send + 'static, + S::Error: Into>, + S::Service: Send, + S::Future: Send + 'static, + ::Future: Send + 'static, + B: Payload, +{ + type Item = (); + type Error = ::Error; + + fn poll(&mut self) -> Poll { + self.spawn_all.poll() } } -impl Server { - /// Binds to a socket and starts handling connections. - pub fn handle(self, handler: H) -> ::Result { - self.handle_threads(handler, num_cpus::get() * 5 / 4) - } - - /// Binds to a socket and starts handling connections with the provided - /// number of threads. - pub fn handle_threads(self, handler: H, - threads: usize) -> ::Result { - handle(self, handler, threads) - } -} - -fn handle(mut server: Server, handler: H, threads: usize) -> ::Result -where H: Handler + 'static, L: NetworkListener + Send + 'static { - let socket = try!(server.listener.local_addr()); - - debug!("threads = {:?}", threads); - let pool = ListenerPool::new(server.listener); - let worker = Worker::new(handler, server.timeouts); - let work = move |mut stream| worker.handle_connection(&mut stream); - - let guard = thread::spawn(move || pool.accept(work, threads)); - - Ok(Listening { - _guard: Some(guard), - socket: socket, - }) -} - -struct Worker { - handler: H, - timeouts: Timeouts, -} - -impl Worker { - fn new(handler: H, timeouts: Timeouts) -> Worker { - Worker { - handler: handler, - timeouts: timeouts, - } - } - - fn handle_connection(&self, stream: &mut S) where S: NetworkStream + Clone { - debug!("Incoming stream"); - - self.handler.on_connection_start(); - - let addr = match stream.peer_addr() { - Ok(addr) => addr, - Err(e) => { - info!("Peer Name error: {:?}", e); - return; - } - }; - - // FIXME: Use Type ascription - let stream_clone: &mut NetworkStream = &mut stream.clone(); - let mut rdr = 
BufReader::new(stream_clone); - let mut wrt = BufWriter::new(stream); - - while self.keep_alive_loop(&mut rdr, &mut wrt, addr) { - if let Err(e) = self.set_read_timeout(*rdr.get_ref(), self.timeouts.keep_alive) { - info!("set_read_timeout keep_alive {:?}", e); - break; - } - } - - self.handler.on_connection_end(); - - debug!("keep_alive loop ending for {}", addr); - } - - fn set_read_timeout(&self, s: &NetworkStream, timeout: Option) -> io::Result<()> { - s.set_read_timeout(timeout) - } - - fn keep_alive_loop(&self, rdr: &mut BufReader<&mut NetworkStream>, - wrt: &mut W, addr: SocketAddr) -> bool { - let req = match Request::new(rdr, addr) { - Ok(req) => req, - Err(Error::Io(ref e)) if e.kind() == ErrorKind::ConnectionAborted => { - trace!("tcp closed, cancelling keep-alive loop"); - return false; - } - Err(Error::Io(e)) => { - debug!("ioerror in keepalive loop = {:?}", e); - return false; - } - Err(e) => { - //TODO: send a 400 response - info!("request error = {:?}", e); - return false; - } - }; - - if !self.handle_expect(&req, wrt) { - return false; - } - - if let Err(e) = req.set_read_timeout(self.timeouts.read) { - info!("set_read_timeout {:?}", e); - return false; - } - - let mut keep_alive = self.timeouts.keep_alive.is_some() && - http::should_keep_alive(req.version, &req.headers); - let version = req.version; - let mut res_headers = Headers::new(); - if !keep_alive { - res_headers.set(Connection::close()); - } - { - let mut res = Response::new(wrt, &mut res_headers); - res.version = version; - self.handler.handle(req, res); - } - - // if the request was keep-alive, we need to check that the server agrees - // if it wasn't, then the server cannot force it to be true anyways - if keep_alive { - keep_alive = http::should_keep_alive(version, &res_headers); - } - - debug!("keep_alive = {:?} for {}", keep_alive, addr); - keep_alive - } - - fn handle_expect(&self, req: &Request, wrt: &mut W) -> bool { - if req.version == Http11 && req.headers.get() == 
Some(&Expect::Continue) { - let status = self.handler.check_continue((&req.method, &req.uri, &req.headers)); - match write!(wrt, "{} {}\r\n\r\n", Http11, status).and_then(|_| wrt.flush()) { - Ok(..) => (), - Err(e) => { - info!("error writing 100-continue: {:?}", e); - return false; - } - } - - if status != StatusCode::Continue { - debug!("non-100 status ({}) for Expect 100 request", status); - return false; - } - } - - true - } -} - -/// A listening server, which can later be closed. -pub struct Listening { - _guard: Option>, - /// The socket addresses that the server is bound to. - pub socket: SocketAddr, -} - -impl fmt::Debug for Listening { +impl fmt::Debug for Server { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Listening {{ socket: {:?} }}", self.socket) + f.debug_struct("Server") + .field("listener", &self.spawn_all.incoming_ref()) + .finish() } } -impl Drop for Listening { - fn drop(&mut self) { - let _ = self._guard.take().map(|g| g.join()); - } -} +// ===== impl Builder ===== -impl Listening { - /// Warning: This function doesn't work. The server remains listening after you called - /// it. See https://github.com/hyperium/hyper/issues/338 for more details. +impl Builder { + /// Start a new builder, wrapping an incoming stream and low-level options. /// - /// Stop the server from listening to its socket address. - pub fn close(&mut self) -> ::Result<()> { - let _ = self._guard.take(); - debug!("closing server"); - Ok(()) - } -} - -/// A handler that can handle incoming requests for a server. -pub trait Handler: Sync + Send { - /// Receives a `Request`/`Response` pair, and should perform some action on them. - /// - /// This could reading from the request, and writing to the response. - fn handle<'a, 'k>(&'a self, Request<'a, 'k>, Response<'a, Fresh>); - - /// Called when a Request includes a `Expect: 100-continue` header. 
- /// - /// By default, this will always immediately response with a `StatusCode::Continue`, - /// but can be overridden with custom behavior. - fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { - StatusCode::Continue - } - - /// This is run after a connection is received, on a per-connection basis (not a - /// per-request basis, as a connection with keep-alive may handle multiple - /// requests) - fn on_connection_start(&self) { } - - /// This is run before a connection is closed, on a per-connection basis (not a - /// per-request basis, as a connection with keep-alive may handle multiple - /// requests) - fn on_connection_end(&self) { } -} - -impl Handler for F where F: Fn(Request, Response), F: Sync + Send { - fn handle<'a, 'k>(&'a self, req: Request<'a, 'k>, res: Response<'a, Fresh>) { - self(req, res) - } -} - -#[cfg(test)] -mod tests { - use header::Headers; - use method::Method; - use mock::MockStream; - use status::StatusCode; - use uri::RequestUri; - - use super::{Request, Response, Fresh, Handler, Worker}; - - #[test] - fn test_check_continue_default() { - let mut mock = MockStream::with_input(b"\ - POST /upload HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Expect: 100-continue\r\n\ - Content-Length: 10\r\n\ - \r\n\ - 1234567890\ - "); - - fn handle(_: Request, res: Response) { - res.start().unwrap().end().unwrap(); + /// For a more convenient constructor, see [`Server::bind`](Server::bind). 
+ pub fn new(incoming: I, protocol: Http_) -> Self { + Builder { + incoming, + protocol, } - - Worker::new(handle, Default::default()).handle_connection(&mut mock); - let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; - assert_eq!(&mock.write[..cont.len()], cont); - let res = b"HTTP/1.1 200 OK\r\n"; - assert_eq!(&mock.write[cont.len()..cont.len() + res.len()], res); } - #[test] - fn test_check_continue_reject() { - struct Reject; - impl Handler for Reject { - fn handle<'a, 'k>(&'a self, _: Request<'a, 'k>, res: Response<'a, Fresh>) { - res.start().unwrap().end().unwrap(); - } + /// Sets whether HTTP/1 is required. + /// + /// Default is `false`. + pub fn http1_only(mut self, val: bool) -> Self { + self.protocol.http1_only(val); + self + } - fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { - StatusCode::ExpectationFailed - } + // Sets whether to bunch up HTTP/1 writes until the read buffer is empty. + // + // This isn't really desirable in most cases, only really being useful in + // silly pipeline benchmarks. + #[doc(hidden)] + pub fn http1_pipeline_flush(mut self, val: bool) -> Self { + self.protocol.pipeline_flush(val); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// # Note + /// + /// Setting this to `false` may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Default is `true`. + pub fn http1_writev(mut self, val: bool) -> Self { + self.protocol.http1_writev(val); + self + } + + /// Sets whether HTTP/2 is required. + /// + /// Default is `false`. + pub fn http2_only(mut self, val: bool) -> Self { + self.protocol.http2_only(val); + self + } + + /// Consume this `Builder`, creating a [`Server`](Server). 
+ /// + /// # Example + /// + /// ``` + /// # extern crate hyper; + /// # fn main() {} + /// # #[cfg(feature = "runtime")] + /// # fn run() { + /// use hyper::{Body, Response, Server}; + /// use hyper::service::service_fn_ok; + /// + /// // Construct our SocketAddr to listen on... + /// let addr = ([127, 0, 0, 1], 3000).into(); + /// + /// // And a NewService to handle each connection... + /// let new_service = || { + /// service_fn_ok(|_req| { + /// Response::new(Body::from("Hello World")) + /// }) + /// }; + /// + /// // Then bind and serve... + /// let server = Server::bind(&addr) + /// .serve(new_service); + /// + /// // Finally, spawn `server` onto an Executor... + /// # } + /// ``` + pub fn serve(self, new_service: S) -> Server + where + I: Stream, + I::Error: Into>, + I::Item: AsyncRead + AsyncWrite + Send + 'static, + S: NewService + Send + 'static, + S::Error: Into>, + S::Service: Send, + ::Future: Send + 'static, + B: Payload, + { + let serve = self.protocol.serve_incoming(self.incoming, new_service); + let spawn_all = serve.spawn_all(); + Server { + spawn_all, } - - let mut mock = MockStream::with_input(b"\ - POST /upload HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Expect: 100-continue\r\n\ - Content-Length: 10\r\n\ - \r\n\ - 1234567890\ - "); - - Worker::new(Reject, Default::default()).handle_connection(&mut mock); - assert_eq!(mock.write, &b"HTTP/1.1 417 Expectation Failed\r\n\r\n"[..]); } } + +#[cfg(feature = "runtime")] +impl Builder { + /// Set whether TCP keepalive messages are enabled on accepted connections. + /// + /// If `None` is specified, keepalive is disabled, otherwise the duration + /// specified will be the time to remain idle before sending TCP keepalive + /// probes. + pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { + self.incoming.set_keepalive(keepalive); + self + } + + /// Set the value of `TCP_NODELAY` option for accepted connections. 
+ pub fn tcp_nodelay(mut self, enabled: bool) -> Self { + self.incoming.set_nodelay(enabled); + self + } +} + diff --git a/third_party/rust/hyper/src/server/request.rs b/third_party/rust/hyper/src/server/request.rs deleted file mode 100644 index c632d27270f3..000000000000 --- a/third_party/rust/hyper/src/server/request.rs +++ /dev/null @@ -1,307 +0,0 @@ -//! Server Requests -//! -//! These are requests that a `hyper::Server` receives, and include its method, -//! target URI, headers, and message body. -use std::io::{self, Read}; -use std::net::SocketAddr; -use std::time::Duration; - -use buffer::BufReader; -use net::NetworkStream; -use version::{HttpVersion}; -use method::Method; -use header::{Headers, ContentLength, TransferEncoding}; -use http::h1::{self, Incoming, HttpReader}; -use http::h1::HttpReader::{SizedReader, ChunkedReader, EmptyReader}; -use uri::RequestUri; - -/// A request bundles several parts of an incoming `NetworkStream`, given to a `Handler`. -pub struct Request<'a, 'b: 'a> { - /// The IP address of the remote connection. - pub remote_addr: SocketAddr, - /// The `Method`, such as `Get`, `Post`, etc. - pub method: Method, - /// The headers of the incoming request. - pub headers: Headers, - /// The target request-uri for this request. - pub uri: RequestUri, - /// The version of HTTP for this request. - pub version: HttpVersion, - body: HttpReader<&'a mut BufReader<&'b mut NetworkStream>> -} - - -impl<'a, 'b: 'a> Request<'a, 'b> { - /// Create a new Request, reading the StartLine and Headers so they are - /// immediately useful. 
- pub fn new(stream: &'a mut BufReader<&'b mut NetworkStream>, addr: SocketAddr) - -> ::Result> { - - let Incoming { version, subject: (method, uri), headers } = try!(h1::parse_request(stream)); - debug!("Request Line: {:?} {:?} {:?}", method, uri, version); - debug!("{:?}", headers); - - let body = if headers.has::() { - match headers.get::() { - Some(&ContentLength(len)) => SizedReader(stream, len), - None => unreachable!() - } - } else if headers.has::() { - todo!("check for Transfer-Encoding: chunked"); - ChunkedReader(stream, None) - } else { - EmptyReader(stream) - }; - - Ok(Request { - remote_addr: addr, - method: method, - uri: uri, - headers: headers, - version: version, - body: body - }) - } - - /// Set the read timeout of the underlying NetworkStream. - #[inline] - pub fn set_read_timeout(&self, timeout: Option) -> io::Result<()> { - self.body.get_ref().get_ref().set_read_timeout(timeout) - } - - /// Get a reference to the underlying `NetworkStream`. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - self.body.get_ref().get_ref().downcast_ref() - } - - /// Get a reference to the underlying Ssl stream, if connected - /// over HTTPS. - /// - /// This is actually just an alias for `downcast_ref`. - #[inline] - pub fn ssl(&self) -> Option<&T> { - self.downcast_ref() - } - - /// Deconstruct a Request into its constituent parts. 
- #[inline] - pub fn deconstruct(self) -> (SocketAddr, Method, Headers, - RequestUri, HttpVersion, - HttpReader<&'a mut BufReader<&'b mut NetworkStream>>) { - (self.remote_addr, self.method, self.headers, - self.uri, self.version, self.body) - } -} - -impl<'a, 'b> Read for Request<'a, 'b> { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.body.read(buf) - } -} - -#[cfg(test)] -mod tests { - use buffer::BufReader; - use header::{Host, TransferEncoding, Encoding}; - use net::NetworkStream; - use mock::MockStream; - use super::Request; - - use std::io::{self, Read}; - use std::net::SocketAddr; - - fn sock(s: &str) -> SocketAddr { - s.parse().unwrap() - } - - fn read_to_string(mut req: Request) -> io::Result { - let mut s = String::new(); - try!(req.read_to_string(&mut s)); - Ok(s) - } - - #[test] - fn test_get_empty_body() { - let mut mock = MockStream::with_input(b"\ - GET / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - \r\n\ - I'm a bad request.\r\n\ - "); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - assert_eq!(read_to_string(req).unwrap(), "".to_owned()); - } - - #[test] - fn test_get_with_body() { - let mut mock = MockStream::with_input(b"\ - GET / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Content-Length: 19\r\n\ - \r\n\ - I'm a good request.\r\n\ - "); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - assert_eq!(read_to_string(req).unwrap(), "I'm a good request.".to_owned()); - } - - #[test] - fn test_head_empty_body() { - let mut mock = MockStream::with_input(b"\ - HEAD / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - \r\n\ - I'm a bad request.\r\n\ - "); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = 
BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - assert_eq!(read_to_string(req).unwrap(), "".to_owned()); - } - - #[test] - fn test_post_empty_body() { - let mut mock = MockStream::with_input(b"\ - POST / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - \r\n\ - I'm a bad request.\r\n\ - "); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - assert_eq!(read_to_string(req).unwrap(), "".to_owned()); - } - - #[test] - fn test_parse_chunked_request() { - let mut mock = MockStream::with_input(b"\ - POST / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - 1\r\n\ - q\r\n\ - 2\r\n\ - we\r\n\ - 2\r\n\ - rt\r\n\ - 0\r\n\ - \r\n" - ); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - - // The headers are correct? - match req.headers.get::() { - Some(host) => { - assert_eq!("example.domain", host.hostname); - }, - None => panic!("Host header expected!"), - }; - match req.headers.get::() { - Some(encodings) => { - assert_eq!(1, encodings.len()); - assert_eq!(Encoding::Chunked, encodings[0]); - } - None => panic!("Transfer-Encoding: chunked expected!"), - }; - // The content is correctly read? - assert_eq!(read_to_string(req).unwrap(), "qwert".to_owned()); - } - - /// Tests that when a chunk size is not a valid radix-16 number, an error - /// is returned. 
- #[test] - fn test_invalid_chunk_size_not_hex_digit() { - let mut mock = MockStream::with_input(b"\ - POST / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - X\r\n\ - 1\r\n\ - 0\r\n\ - \r\n" - ); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - - assert!(read_to_string(req).is_err()); - } - - /// Tests that when a chunk size contains an invalid extension, an error is - /// returned. - #[test] - fn test_invalid_chunk_size_extension() { - let mut mock = MockStream::with_input(b"\ - POST / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - 1 this is an invalid extension\r\n\ - 1\r\n\ - 0\r\n\ - \r\n" - ); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - - assert!(read_to_string(req).is_err()); - } - - /// Tests that when a valid extension that contains a digit is appended to - /// the chunk size, the chunk is correctly read. - #[test] - fn test_chunk_size_with_extension() { - let mut mock = MockStream::with_input(b"\ - POST / HTTP/1.1\r\n\ - Host: example.domain\r\n\ - Transfer-Encoding: chunked\r\n\ - \r\n\ - 1;this is an extension with a digit 1\r\n\ - 1\r\n\ - 0\r\n\ - \r\n" - ); - - // FIXME: Use Type ascription - let mock: &mut NetworkStream = &mut mock; - let mut stream = BufReader::new(mock); - - let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); - - assert_eq!(read_to_string(req).unwrap(), "1".to_owned()); - } - -} diff --git a/third_party/rust/hyper/src/server/response.rs b/third_party/rust/hyper/src/server/response.rs deleted file mode 100644 index 539f6df9ef89..000000000000 --- a/third_party/rust/hyper/src/server/response.rs +++ /dev/null @@ -1,432 +0,0 @@ -//! Server Responses -//! 
-//! These are responses sent by a `hyper::Server` to clients, after -//! receiving a request. -use std::any::{Any, TypeId}; -use std::marker::PhantomData; -use std::mem; -use std::io::{self, Write}; -use std::ptr; -use std::thread; - -use time::now_utc; - -use header; -use http::h1::{LINE_ENDING, HttpWriter}; -use http::h1::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter, EmptyWriter}; -use status; -use net::{Fresh, Streaming}; -use version; - - -/// The outgoing half for a Tcp connection, created by a `Server` and given to a `Handler`. -/// -/// The default `StatusCode` for a `Response` is `200 OK`. -/// -/// There is a `Drop` implementation for `Response` that will automatically -/// write the head and flush the body, if the handler has not already done so, -/// so that the server doesn't accidentally leave dangling requests. -#[derive(Debug)] -pub struct Response<'a, W: Any = Fresh> { - /// The HTTP version of this response. - pub version: version::HttpVersion, - // Stream the Response is writing to, not accessible through UnwrittenResponse - body: HttpWriter<&'a mut (Write + 'a)>, - // The status code for the request. - status: status::StatusCode, - // The outgoing headers on this response. - headers: &'a mut header::Headers, - - _writing: PhantomData -} - -impl<'a, W: Any> Response<'a, W> { - /// The status of this response. - #[inline] - pub fn status(&self) -> status::StatusCode { self.status } - - /// The headers of this response. - #[inline] - pub fn headers(&self) -> &header::Headers { &*self.headers } - - /// Construct a Response from its constituent parts. - #[inline] - pub fn construct(version: version::HttpVersion, - body: HttpWriter<&'a mut (Write + 'a)>, - status: status::StatusCode, - headers: &'a mut header::Headers) -> Response<'a, Fresh> { - Response { - status: status, - version: version, - body: body, - headers: headers, - _writing: PhantomData, - } - } - - /// Deconstruct this Response into its constituent parts. 
- #[inline] - pub fn deconstruct(self) -> (version::HttpVersion, HttpWriter<&'a mut (Write + 'a)>, - status::StatusCode, &'a mut header::Headers) { - unsafe { - let parts = ( - self.version, - ptr::read(&self.body), - self.status, - ptr::read(&self.headers) - ); - mem::forget(self); - parts - } - } - - fn write_head(&mut self) -> io::Result { - debug!("writing head: {:?} {:?}", self.version, self.status); - try!(write!(&mut self.body, "{} {}\r\n", self.version, self.status)); - - if !self.headers.has::() { - self.headers.set(header::Date(header::HttpDate(now_utc()))); - } - - let body_type = match self.status { - status::StatusCode::NoContent | status::StatusCode::NotModified => Body::Empty, - c if c.class() == status::StatusClass::Informational => Body::Empty, - _ => if let Some(cl) = self.headers.get::() { - Body::Sized(**cl) - } else { - Body::Chunked - } - }; - - // can't do in match above, thanks borrowck - if body_type == Body::Chunked { - let encodings = match self.headers.get_mut::() { - Some(&mut header::TransferEncoding(ref mut encodings)) => { - //TODO: check if chunked is already in encodings. use HashSet? - encodings.push(header::Encoding::Chunked); - false - }, - None => true - }; - - if encodings { - self.headers.set::( - header::TransferEncoding(vec![header::Encoding::Chunked])) - } - } - - - debug!("headers [\n{:?}]", self.headers); - try!(write!(&mut self.body, "{}", self.headers)); - try!(write!(&mut self.body, "{}", LINE_ENDING)); - - Ok(body_type) - } -} - -impl<'a> Response<'a, Fresh> { - /// Creates a new Response that can be used to write to a network stream. - #[inline] - pub fn new(stream: &'a mut (Write + 'a), headers: &'a mut header::Headers) -> - Response<'a, Fresh> { - Response { - status: status::StatusCode::Ok, - version: version::HttpVersion::Http11, - headers: headers, - body: ThroughWriter(stream), - _writing: PhantomData, - } - } - - /// Writes the body and ends the response. 
- /// - /// This is a shortcut method for when you have a response with a fixed - /// size, and would only need a single `write` call normally. - /// - /// # Example - /// - /// ``` - /// # use hyper::server::Response; - /// fn handler(res: Response) { - /// res.send(b"Hello World!").unwrap(); - /// } - /// ``` - /// - /// The above is the same, but shorter, than the longer: - /// - /// ``` - /// # use hyper::server::Response; - /// use std::io::Write; - /// use hyper::header::ContentLength; - /// fn handler(mut res: Response) { - /// let body = b"Hello World!"; - /// res.headers_mut().set(ContentLength(body.len() as u64)); - /// let mut res = res.start().unwrap(); - /// res.write_all(body).unwrap(); - /// } - /// ``` - #[inline] - pub fn send(self, body: &[u8]) -> io::Result<()> { - self.headers.set(header::ContentLength(body.len() as u64)); - let mut stream = try!(self.start()); - try!(stream.write_all(body)); - stream.end() - } - - /// Consume this Response, writing the Headers and Status and - /// creating a Response - pub fn start(mut self) -> io::Result> { - let body_type = try!(self.write_head()); - let (version, body, status, headers) = self.deconstruct(); - let stream = match body_type { - Body::Chunked => ChunkedWriter(body.into_inner()), - Body::Sized(len) => SizedWriter(body.into_inner(), len), - Body::Empty => EmptyWriter(body.into_inner()), - }; - - // "copy" to change the phantom type - Ok(Response { - version: version, - body: stream, - status: status, - headers: headers, - _writing: PhantomData, - }) - } - /// Get a mutable reference to the status. - #[inline] - pub fn status_mut(&mut self) -> &mut status::StatusCode { &mut self.status } - - /// Get a mutable reference to the Headers. - #[inline] - pub fn headers_mut(&mut self) -> &mut header::Headers { self.headers } -} - - -impl<'a> Response<'a, Streaming> { - /// Flushes all writing of a response to the client. 
- #[inline] - pub fn end(self) -> io::Result<()> { - trace!("ending"); - let (_, body, _, _) = self.deconstruct(); - try!(body.end()); - Ok(()) - } -} - -impl<'a> Write for Response<'a, Streaming> { - #[inline] - fn write(&mut self, msg: &[u8]) -> io::Result { - debug!("write {:?} bytes", msg.len()); - self.body.write(msg) - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - self.body.flush() - } -} - -#[derive(PartialEq)] -enum Body { - Chunked, - Sized(u64), - Empty, -} - -impl<'a, T: Any> Drop for Response<'a, T> { - fn drop(&mut self) { - if TypeId::of::() == TypeId::of::() { - if thread::panicking() { - self.status = status::StatusCode::InternalServerError; - } - - let mut body = match self.write_head() { - Ok(Body::Chunked) => ChunkedWriter(self.body.get_mut()), - Ok(Body::Sized(len)) => SizedWriter(self.body.get_mut(), len), - Ok(Body::Empty) => EmptyWriter(self.body.get_mut()), - Err(e) => { - debug!("error dropping request: {:?}", e); - return; - } - }; - end(&mut body); - } else { - end(&mut self.body); - }; - - - #[inline] - fn end(w: &mut W) { - match w.write(&[]) { - Ok(_) => match w.flush() { - Ok(_) => debug!("drop successful"), - Err(e) => debug!("error dropping request: {:?}", e) - }, - Err(e) => debug!("error dropping request: {:?}", e) - } - } - } -} - -#[cfg(test)] -mod tests { - use header::Headers; - use mock::MockStream; - use super::Response; - - macro_rules! lines { - ($s:ident = $($line:pat),+) => ({ - let s = String::from_utf8($s.write).unwrap(); - let mut lines = s.split_terminator("\r\n"); - - $( - match lines.next() { - Some($line) => (), - other => panic!("line mismatch: {:?} != {:?}", other, stringify!($line)) - } - )+ - - assert_eq!(lines.next(), None); - }) - } - - #[test] - fn test_fresh_start() { - let mut headers = Headers::new(); - let mut stream = MockStream::new(); - { - let res = Response::new(&mut stream, &mut headers); - res.start().unwrap().deconstruct(); - } - - lines! 
{ stream = - "HTTP/1.1 200 OK", - _date, - _transfer_encoding, - "" - } - } - - #[test] - fn test_streaming_end() { - let mut headers = Headers::new(); - let mut stream = MockStream::new(); - { - let res = Response::new(&mut stream, &mut headers); - res.start().unwrap().end().unwrap(); - } - - lines! { stream = - "HTTP/1.1 200 OK", - _date, - _transfer_encoding, - "", - "0", - "" // empty zero body - } - } - - #[test] - fn test_fresh_drop() { - use status::StatusCode; - let mut headers = Headers::new(); - let mut stream = MockStream::new(); - { - let mut res = Response::new(&mut stream, &mut headers); - *res.status_mut() = StatusCode::NotFound; - } - - lines! { stream = - "HTTP/1.1 404 Not Found", - _date, - _transfer_encoding, - "", - "0", - "" // empty zero body - } - } - - // x86 windows msvc does not support unwinding - // See https://github.com/rust-lang/rust/issues/25869 - #[cfg(not(all(windows, target_arch="x86", target_env="msvc")))] - #[test] - fn test_fresh_drop_panicing() { - use std::thread; - use std::sync::{Arc, Mutex}; - - use status::StatusCode; - - let stream = MockStream::new(); - let stream = Arc::new(Mutex::new(stream)); - let inner_stream = stream.clone(); - let join_handle = thread::spawn(move || { - let mut headers = Headers::new(); - let mut stream = inner_stream.lock().unwrap(); - let mut res = Response::new(&mut *stream, &mut headers); - *res.status_mut() = StatusCode::NotFound; - - panic!("inside") - }); - - assert!(join_handle.join().is_err()); - - let stream = match stream.lock() { - Err(poisoned) => poisoned.into_inner().clone(), - Ok(_) => unreachable!() - }; - - lines! 
{ stream = - "HTTP/1.1 500 Internal Server Error", - _date, - _transfer_encoding, - "", - "0", - "" // empty zero body - } - } - - - #[test] - fn test_streaming_drop() { - use std::io::Write; - use status::StatusCode; - let mut headers = Headers::new(); - let mut stream = MockStream::new(); - { - let mut res = Response::new(&mut stream, &mut headers); - *res.status_mut() = StatusCode::NotFound; - let mut stream = res.start().unwrap(); - stream.write_all(b"foo").unwrap(); - } - - lines! { stream = - "HTTP/1.1 404 Not Found", - _date, - _transfer_encoding, - "", - "3", - "foo", - "0", - "" // empty zero body - } - } - - #[test] - fn test_no_content() { - use status::StatusCode; - let mut headers = Headers::new(); - let mut stream = MockStream::new(); - { - let mut res = Response::new(&mut stream, &mut headers); - *res.status_mut() = StatusCode::NoContent; - res.start().unwrap(); - } - - lines! { stream = - "HTTP/1.1 204 No Content", - _date, - "" - } - } -} diff --git a/third_party/rust/hyper/src/server/tcp.rs b/third_party/rust/hyper/src/server/tcp.rs new file mode 100644 index 000000000000..462b3d3da614 --- /dev/null +++ b/third_party/rust/hyper/src/server/tcp.rs @@ -0,0 +1,247 @@ +use std::fmt; +use std::io; +use std::net::{SocketAddr, TcpListener as StdTcpListener}; +use std::time::{Duration, Instant}; + +use futures::{Async, Future, Poll, Stream}; +use tokio_reactor::Handle; +use tokio_tcp::TcpListener; +use tokio_timer::Delay; + +use self::addr_stream::AddrStream; + +/// A stream of connections from binding to an address. 
+#[must_use = "streams do nothing unless polled"] +pub struct AddrIncoming { + addr: SocketAddr, + listener: TcpListener, + sleep_on_errors: bool, + tcp_keepalive_timeout: Option, + tcp_nodelay: bool, + timeout: Option, +} + +impl AddrIncoming { + pub(super) fn new(addr: &SocketAddr, handle: Option<&Handle>) -> ::Result { + let listener = if let Some(handle) = handle { + let std_listener = StdTcpListener::bind(addr) + .map_err(::Error::new_listen)?; + TcpListener::from_std(std_listener, handle) + .map_err(::Error::new_listen)? + } else { + TcpListener::bind(addr).map_err(::Error::new_listen)? + }; + + let addr = listener.local_addr().map_err(::Error::new_listen)?; + + Ok(AddrIncoming { + addr: addr, + listener: listener, + sleep_on_errors: true, + tcp_keepalive_timeout: None, + tcp_nodelay: false, + timeout: None, + }) + } + + /// Get the local address bound to this listener. + pub fn local_addr(&self) -> SocketAddr { + self.addr + } + + /// Set whether TCP keepalive messages are enabled on accepted connections. + /// + /// If `None` is specified, keepalive is disabled, otherwise the duration + /// specified will be the time to remain idle before sending TCP keepalive + /// probes. + pub fn set_keepalive(&mut self, keepalive: Option) -> &mut Self { + self.tcp_keepalive_timeout = keepalive; + self + } + + /// Set the value of `TCP_NODELAY` option for accepted connections. + pub fn set_nodelay(&mut self, enabled: bool) -> &mut Self { + self.tcp_nodelay = enabled; + self + } + + /// Set whether to sleep on accept errors. + /// + /// A possible scenario is that the process has hit the max open files + /// allowed, and so trying to accept a new connection will fail with + /// `EMFILE`. In some cases, it's preferable to just wait for some time, if + /// the application will likely close some files (or connections), and try + /// to accept the connection again. 
If this option is `true`, the error + /// will be logged at the `error` level, since it is still a big deal, + /// and then the listener will sleep for 1 second. + /// + /// In other cases, hitting the max open files should be treat similarly + /// to being out-of-memory, and simply error (and shutdown). Setting + /// this option to `false` will allow that. + /// + /// Default is `true`. + pub fn set_sleep_on_errors(&mut self, val: bool) { + self.sleep_on_errors = val; + } +} + +impl Stream for AddrIncoming { + // currently unnameable... + type Item = AddrStream; + type Error = ::std::io::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + // Check if a previous timeout is active that was set by IO errors. + if let Some(ref mut to) = self.timeout { + match to.poll() { + Ok(Async::Ready(())) => {} + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(err) => { + error!("sleep timer error: {}", err); + } + } + } + self.timeout = None; + loop { + match self.listener.poll_accept() { + Ok(Async::Ready((socket, addr))) => { + if let Some(dur) = self.tcp_keepalive_timeout { + if let Err(e) = socket.set_keepalive(Some(dur)) { + trace!("error trying to set TCP keepalive: {}", e); + } + } + if let Err(e) = socket.set_nodelay(self.tcp_nodelay) { + trace!("error trying to set TCP nodelay: {}", e); + } + return Ok(Async::Ready(Some(AddrStream::new(socket, addr)))); + }, + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + if self.sleep_on_errors { + // Connection errors can be ignored directly, continue by + // accepting the next request. + if is_connection_error(&e) { + debug!("accepted connection already errored: {}", e); + continue; + } + // Sleep 1s. + let delay = Instant::now() + Duration::from_secs(1); + let mut timeout = Delay::new(delay); + + match timeout.poll() { + Ok(Async::Ready(())) => { + // Wow, it's been a second already? Ok then... 
+ error!("accept error: {}", e); + continue + }, + Ok(Async::NotReady) => { + error!("accept error: {}", e); + self.timeout = Some(timeout); + return Ok(Async::NotReady); + }, + Err(timer_err) => { + error!("couldn't sleep on error, timer error: {}", timer_err); + return Err(e); + } + } + } else { + return Err(e); + } + }, + } + } + } +} + +/// This function defines errors that are per-connection. Which basically +/// means that if we get this error from `accept()` system call it means +/// next connection might be ready to be accepted. +/// +/// All other errors will incur a timeout before next `accept()` is performed. +/// The timeout is useful to handle resource exhaustion errors like ENFILE +/// and EMFILE. Otherwise, could enter into tight loop. +fn is_connection_error(e: &io::Error) -> bool { + e.kind() == io::ErrorKind::ConnectionRefused || + e.kind() == io::ErrorKind::ConnectionAborted || + e.kind() == io::ErrorKind::ConnectionReset +} + +impl fmt::Debug for AddrIncoming { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("AddrIncoming") + .field("addr", &self.addr) + .field("sleep_on_errors", &self.sleep_on_errors) + .field("tcp_keepalive_timeout", &self.tcp_keepalive_timeout) + .field("tcp_nodelay", &self.tcp_nodelay) + .finish() + } +} + +mod addr_stream { + use std::io::{self, Read, Write}; + use std::net::SocketAddr; + use bytes::{Buf, BufMut}; + use futures::Poll; + use tokio_tcp::TcpStream; + use tokio_io::{AsyncRead, AsyncWrite}; + + + #[derive(Debug)] + pub struct AddrStream { + inner: TcpStream, + pub(super) remote_addr: SocketAddr, + } + + impl AddrStream { + pub(super) fn new(tcp: TcpStream, addr: SocketAddr) -> AddrStream { + AddrStream { + inner: tcp, + remote_addr: addr, + } + } + } + + impl Read for AddrStream { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.read(buf) + } + } + + impl Write for AddrStream { + #[inline] + fn write(&mut self, buf: &[u8]) -> io::Result { + 
self.inner.write(buf) + } + + #[inline] + fn flush(&mut self) -> io::Result<()> { + // TcpStream::flush is a noop, so skip calling it... + Ok(()) + } + } + + impl AsyncRead for AddrStream { + #[inline] + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } + + #[inline] + fn read_buf(&mut self, buf: &mut B) -> Poll { + self.inner.read_buf(buf) + } + } + + impl AsyncWrite for AddrStream { + #[inline] + fn shutdown(&mut self) -> Poll<(), io::Error> { + AsyncWrite::shutdown(&mut self.inner) + } + + #[inline] + fn write_buf(&mut self, buf: &mut B) -> Poll { + self.inner.write_buf(buf) + } + } +} diff --git a/third_party/rust/hyper/src/service/mod.rs b/third_party/rust/hyper/src/service/mod.rs new file mode 100644 index 000000000000..534519df9f2c --- /dev/null +++ b/third_party/rust/hyper/src/service/mod.rs @@ -0,0 +1,35 @@ +//! Services and NewServices +//! +//! - A [`Service`](Service) is a trait representing an asynchronous function +//! of a request to a response. It's similar to +//! `async fn(Request) -> Result`. +//! - A [`NewService`](NewService) is a trait creating specific instances of a +//! `Service`. +//! +//! These types are conceptually similar to those in +//! [tower](https://crates.io/crates/tower), while being specific to hyper. +//! +//! # Service +//! +//! In hyper, especially in the server setting, a `Service` is usually bound +//! to a single connection. It defines how to respond to **all** requests that +//! connection will receive. +//! +//! While it's possible to implement `Service` for a type manually, the helpers +//! [`service_fn`](service_fn) and [`service_fn_ok`](service_fn_ok) should be +//! sufficient for most cases. +//! +//! # NewService +//! +//! Since a `Service` is bound to a single connection, a [`Server`](::Server) +//! needs a way to make them as it accepts connections. This is what a +//! `NewService` does. +//! +//! 
Resources that need to be shared by all `Service`s can be put into a +//! `NewService`, and then passed to individual `Service`s when `new_service` +//! is called. +mod new_service; +mod service; + +pub use self::new_service::{NewService}; +pub use self::service::{service_fn, service_fn_ok, Service}; diff --git a/third_party/rust/hyper/src/service/new_service.rs b/third_party/rust/hyper/src/service/new_service.rs new file mode 100644 index 000000000000..37a7dbe6253c --- /dev/null +++ b/third_party/rust/hyper/src/service/new_service.rs @@ -0,0 +1,55 @@ +use std::error::Error as StdError; + +use futures::{Future, IntoFuture}; + +use body::Payload; +use super::Service; + +/// An asynchronous constructor of `Service`s. +pub trait NewService { + /// The `Payload` body of the `http::Request`. + type ReqBody: Payload; + + /// The `Payload` body of the `http::Response`. + type ResBody: Payload; + + /// The error type that can be returned by `Service`s. + type Error: Into>; + + /// The resolved `Service` from `new_service()`. + type Service: Service< + ReqBody=Self::ReqBody, + ResBody=Self::ResBody, + Error=Self::Error, + >; + + /// The future returned from `new_service` of a `Service`. + type Future: Future; + + /// The error type that can be returned when creating a new `Service`. + type InitError: Into>; + + /// Create a new `Service`. 
+ fn new_service(&self) -> Self::Future; +} + +impl NewService for F +where + F: Fn() -> R, + R: IntoFuture, + R::Error: Into>, + S: Service, +{ + type ReqBody = S::ReqBody; + type ResBody = S::ResBody; + type Error = S::Error; + type Service = S; + type Future = R::Future; + type InitError = R::Error; + + + fn new_service(&self) -> Self::Future { + (*self)().into_future() + } +} + diff --git a/third_party/rust/hyper/src/service/service.rs b/third_party/rust/hyper/src/service/service.rs new file mode 100644 index 000000000000..0f9d7e20201c --- /dev/null +++ b/third_party/rust/hyper/src/service/service.rs @@ -0,0 +1,165 @@ +use std::error::Error as StdError; +use std::fmt; +use std::marker::PhantomData; + +use futures::{future, Future, IntoFuture}; + +use body::Payload; +use common::Never; +use ::{Request, Response}; + +/// An asynchronous function from `Request` to `Response`. +pub trait Service { + /// The `Payload` body of the `http::Request`. + type ReqBody: Payload; + + /// The `Payload` body of the `http::Response`. + type ResBody: Payload; + + /// The error type that can occur within this `Service`. + /// + /// Note: Returning an `Error` to a hyper server will cause the connection + /// to be abruptly aborted. In most cases, it is better to return a `Response` + /// with a 4xx or 5xx status code. + type Error: Into>; + + /// The `Future` returned by this `Service`. + type Future: Future, Error=Self::Error>; + + /// Calls this `Service` with a request, returning a `Future` of the response. + fn call(&mut self, req: Request) -> Self::Future; +} + + +/// Create a `Service` from a function. 
+/// +/// # Example +/// +/// ```rust +/// use hyper::{Body, Request, Response, Version}; +/// use hyper::service::service_fn; +/// +/// let service = service_fn(|req: Request| { +/// if req.version() == Version::HTTP_11 { +/// Ok(Response::new(Body::from("Hello World"))) +/// } else { +/// // Note: it's usually better to return a Response +/// // with an appropriate StatusCode instead of an Err. +/// Err("not HTTP/1.1, abort connection") +/// } +/// }); +/// ``` +pub fn service_fn(f: F) -> ServiceFn +where + F: Fn(Request) -> S, + S: IntoFuture, +{ + ServiceFn { + f, + _req: PhantomData, + } +} + +/// Create a `Service` from a function that never errors. +/// +/// # Example +/// +/// ```rust +/// use hyper::{Body, Request, Response}; +/// use hyper::service::service_fn_ok; +/// +/// let service = service_fn_ok(|req: Request| { +/// println!("request: {} {}", req.method(), req.uri()); +/// Response::new(Body::from("Hello World")) +/// }); +/// ``` +pub fn service_fn_ok(f: F) -> ServiceFnOk +where + F: Fn(Request) -> Response, + S: Payload, +{ + ServiceFnOk { + f, + _req: PhantomData, + } +} + +// Not exported from crate as this will likely be replaced with `impl Service`. 
+pub struct ServiceFn { + f: F, + _req: PhantomData, +} + +impl Service for ServiceFn +where + F: Fn(Request) -> Ret, + ReqBody: Payload, + Ret: IntoFuture>, + Ret::Error: Into>, + ResBody: Payload, +{ + type ReqBody = ReqBody; + type ResBody = ResBody; + type Error = Ret::Error; + type Future = Ret::Future; + + fn call(&mut self, req: Request) -> Self::Future { + (self.f)(req).into_future() + } +} + +impl IntoFuture for ServiceFn { + type Future = future::FutureResult; + type Item = Self; + type Error = Never; + + fn into_future(self) -> Self::Future { + future::ok(self) + } +} + +impl fmt::Debug for ServiceFn { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("impl Service") + .finish() + } +} + +// Not exported from crate as this will likely be replaced with `impl Service`. +pub struct ServiceFnOk { + f: F, + _req: PhantomData, +} + +impl Service for ServiceFnOk +where + F: Fn(Request) -> Response, + ReqBody: Payload, + ResBody: Payload, +{ + type ReqBody = ReqBody; + type ResBody = ResBody; + type Error = Never; + type Future = future::FutureResult, Never>; + + fn call(&mut self, req: Request) -> Self::Future { + future::ok((self.f)(req)) + } +} + +impl IntoFuture for ServiceFnOk { + type Future = future::FutureResult; + type Item = Self; + type Error = Never; + + fn into_future(self) -> Self::Future { + future::ok(self) + } +} + +impl fmt::Debug for ServiceFnOk { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("impl Service") + .finish() + } +} diff --git a/third_party/rust/hyper/src/status.rs b/third_party/rust/hyper/src/status.rs deleted file mode 100644 index 5435182559c9..000000000000 --- a/third_party/rust/hyper/src/status.rs +++ /dev/null @@ -1,748 +0,0 @@ -//! HTTP status codes -use std::fmt; -use std::cmp::Ordering; - -// shamelessly lifted from Teepee. I tried a few schemes, this really -// does seem like the best. Improved scheme to support arbitrary status codes. 
- -/// An HTTP status code (`status-code` in RFC 7230 et al.). -/// -/// This enum contains all common status codes and an Unregistered -/// extension variant. It allows status codes in the range [0, 65535], as any -/// `u16` integer may be used as a status code for XHR requests. It is -/// recommended to only use values between [100, 599], since only these are -/// defined as valid status codes with a status class by HTTP. -/// -/// If you encounter a status code that you do not know how to deal with, you -/// should treat it as the `x00` status code—e.g. for code 123, treat it as -/// 100 (Continue). This can be achieved with -/// `self.class().default_code()`: -/// -/// ```rust -/// # use hyper::status::StatusCode; -/// let status = StatusCode::Unregistered(123); -/// assert_eq!(status.class().default_code(), StatusCode::Continue); -/// ``` -/// -/// IANA maintain the [Hypertext Transfer Protocol (HTTP) Status Code -/// Registry](http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) which is -/// the source for this enum (with one exception, 418 I'm a teapot, which is -/// inexplicably not in the register). 
-#[derive(Debug, Hash)] -pub enum StatusCode { - /// 100 Continue - /// [[RFC7231, Section 6.2.1](https://tools.ietf.org/html/rfc7231#section-6.2.1)] - Continue, - /// 101 Switching Protocols - /// [[RFC7231, Section 6.2.2](https://tools.ietf.org/html/rfc7231#section-6.2.2)] - SwitchingProtocols, - /// 102 Processing - /// [[RFC2518](https://tools.ietf.org/html/rfc2518)] - Processing, - - /// 200 OK - /// [[RFC7231, Section 6.3.1](https://tools.ietf.org/html/rfc7231#section-6.3.1)] - Ok, - /// 201 Created - /// [[RFC7231, Section 6.3.2](https://tools.ietf.org/html/rfc7231#section-6.3.2)] - Created, - /// 202 Accepted - /// [[RFC7231, Section 6.3.3](https://tools.ietf.org/html/rfc7231#section-6.3.3)] - Accepted, - /// 203 Non-Authoritative Information - /// [[RFC7231, Section 6.3.4](https://tools.ietf.org/html/rfc7231#section-6.3.4)] - NonAuthoritativeInformation, - /// 204 No Content - /// [[RFC7231, Section 6.3.5](https://tools.ietf.org/html/rfc7231#section-6.3.5)] - NoContent, - /// 205 Reset Content - /// [[RFC7231, Section 6.3.6](https://tools.ietf.org/html/rfc7231#section-6.3.6)] - ResetContent, - /// 206 Partial Content - /// [[RFC7233, Section 4.1](https://tools.ietf.org/html/rfc7233#section-4.1)] - PartialContent, - /// 207 Multi-Status - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - MultiStatus, - /// 208 Already Reported - /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] - AlreadyReported, - - /// 226 IM Used - /// [[RFC3229](https://tools.ietf.org/html/rfc3229)] - ImUsed, - - /// 300 Multiple Choices - /// [[RFC7231, Section 6.4.1](https://tools.ietf.org/html/rfc7231#section-6.4.1)] - MultipleChoices, - /// 301 Moved Permanently - /// [[RFC7231, Section 6.4.2](https://tools.ietf.org/html/rfc7231#section-6.4.2)] - MovedPermanently, - /// 302 Found - /// [[RFC7231, Section 6.4.3](https://tools.ietf.org/html/rfc7231#section-6.4.3)] - Found, - /// 303 See Other - /// [[RFC7231, Section 6.4.4](https://tools.ietf.org/html/rfc7231#section-6.4.4)] - 
SeeOther, - /// 304 Not Modified - /// [[RFC7232, Section 4.1](https://tools.ietf.org/html/rfc7232#section-4.1)] - NotModified, - /// 305 Use Proxy - /// [[RFC7231, Section 6.4.5](https://tools.ietf.org/html/rfc7231#section-6.4.5)] - UseProxy, - /// 307 Temporary Redirect - /// [[RFC7231, Section 6.4.7](https://tools.ietf.org/html/rfc7231#section-6.4.7)] - TemporaryRedirect, - /// 308 Permanent Redirect - /// [[RFC7238](https://tools.ietf.org/html/rfc7238)] - PermanentRedirect, - - /// 400 Bad Request - /// [[RFC7231, Section 6.5.1](https://tools.ietf.org/html/rfc7231#section-6.5.1)] - BadRequest, - /// 401 Unauthorized - /// [[RFC7235, Section 3.1](https://tools.ietf.org/html/rfc7235#section-3.1)] - Unauthorized, - /// 402 Payment Required - /// [[RFC7231, Section 6.5.2](https://tools.ietf.org/html/rfc7231#section-6.5.2)] - PaymentRequired, - /// 403 Forbidden - /// [[RFC7231, Section 6.5.3](https://tools.ietf.org/html/rfc7231#section-6.5.3)] - Forbidden, - /// 404 Not Found - /// [[RFC7231, Section 6.5.4](https://tools.ietf.org/html/rfc7231#section-6.5.4)] - NotFound, - /// 405 Method Not Allowed - /// [[RFC7231, Section 6.5.5](https://tools.ietf.org/html/rfc7231#section-6.5.5)] - MethodNotAllowed, - /// 406 Not Acceptable - /// [[RFC7231, Section 6.5.6](https://tools.ietf.org/html/rfc7231#section-6.5.6)] - NotAcceptable, - /// 407 Proxy Authentication Required - /// [[RFC7235, Section 3.2](https://tools.ietf.org/html/rfc7235#section-3.2)] - ProxyAuthenticationRequired, - /// 408 Request Timeout - /// [[RFC7231, Section 6.5.7](https://tools.ietf.org/html/rfc7231#section-6.5.7)] - RequestTimeout, - /// 409 Conflict - /// [[RFC7231, Section 6.5.8](https://tools.ietf.org/html/rfc7231#section-6.5.8)] - Conflict, - /// 410 Gone - /// [[RFC7231, Section 6.5.9](https://tools.ietf.org/html/rfc7231#section-6.5.9)] - Gone, - /// 411 Length Required - /// [[RFC7231, Section 6.5.10](https://tools.ietf.org/html/rfc7231#section-6.5.10)] - LengthRequired, - /// 412 Precondition 
Failed - /// [[RFC7232, Section 4.2](https://tools.ietf.org/html/rfc7232#section-4.2)] - PreconditionFailed, - /// 413 Payload Too Large - /// [[RFC7231, Section 6.5.11](https://tools.ietf.org/html/rfc7231#section-6.5.11)] - PayloadTooLarge, - /// 414 URI Too Long - /// [[RFC7231, Section 6.5.12](https://tools.ietf.org/html/rfc7231#section-6.5.12)] - UriTooLong, - /// 415 Unsupported Media Type - /// [[RFC7231, Section 6.5.13](https://tools.ietf.org/html/rfc7231#section-6.5.13)] - UnsupportedMediaType, - /// 416 Range Not Satisfiable - /// [[RFC7233, Section 4.4](https://tools.ietf.org/html/rfc7233#section-4.4)] - RangeNotSatisfiable, - /// 417 Expectation Failed - /// [[RFC7231, Section 6.5.14](https://tools.ietf.org/html/rfc7231#section-6.5.14)] - ExpectationFailed, - /// 418 I'm a teapot - /// [curiously, not registered by IANA, but [RFC2324](https://tools.ietf.org/html/rfc2324)] - ImATeapot, - - /// 421 Misdirected Request - /// [RFC7540, Section 9.1.2](http://tools.ietf.org/html/rfc7540#section-9.1.2) - MisdirectedRequest, - /// 422 Unprocessable Entity - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - UnprocessableEntity, - /// 423 Locked - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - Locked, - /// 424 Failed Dependency - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - FailedDependency, - - /// 426 Upgrade Required - /// [[RFC7231, Section 6.5.15](https://tools.ietf.org/html/rfc7231#section-6.5.15)] - UpgradeRequired, - - /// 428 Precondition Required - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - PreconditionRequired, - /// 429 Too Many Requests - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - TooManyRequests, - - /// 431 Request Header Fields Too Large - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - RequestHeaderFieldsTooLarge, - - /// 451 Unavailable For Legal Reasons - /// [[RFC7725](http://tools.ietf.org/html/rfc7725)] - UnavailableForLegalReasons, - - /// 500 Internal Server Error - /// [[RFC7231, 
Section 6.6.1](https://tools.ietf.org/html/rfc7231#section-6.6.1)] - InternalServerError, - /// 501 Not Implemented - /// [[RFC7231, Section 6.6.2](https://tools.ietf.org/html/rfc7231#section-6.6.2)] - NotImplemented, - /// 502 Bad Gateway - /// [[RFC7231, Section 6.6.3](https://tools.ietf.org/html/rfc7231#section-6.6.3)] - BadGateway, - /// 503 Service Unavailable - /// [[RFC7231, Section 6.6.4](https://tools.ietf.org/html/rfc7231#section-6.6.4)] - ServiceUnavailable, - /// 504 Gateway Timeout - /// [[RFC7231, Section 6.6.5](https://tools.ietf.org/html/rfc7231#section-6.6.5)] - GatewayTimeout, - /// 505 HTTP Version Not Supported - /// [[RFC7231, Section 6.6.6](https://tools.ietf.org/html/rfc7231#section-6.6.6)] - HttpVersionNotSupported, - /// 506 Variant Also Negotiates - /// [[RFC2295](https://tools.ietf.org/html/rfc2295)] - VariantAlsoNegotiates, - /// 507 Insufficient Storage - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - InsufficientStorage, - /// 508 Loop Detected - /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] - LoopDetected, - - /// 510 Not Extended - /// [[RFC2774](https://tools.ietf.org/html/rfc2774)] - NotExtended, - /// 511 Network Authentication Required - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - NetworkAuthenticationRequired, - - /// A status code not in the IANA HTTP status code registry or very well known - // `ImATeapot` is not registered. 
- Unregistered(u16), -} - -impl StatusCode { - - #[doc(hidden)] - pub fn from_u16(n: u16) -> StatusCode { - match n { - 100 => StatusCode::Continue, - 101 => StatusCode::SwitchingProtocols, - 102 => StatusCode::Processing, - 200 => StatusCode::Ok, - 201 => StatusCode::Created, - 202 => StatusCode::Accepted, - 203 => StatusCode::NonAuthoritativeInformation, - 204 => StatusCode::NoContent, - 205 => StatusCode::ResetContent, - 206 => StatusCode::PartialContent, - 207 => StatusCode::MultiStatus, - 208 => StatusCode::AlreadyReported, - 226 => StatusCode::ImUsed, - 300 => StatusCode::MultipleChoices, - 301 => StatusCode::MovedPermanently, - 302 => StatusCode::Found, - 303 => StatusCode::SeeOther, - 304 => StatusCode::NotModified, - 305 => StatusCode::UseProxy, - 307 => StatusCode::TemporaryRedirect, - 308 => StatusCode::PermanentRedirect, - 400 => StatusCode::BadRequest, - 401 => StatusCode::Unauthorized, - 402 => StatusCode::PaymentRequired, - 403 => StatusCode::Forbidden, - 404 => StatusCode::NotFound, - 405 => StatusCode::MethodNotAllowed, - 406 => StatusCode::NotAcceptable, - 407 => StatusCode::ProxyAuthenticationRequired, - 408 => StatusCode::RequestTimeout, - 409 => StatusCode::Conflict, - 410 => StatusCode::Gone, - 411 => StatusCode::LengthRequired, - 412 => StatusCode::PreconditionFailed, - 413 => StatusCode::PayloadTooLarge, - 414 => StatusCode::UriTooLong, - 415 => StatusCode::UnsupportedMediaType, - 416 => StatusCode::RangeNotSatisfiable, - 417 => StatusCode::ExpectationFailed, - 418 => StatusCode::ImATeapot, - 421 => StatusCode::MisdirectedRequest, - 422 => StatusCode::UnprocessableEntity, - 423 => StatusCode::Locked, - 424 => StatusCode::FailedDependency, - 426 => StatusCode::UpgradeRequired, - 428 => StatusCode::PreconditionRequired, - 429 => StatusCode::TooManyRequests, - 431 => StatusCode::RequestHeaderFieldsTooLarge, - 451 => StatusCode::UnavailableForLegalReasons, - 500 => StatusCode::InternalServerError, - 501 => StatusCode::NotImplemented, - 502 => 
StatusCode::BadGateway, - 503 => StatusCode::ServiceUnavailable, - 504 => StatusCode::GatewayTimeout, - 505 => StatusCode::HttpVersionNotSupported, - 506 => StatusCode::VariantAlsoNegotiates, - 507 => StatusCode::InsufficientStorage, - 508 => StatusCode::LoopDetected, - 510 => StatusCode::NotExtended, - 511 => StatusCode::NetworkAuthenticationRequired, - _ => StatusCode::Unregistered(n), - } - } - - #[doc(hidden)] - pub fn to_u16(&self) -> u16 { - match *self { - StatusCode::Continue => 100, - StatusCode::SwitchingProtocols => 101, - StatusCode::Processing => 102, - StatusCode::Ok => 200, - StatusCode::Created => 201, - StatusCode::Accepted => 202, - StatusCode::NonAuthoritativeInformation => 203, - StatusCode::NoContent => 204, - StatusCode::ResetContent => 205, - StatusCode::PartialContent => 206, - StatusCode::MultiStatus => 207, - StatusCode::AlreadyReported => 208, - StatusCode::ImUsed => 226, - StatusCode::MultipleChoices => 300, - StatusCode::MovedPermanently => 301, - StatusCode::Found => 302, - StatusCode::SeeOther => 303, - StatusCode::NotModified => 304, - StatusCode::UseProxy => 305, - StatusCode::TemporaryRedirect => 307, - StatusCode::PermanentRedirect => 308, - StatusCode::BadRequest => 400, - StatusCode::Unauthorized => 401, - StatusCode::PaymentRequired => 402, - StatusCode::Forbidden => 403, - StatusCode::NotFound => 404, - StatusCode::MethodNotAllowed => 405, - StatusCode::NotAcceptable => 406, - StatusCode::ProxyAuthenticationRequired => 407, - StatusCode::RequestTimeout => 408, - StatusCode::Conflict => 409, - StatusCode::Gone => 410, - StatusCode::LengthRequired => 411, - StatusCode::PreconditionFailed => 412, - StatusCode::PayloadTooLarge => 413, - StatusCode::UriTooLong => 414, - StatusCode::UnsupportedMediaType => 415, - StatusCode::RangeNotSatisfiable => 416, - StatusCode::ExpectationFailed => 417, - StatusCode::ImATeapot => 418, - StatusCode::MisdirectedRequest => 421, - StatusCode::UnprocessableEntity => 422, - StatusCode::Locked => 423, 
- StatusCode::FailedDependency => 424, - StatusCode::UpgradeRequired => 426, - StatusCode::PreconditionRequired => 428, - StatusCode::TooManyRequests => 429, - StatusCode::RequestHeaderFieldsTooLarge => 431, - StatusCode::UnavailableForLegalReasons => 451, - StatusCode::InternalServerError => 500, - StatusCode::NotImplemented => 501, - StatusCode::BadGateway => 502, - StatusCode::ServiceUnavailable => 503, - StatusCode::GatewayTimeout => 504, - StatusCode::HttpVersionNotSupported => 505, - StatusCode::VariantAlsoNegotiates => 506, - StatusCode::InsufficientStorage => 507, - StatusCode::LoopDetected => 508, - StatusCode::NotExtended => 510, - StatusCode::NetworkAuthenticationRequired => 511, - StatusCode::Unregistered(n) => n, - } - } - - /// Get the standardised `reason-phrase` for this status code. - /// - /// This is mostly here for servers writing responses, but could potentially have application - /// at other times. - /// - /// The reason phrase is defined as being exclusively for human readers. You should avoid - /// deriving any meaning from it at all costs. - /// - /// Bear in mind also that in HTTP/2.0 the reason phrase is abolished from transmission, and so - /// this canonical reason phrase really is the only reason phrase you’ll find. 
- pub fn canonical_reason(&self) -> Option<&'static str> { - match *self { - StatusCode::Continue => Some("Continue"), - StatusCode::SwitchingProtocols => Some("Switching Protocols"), - StatusCode::Processing => Some("Processing"), - - StatusCode::Ok => Some("OK"), - StatusCode::Created => Some("Created"), - StatusCode::Accepted => Some("Accepted"), - StatusCode::NonAuthoritativeInformation => Some("Non-Authoritative Information"), - StatusCode::NoContent => Some("No Content"), - StatusCode::ResetContent => Some("Reset Content"), - StatusCode::PartialContent => Some("Partial Content"), - StatusCode::MultiStatus => Some("Multi-Status"), - StatusCode::AlreadyReported => Some("Already Reported"), - - StatusCode::ImUsed => Some("IM Used"), - - StatusCode::MultipleChoices => Some("Multiple Choices"), - StatusCode::MovedPermanently => Some("Moved Permanently"), - StatusCode::Found => Some("Found"), - StatusCode::SeeOther => Some("See Other"), - StatusCode::NotModified => Some("Not Modified"), - StatusCode::UseProxy => Some("Use Proxy"), - - StatusCode::TemporaryRedirect => Some("Temporary Redirect"), - StatusCode::PermanentRedirect => Some("Permanent Redirect"), - - StatusCode::BadRequest => Some("Bad Request"), - StatusCode::Unauthorized => Some("Unauthorized"), - StatusCode::PaymentRequired => Some("Payment Required"), - StatusCode::Forbidden => Some("Forbidden"), - StatusCode::NotFound => Some("Not Found"), - StatusCode::MethodNotAllowed => Some("Method Not Allowed"), - StatusCode::NotAcceptable => Some("Not Acceptable"), - StatusCode::ProxyAuthenticationRequired => Some("Proxy Authentication Required"), - StatusCode::RequestTimeout => Some("Request Timeout"), - StatusCode::Conflict => Some("Conflict"), - StatusCode::Gone => Some("Gone"), - StatusCode::LengthRequired => Some("Length Required"), - StatusCode::PreconditionFailed => Some("Precondition Failed"), - StatusCode::PayloadTooLarge => Some("Payload Too Large"), - StatusCode::UriTooLong => Some("URI Too Long"), - 
StatusCode::UnsupportedMediaType => Some("Unsupported Media Type"), - StatusCode::RangeNotSatisfiable => Some("Range Not Satisfiable"), - StatusCode::ExpectationFailed => Some("Expectation Failed"), - StatusCode::ImATeapot => Some("I'm a teapot"), - - StatusCode::MisdirectedRequest => Some("Misdirected Request"), - StatusCode::UnprocessableEntity => Some("Unprocessable Entity"), - StatusCode::Locked => Some("Locked"), - StatusCode::FailedDependency => Some("Failed Dependency"), - - StatusCode::UpgradeRequired => Some("Upgrade Required"), - - StatusCode::PreconditionRequired => Some("Precondition Required"), - StatusCode::TooManyRequests => Some("Too Many Requests"), - - StatusCode::RequestHeaderFieldsTooLarge => Some("Request Header Fields Too Large"), - - StatusCode::UnavailableForLegalReasons => Some("Unavailable For Legal Reasons"), - - StatusCode::InternalServerError => Some("Internal Server Error"), - StatusCode::NotImplemented => Some("Not Implemented"), - StatusCode::BadGateway => Some("Bad Gateway"), - StatusCode::ServiceUnavailable => Some("Service Unavailable"), - StatusCode::GatewayTimeout => Some("Gateway Timeout"), - StatusCode::HttpVersionNotSupported => Some("HTTP Version Not Supported"), - StatusCode::VariantAlsoNegotiates => Some("Variant Also Negotiates"), - StatusCode::InsufficientStorage => Some("Insufficient Storage"), - StatusCode::LoopDetected => Some("Loop Detected"), - - StatusCode::NotExtended => Some("Not Extended"), - StatusCode::NetworkAuthenticationRequired => Some("Network Authentication Required"), - StatusCode::Unregistered(..) => None - } - } - - /// Determine the class of a status code, based on its first digit. 
- pub fn class(&self) -> StatusClass { - match self.to_u16() { - 100...199 => StatusClass::Informational, - 200...299 => StatusClass::Success, - 300...399 => StatusClass::Redirection, - 400...499 => StatusClass::ClientError, - 500...599 => StatusClass::ServerError, - _ => StatusClass::NoClass, - } - } - - /// Check if class is Informational. - pub fn is_informational(&self) -> bool { - self.class() == StatusClass::Informational - } - - /// Check if class is Success. - pub fn is_success(&self) -> bool { - self.class() == StatusClass::Success - } - - /// Check if class is Redirection. - pub fn is_redirection(&self) -> bool { - self.class() == StatusClass::Redirection - } - - /// Check if class is ClientError. - pub fn is_client_error(&self) -> bool { - self.class() == StatusClass::ClientError - } - - /// Check if class is ServerError. - pub fn is_server_error(&self) -> bool { - self.class() == StatusClass::ServerError - } - - /// Check if class is NoClass - pub fn is_strange_status(&self) -> bool { - self.class() == StatusClass::NoClass - } -} - -impl Copy for StatusCode {} - -/// Formats the status code, *including* the canonical reason. 
-/// -/// ```rust -/// # use hyper::status::StatusCode::{ImATeapot, Unregistered}; -/// assert_eq!(format!("{}", ImATeapot), "418 I'm a teapot"); -/// assert_eq!(format!("{}", Unregistered(123)), -/// "123 "); -/// ``` -impl fmt::Display for StatusCode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} {}", self.to_u16(), - self.canonical_reason().unwrap_or("")) - } -} - -impl PartialEq for StatusCode { - #[inline] - fn eq(&self, other: &StatusCode) -> bool { - self.to_u16() == other.to_u16() - } -} - -impl Eq for StatusCode {} - -impl Clone for StatusCode { - #[inline] - fn clone(&self) -> StatusCode { - *self - } -} - -impl PartialOrd for StatusCode { - #[inline] - fn partial_cmp(&self, other: &StatusCode) -> Option { - self.to_u16().partial_cmp(&(other.to_u16())) - } -} - -impl Ord for StatusCode { - #[inline] - fn cmp(&self, other: &StatusCode) -> Ordering { - if *self < *other { - Ordering::Less - } else if *self > *other { - Ordering::Greater - } else { - Ordering::Equal - } - } -} - -/// The class of an HTTP `status-code`. -/// -/// [RFC 7231, section 6 (Response Status Codes)](https://tools.ietf.org/html/rfc7231#section-6): -/// -/// > The first digit of the status-code defines the class of response. -/// > The last two digits do not have any categorization role. -/// -/// And: -/// -/// > HTTP status codes are extensible. HTTP clients are not required to -/// > understand the meaning of all registered status codes, though such -/// > understanding is obviously desirable. However, a client MUST -/// > understand the class of any status code, as indicated by the first -/// > digit, and treat an unrecognized status code as being equivalent to -/// > the x00 status code of that class, with the exception that a -/// > recipient MUST NOT cache a response with an unrecognized status code. 
-/// > -/// > For example, if an unrecognized status code of 471 is received by a -/// > client, the client can assume that there was something wrong with its -/// > request and treat the response as if it had received a 400 (Bad -/// > Request) status code. The response message will usually contain a -/// > representation that explains the status. -/// -/// This can be used in cases where a status code’s meaning is unknown, also, -/// to get the appropriate *category* of status. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Copy)] -pub enum StatusClass { - /// 1xx (Informational): The request was received, continuing process - Informational, - - /// 2xx (Success): The request was successfully received, understood, and accepted - Success, - - /// 3xx (Redirection): Further action needs to be taken in order to complete the request - Redirection, - - /// 4xx (Client Error): The request contains bad syntax or cannot be fulfilled - ClientError, - - /// 5xx (Server Error): The server failed to fulfill an apparently valid request - ServerError, - - /// A status code lower than 100 or higher than 599. These codes do no belong to any class. - NoClass, -} - -impl StatusClass { - /// Get the default status code for the class. - /// - /// This produces the x00 status code; thus, for `ClientError` (4xx), for - /// example, this will produce `BadRequest` (400): - /// - /// ```rust - /// # use hyper::status::StatusClass::ClientError; - /// # use hyper::status::StatusCode::BadRequest; - /// assert_eq!(ClientError.default_code(), BadRequest); - /// ``` - /// - /// The use for this is outlined in [RFC 7231, section 6 (Response Status - /// Codes)](https://tools.ietf.org/html/rfc7231#section-6): - /// - /// > HTTP status codes are extensible. HTTP clients are not required to - /// > understand the meaning of all registered status codes, though such - /// > understanding is obviously desirable. 
However, a client MUST - /// > understand the class of any status code, as indicated by the first - /// > digit, and treat an unrecognized status code as being equivalent to - /// > the x00 status code of that class, with the exception that a - /// > recipient MUST NOT cache a response with an unrecognized status code. - /// > - /// > For example, if an unrecognized status code of 471 is received by a - /// > client, the client can assume that there was something wrong with its - /// > request and treat the response as if it had received a 400 (Bad - /// > Request) status code. The response message will usually contain a - /// > representation that explains the status. - /// - /// This is demonstrated thusly: - /// - /// ```rust - /// # use hyper::status::StatusCode::{Unregistered, BadRequest}; - /// // Suppose we have received this status code. - /// // You will never directly create an unregistered status code. - /// let status = Unregistered(471); - /// - /// // Uh oh! Don’t know what to do with it. - /// // Let’s fall back to the default: - /// let status = status.class().default_code(); - /// - /// // And look! That is 400 Bad Request. - /// assert_eq!(status, BadRequest); - /// // So now let’s treat it as that. - /// ``` - /// All status codes that do not map to an existing status class are matched - /// by a `NoClass`, variant that resolves to 200 (Ok) as default code. - /// This is a common handling for unknown status codes in major browsers. 
- pub fn default_code(&self) -> StatusCode { - match *self { - StatusClass::Informational => StatusCode::Continue, - StatusClass::Success => StatusCode::Ok, - StatusClass::Redirection => StatusCode::MultipleChoices, - StatusClass::ClientError => StatusCode::BadRequest, - StatusClass::ServerError => StatusCode::InternalServerError, - StatusClass::NoClass => StatusCode::Ok, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use super::StatusCode::*; - - // Check that the following entities are properly inter-connected: - // - numerical code - // - status code - // - default code (for the given status code) - // - canonical reason - fn validate(num: u16, status_code: StatusCode, default_code: StatusCode, reason: Option<&str>) { - assert_eq!(StatusCode::from_u16(num), status_code); - assert_eq!(status_code.to_u16(), num); - assert_eq!(status_code.class().default_code(), default_code); - assert_eq!(status_code.canonical_reason(), reason); - } - - #[test] - fn test_status_code() { - validate(99, Unregistered(99), Ok, None); - - validate(100, Continue, Continue, Some("Continue")); - validate(101, SwitchingProtocols, Continue, Some("Switching Protocols")); - validate(102, Processing, Continue, Some("Processing")); - - validate(200, Ok, Ok, Some("OK")); - validate(201, Created, Ok, Some("Created")); - validate(202, Accepted, Ok, Some("Accepted")); - validate(203, NonAuthoritativeInformation, Ok, Some("Non-Authoritative Information")); - validate(204, NoContent, Ok, Some("No Content")); - validate(205, ResetContent, Ok, Some("Reset Content")); - validate(206, PartialContent, Ok, Some("Partial Content")); - validate(207, MultiStatus, Ok, Some("Multi-Status")); - validate(208, AlreadyReported, Ok, Some("Already Reported")); - validate(226, ImUsed, Ok, Some("IM Used")); - - validate(300, MultipleChoices, MultipleChoices, Some("Multiple Choices")); - validate(301, MovedPermanently, MultipleChoices, Some("Moved Permanently")); - validate(302, Found, MultipleChoices, 
Some("Found")); - validate(303, SeeOther, MultipleChoices, Some("See Other")); - validate(304, NotModified, MultipleChoices, Some("Not Modified")); - validate(305, UseProxy, MultipleChoices, Some("Use Proxy")); - validate(307, TemporaryRedirect, MultipleChoices, Some("Temporary Redirect")); - validate(308, PermanentRedirect, MultipleChoices, Some("Permanent Redirect")); - - validate(400, BadRequest, BadRequest, Some("Bad Request")); - validate(401, Unauthorized, BadRequest, Some("Unauthorized")); - validate(402, PaymentRequired, BadRequest, Some("Payment Required")); - validate(403, Forbidden, BadRequest, Some("Forbidden")); - validate(404, NotFound, BadRequest, Some("Not Found")); - validate(405, MethodNotAllowed, BadRequest, Some("Method Not Allowed")); - validate(406, NotAcceptable, BadRequest, Some("Not Acceptable")); - validate(407, ProxyAuthenticationRequired, BadRequest, - Some("Proxy Authentication Required")); - validate(408, RequestTimeout, BadRequest, Some("Request Timeout")); - validate(409, Conflict, BadRequest, Some("Conflict")); - validate(410, Gone, BadRequest, Some("Gone")); - validate(411, LengthRequired, BadRequest, Some("Length Required")); - validate(412, PreconditionFailed, BadRequest, Some("Precondition Failed")); - validate(413, PayloadTooLarge, BadRequest, Some("Payload Too Large")); - validate(414, UriTooLong, BadRequest, Some("URI Too Long")); - validate(415, UnsupportedMediaType, BadRequest, Some("Unsupported Media Type")); - validate(416, RangeNotSatisfiable, BadRequest, Some("Range Not Satisfiable")); - validate(417, ExpectationFailed, BadRequest, Some("Expectation Failed")); - validate(418, ImATeapot, BadRequest, Some("I'm a teapot")); - validate(421, MisdirectedRequest, BadRequest, Some("Misdirected Request")); - validate(422, UnprocessableEntity, BadRequest, Some("Unprocessable Entity")); - validate(423, Locked, BadRequest, Some("Locked")); - validate(424, FailedDependency, BadRequest, Some("Failed Dependency")); - validate(426, 
UpgradeRequired, BadRequest, Some("Upgrade Required")); - validate(428, PreconditionRequired, BadRequest, Some("Precondition Required")); - validate(429, TooManyRequests, BadRequest, Some("Too Many Requests")); - validate(431, RequestHeaderFieldsTooLarge, BadRequest, - Some("Request Header Fields Too Large")); - validate(451, UnavailableForLegalReasons, BadRequest, - Some("Unavailable For Legal Reasons")); - - validate(500, InternalServerError, InternalServerError, Some("Internal Server Error")); - validate(501, NotImplemented, InternalServerError, Some("Not Implemented")); - validate(502, BadGateway, InternalServerError, Some("Bad Gateway")); - validate(503, ServiceUnavailable, InternalServerError, Some("Service Unavailable")); - validate(504, GatewayTimeout, InternalServerError, Some("Gateway Timeout")); - validate(505, HttpVersionNotSupported, InternalServerError, - Some("HTTP Version Not Supported")); - validate(506, VariantAlsoNegotiates, InternalServerError, Some("Variant Also Negotiates")); - validate(507, InsufficientStorage, InternalServerError, Some("Insufficient Storage")); - validate(508, LoopDetected, InternalServerError, Some("Loop Detected")); - validate(510, NotExtended, InternalServerError, Some("Not Extended")); - validate(511, NetworkAuthenticationRequired, InternalServerError, - Some("Network Authentication Required")); - - } -} diff --git a/third_party/rust/hyper/src/upgrade.rs b/third_party/rust/hyper/src/upgrade.rs new file mode 100644 index 000000000000..4cac855728cf --- /dev/null +++ b/third_party/rust/hyper/src/upgrade.rs @@ -0,0 +1,256 @@ +//! HTTP Upgrades +//! +//! See [this example][example] showing how upgrades work with both +//! Clients and Servers. +//! +//! 
[example]: https://github.com/hyperium/hyper/blob/master/examples/upgrades.rs + +use std::any::TypeId; +use std::error::Error as StdError; +use std::fmt; +use std::io::{self, Read, Write}; + +use bytes::{Buf, BufMut, Bytes}; +use futures::{Async, Future, Poll}; +use futures::sync::oneshot; +use tokio_io::{AsyncRead, AsyncWrite}; + +use common::io::Rewind; + +/// An upgraded HTTP connection. +/// +/// This type holds a trait object internally of the original IO that +/// was used to speak HTTP before the upgrade. It can be used directly +/// as a `Read` or `Write` for convenience. +/// +/// Alternatively, if the exact type is known, this can be deconstructed +/// into its parts. +pub struct Upgraded { + io: Rewind>, +} + +/// A future for a possible HTTP upgrade. +/// +/// If no upgrade was available, or it doesn't succeed, yields an `Error`. +pub struct OnUpgrade { + rx: Option>>, +} + +/// The deconstructed parts of an [`Upgraded`](Upgraded) type. +/// +/// Includes the original IO type, and a read buffer of bytes that the +/// HTTP state machine may have already read before completing an upgrade. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used before the upgrade. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// For instance, if the `Connection` is used for an HTTP upgrade request, + /// it is possible the server sent back the first bytes of the new protocol + /// along with the response upgrade. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + _inner: (), +} + +pub(crate) struct Pending { + tx: oneshot::Sender<::Result> +} + +/// Error cause returned when an upgrade was expected but canceled +/// for whatever reason. +/// +/// This likely means the actual `Conn` future wasn't polled and upgraded. 
+#[derive(Debug)] +struct UpgradeExpected(()); + +pub(crate) fn pending() -> (Pending, OnUpgrade) { + let (tx, rx) = oneshot::channel(); + ( + Pending { + tx, + }, + OnUpgrade { + rx: Some(rx), + }, + ) +} + +pub(crate) trait Io: AsyncRead + AsyncWrite + 'static { + fn __hyper_type_id(&self) -> TypeId { + TypeId::of::() + } +} + +impl Io + Send { + fn __hyper_is(&self) -> bool { + let t = TypeId::of::(); + self.__hyper_type_id() == t + } + + fn __hyper_downcast(self: Box) -> Result, Box> { + if self.__hyper_is::() { + // Taken from `std::error::Error::downcast()`. + unsafe { + let raw: *mut Io = Box::into_raw(self); + Ok(Box::from_raw(raw as *mut T)) + } + } else { + Err(self) + } + } +} + +impl Io for T {} + +// ===== impl Upgraded ===== + +impl Upgraded { + pub(crate) fn new(io: Box, read_buf: Bytes) -> Self { + Upgraded { + io: Rewind::new_buffered(io, read_buf), + } + } + + /// Tries to downcast the internal trait object to the type passed. + /// + /// On success, returns the downcasted parts. On error, returns the + /// `Upgraded` back. 
+ pub fn downcast(self) -> Result, Self> { + let (io, buf) = self.io.into_inner(); + match io.__hyper_downcast() { + Ok(t) => Ok(Parts { + io: *t, + read_buf: buf, + _inner: (), + }), + Err(io) => Err(Upgraded { + io: Rewind::new_buffered(io, buf), + }) + } + } +} + +impl Read for Upgraded { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.io.read(buf) + } +} + +impl Write for Upgraded { + #[inline] + fn write(&mut self, buf: &[u8]) -> io::Result { + self.io.write(buf) + } + + #[inline] + fn flush(&mut self) -> io::Result<()> { + self.io.flush() + } +} + +impl AsyncRead for Upgraded { + #[inline] + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.io.prepare_uninitialized_buffer(buf) + } + + #[inline] + fn read_buf(&mut self, buf: &mut B) -> Poll { + self.io.read_buf(buf) + } +} + +impl AsyncWrite for Upgraded { + #[inline] + fn shutdown(&mut self) -> Poll<(), io::Error> { + AsyncWrite::shutdown(&mut self.io) + } + + #[inline] + fn write_buf(&mut self, buf: &mut B) -> Poll { + self.io.write_buf(buf) + } +} + +impl fmt::Debug for Upgraded { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Upgraded") + .finish() + } +} + +// ===== impl OnUpgrade ===== + +impl OnUpgrade { + pub(crate) fn none() -> Self { + OnUpgrade { + rx: None, + } + } + + pub(crate) fn is_none(&self) -> bool { + self.rx.is_none() + } +} + +impl Future for OnUpgrade { + type Item = Upgraded; + type Error = ::Error; + + fn poll(&mut self) -> Poll { + match self.rx { + Some(ref mut rx) => match rx.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(Ok(upgraded))) => Ok(Async::Ready(upgraded)), + Ok(Async::Ready(Err(err))) => Err(err), + Err(_oneshot_canceled) => Err( + ::Error::new_canceled(Some(UpgradeExpected(()))) + ), + }, + None => Err(::Error::new_user_no_upgrade()), + } + } +} + +impl fmt::Debug for OnUpgrade { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OnUpgrade") 
+ .finish() + } +} + +// ===== impl Pending ===== + +impl Pending { + pub(crate) fn fulfill(self, upgraded: Upgraded) { + trace!("pending upgrade fulfill"); + let _ = self.tx.send(Ok(upgraded)); + } + + /// Don't fulfill the pending Upgrade, but instead signal that + /// upgrades are handled manually. + pub(crate) fn manual(self) { + trace!("pending upgrade handled manually"); + let _ = self.tx.send(Err(::Error::new_user_manual_upgrade())); + } +} + +// ===== impl UpgradeExpected ===== + +impl fmt::Display for UpgradeExpected { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.description()) + } +} + +impl StdError for UpgradeExpected { + fn description(&self) -> &str { + "upgrade expected but not completed" + } +} + diff --git a/third_party/rust/hyper/src/uri.rs b/third_party/rust/hyper/src/uri.rs deleted file mode 100644 index 26423756fd60..000000000000 --- a/third_party/rust/hyper/src/uri.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! HTTP RequestUris -use std::fmt::{Display, self}; -use std::str::FromStr; -use url::Url; -use url::ParseError as UrlError; - -use Error; - -/// The Request-URI of a Request's StartLine. -/// -/// From Section 5.3, Request Target: -/// > Once an inbound connection is obtained, the client sends an HTTP -/// > request message (Section 3) with a request-target derived from the -/// > target URI. There are four distinct formats for the request-target, -/// > depending on both the method being requested and whether the request -/// > is to a proxy. -/// > -/// > ```notrust -/// > request-target = origin-form -/// > / absolute-form -/// > / authority-form -/// > / asterisk-form -/// > ``` -#[derive(Debug, PartialEq, Clone)] -pub enum RequestUri { - /// The most common request target, an absolute path and optional query. - /// - /// For example, the line `GET /where?q=now HTTP/1.1` would parse the URI - /// as `AbsolutePath("/where?q=now".to_string())`. - AbsolutePath(String), - - /// An absolute URI. 
Used in conjunction with proxies. - /// - /// > When making a request to a proxy, other than a CONNECT or server-wide - /// > OPTIONS request (as detailed below), a client MUST send the target - /// > URI in absolute-form as the request-target. - /// - /// An example StartLine with an `AbsoluteUri` would be - /// `GET http://www.example.org/pub/WWW/TheProject.html HTTP/1.1`. - AbsoluteUri(Url), - - /// The authority form is only for use with `CONNECT` requests. - /// - /// An example StartLine: `CONNECT www.example.com:80 HTTP/1.1`. - Authority(String), - - /// The star is used to target the entire server, instead of a specific resource. - /// - /// This is only used for a server-wide `OPTIONS` request. - Star, -} - -impl FromStr for RequestUri { - type Err = Error; - - fn from_str(s: &str) -> Result { - let bytes = s.as_bytes(); - if bytes.is_empty() { - Err(Error::Uri(UrlError::RelativeUrlWithoutBase)) - } else if bytes == b"*" { - Ok(RequestUri::Star) - } else if bytes.starts_with(b"/") { - Ok(RequestUri::AbsolutePath(s.to_owned())) - } else if bytes.contains(&b'/') { - Ok(RequestUri::AbsoluteUri(try!(Url::parse(s)))) - } else { - let mut temp = "http://".to_owned(); - temp.push_str(s); - try!(Url::parse(&temp[..])); - todo!("compare vs u.authority()"); - Ok(RequestUri::Authority(s.to_owned())) - } - } -} - -impl Display for RequestUri { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - RequestUri::AbsolutePath(ref path) => f.write_str(path), - RequestUri::AbsoluteUri(ref url) => write!(f, "{}", url), - RequestUri::Authority(ref path) => f.write_str(path), - RequestUri::Star => f.write_str("*") - } - } -} - -#[test] -fn test_uri_fromstr() { - fn read(s: &str, result: RequestUri) { - assert_eq!(s.parse::().unwrap(), result); - } - - read("*", RequestUri::Star); - read("http://hyper.rs/", RequestUri::AbsoluteUri(Url::parse("http://hyper.rs/").unwrap())); - read("hyper.rs", RequestUri::Authority("hyper.rs".to_owned())); - read("/", 
RequestUri::AbsolutePath("/".to_owned())); -} - -#[test] -fn test_uri_display() { - fn assert_display(expected_string: &str, request_uri: RequestUri) { - assert_eq!(expected_string, format!("{}", request_uri)); - } - - assert_display("*", RequestUri::Star); - assert_display("http://hyper.rs/", RequestUri::AbsoluteUri(Url::parse("http://hyper.rs/").unwrap())); - assert_display("hyper.rs", RequestUri::Authority("hyper.rs".to_owned())); - assert_display("/", RequestUri::AbsolutePath("/".to_owned())); - -} diff --git a/third_party/rust/hyper/src/version.rs b/third_party/rust/hyper/src/version.rs deleted file mode 100644 index a7f12f2a0cad..000000000000 --- a/third_party/rust/hyper/src/version.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! HTTP Versions enum -//! -//! Instead of relying on typo-prone Strings, use expected HTTP versions as -//! the `HttpVersion` enum. -use std::fmt; -use std::str::FromStr; - -use error::Error; -use self::HttpVersion::{Http09, Http10, Http11, Http20}; - -/// Represents a version of the HTTP spec. 
-#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)] -pub enum HttpVersion { - /// `HTTP/0.9` - Http09, - /// `HTTP/1.0` - Http10, - /// `HTTP/1.1` - Http11, - /// `HTTP/2.0` - Http20, -} - -impl fmt::Display for HttpVersion { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(self.as_ref()) - } -} - -impl AsRef for HttpVersion { - fn as_ref(&self) -> &str { - match *self { - Http09 => "HTTP/0.9", - Http10 => "HTTP/1.0", - Http11 => "HTTP/1.1", - Http20 => "HTTP/2.0", - } - } -} - -impl FromStr for HttpVersion { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s { - "HTTP/0.9" => Ok(Http09), - "HTTP/1.0" => Ok(Http10), - "HTTP/1.1" => Ok(Http11), - "HTTP/2.0" => Ok(Http20), - _ => Err(Error::Version), - } - } -} diff --git a/third_party/rust/indexmap/.cargo-checksum.json b/third_party/rust/indexmap/.cargo-checksum.json new file mode 100644 index 000000000000..eefb0c9d2d39 --- /dev/null +++ b/third_party/rust/indexmap/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".travis.yml":"0600e977b88280afee20ce3b56e64bf240038c2e180c8b9e1e02d34f7d61a654","Cargo.toml":"cda49f1901a9b83815525a0f559b0dbb38009f1f6021ce0eec2acd173d77e890","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ecc269ef87fd38a1d98e30bfac9ba964a9dbd9315c3770fed98d4d7cb5882055","README.rst":"37feee5ed30117b168ded40caeadba99e6fd7c45a9e64b69717ff785e9607724","benches/bench.rs":"ed5108d81b3cb3806bfc652e745ec0ec353766bdb9537d834606c6cb233bb27e","benches/faststring.rs":"724c5dc83e333c00d28393e346f3feb6a93c64d5c3d6d5a00f680b22b8259c31","src/equivalent.rs":"4d07c0ae8c8ff405fdbb45e6c891158d3fdcfedd47001e4cec090c79b5c56564","src/lib.rs":"483aa0587bd21942988ab094430179b65f80678734c4ac7cf734d10541a75b0e","src/macros.rs":"472c9ec707495e6de60b4e67c1b25f2201bb962fa6672fae32addde2eb4df376","src/map.rs":"8be24b99c4245463bff51bf79c3b1da6a301db6437c91529387f77f6dec35c0b","src/mutable_keys.rs":"2bf26fb36ad0ccd3c40b0f2cc4e5b8429e6627207f50fca07110a5011880a9dc","src/serde.rs":"16eae1b7fb2d8d0a030e221d180355b967aa6d48fe431a06bec75a182cb97f69","src/set.rs":"e1813c349167874390783267142681e0c451c5d111325107e35f62f264c76290","src/util.rs":"331f80b48387878caa01ab9cfd43927ea0c15129c6beb755d6b40abc6ada900f","tests/equivalent_trait.rs":"f48ef255e4bc6bc85ed11fd9bee4cc53759efb182e448d315f8d12af1f80b05d","tests/quick.rs":"865be7928ec19dbf58c896bd2a3fd848a756328a429934a9aad5a364298228f0","tests/serde.rs":"48f2a2184c819ffaa5c234ccea9c3bea1c58edf8ad9ada1476eedc179438d07d","tests/tests.rs":"c916ae9c5d08c042b7c3a0447ef3db5a1b9d37b3122fddace4235296a623725b"},"package":"08173ba1e906efb6538785a8844dd496f5d34f0a2d88038e95195172fc667220"} \ No newline at end of file diff --git a/third_party/rust/indexmap/.travis.yml b/third_party/rust/indexmap/.travis.yml new file mode 100644 index 000000000000..bc6b02ff86cd --- /dev/null +++ b/third_party/rust/indexmap/.travis.yml @@ -0,0 +1,22 @@ +language: rust +sudo: false +matrix: + include: + - rust: 1.18.0 + - 
rust: stable + env: + - FEATURES='serde-1' + - rust: beta + - rust: nightly + - rust: nightly + env: + - FEATURES='test_low_transition_point' +branches: + only: + - master +script: + - | + cargo build --verbose --features "$FEATURES" && + cargo test --verbose --features "$FEATURES" && + cargo test --release --verbose --features "$FEATURES" && + cargo doc --verbose --features "$FEATURES" diff --git a/third_party/rust/indexmap/Cargo.toml b/third_party/rust/indexmap/Cargo.toml new file mode 100644 index 000000000000..95e51cb1f802 --- /dev/null +++ b/third_party/rust/indexmap/Cargo.toml @@ -0,0 +1,58 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "indexmap" +version = "1.0.1" +authors = ["bluss", "Josh Stone "] +description = "A hash table with consistent order and fast iteration.\n\nThe indexmap is a hash table where the iteration order of the key-value\npairs is independent of the hash values of the keys. It has the usual\nhash table functionality, it preserves insertion order except after\nremovals, and it allows lookup of its elements by either hash table key\nor numerical index. 
A corresponding hash set type is also provided.\n\nThis crate was initially published under the name ordermap, but it was renamed to\nindexmap.\n" +documentation = "https://docs.rs/indexmap/" +keywords = ["hashmap"] +categories = ["data-structures"] +license = "Apache-2.0/MIT" +repository = "https://github.com/bluss/indexmap" +[package.metadata.docs.rs] +features = ["serde-1"] + +[package.metadata.release] +no-dev-version = true +[profile.bench] +debug = true + +[lib] +bench = false +[dependencies.serde] +version = "1.0" +optional = true +[dev-dependencies.fnv] +version = "1.0" + +[dev-dependencies.itertools] +version = "0.7.0" + +[dev-dependencies.lazy_static] +version = "1" + +[dev-dependencies.quickcheck] +version = "0.6" +default-features = false + +[dev-dependencies.rand] +version = "0.4" + +[dev-dependencies.serde_test] +version = "1.0.5" + +[features] +serde-1 = ["serde"] +test_debug = [] +test_low_transition_point = [] diff --git a/third_party/rust/indexmap/LICENSE-APACHE b/third_party/rust/indexmap/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/indexmap/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/bytes/LICENSE-MIT b/third_party/rust/indexmap/LICENSE-MIT similarity index 97% rename from third_party/rust/bytes/LICENSE-MIT rename to third_party/rust/indexmap/LICENSE-MIT index 6c296bec80df..8b8181068b3c 100644 --- a/third_party/rust/bytes/LICENSE-MIT +++ b/third_party/rust/indexmap/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright (c) 2017 Carl Lerche +Copyright (c) 2016--2017 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/third_party/rust/indexmap/README.rst b/third_party/rust/indexmap/README.rst new file mode 100644 index 000000000000..81af405e401c --- /dev/null +++ b/third_party/rust/indexmap/README.rst @@ -0,0 +1,271 @@ + +Awesome hash table implementation in just Rust (stable, no unsafe code). + +Please read the `API documentation here`__ + +__ https://docs.rs/indexmap/ + +|build_status|_ |crates|_ + +.. |crates| image:: https://img.shields.io/crates/v/indexmap.svg +.. _crates: https://crates.io/crates/indexmap + +.. |build_status| image:: https://travis-ci.org/bluss/indexmap.svg +.. _build_status: https://travis-ci.org/bluss/indexmap + +Crate Name +========== + +This crate was originally released under the name ``ordermap``, but it was +renamed (with no change in functionality) to ``indexmap`` to better emphasize +its features. 
+ +Background +========== + +This was inspired by Python 3.6's new dict implementation (which remembers +the insertion order and is fast to iterate, and is compact in memory). + +Some of those features were translated to Rust, and some were not. The result +was indexmap, a hash table that has following properties: + +- Order is **independent of hash function** and hash values of keys. +- Fast to iterate. +- Indexed in compact space. +- Preserves insertion order **as long** as you don't call ``.remove()``. +- Uses robin hood hashing just like Rust's libstd ``HashMap``. + + - It's the usual backwards shift deletion, but only on the index vector, so + it's cheaper because it's moving less memory around. + +Does not implement (Yet) +------------------------ + +- ``.reserve()`` exists but does not have a complete implementation + +Performance +----------- + +``IndexMap`` derives a couple of performance facts directly from how it is constructed, +which is roughly: + + Two vectors, the first, sparse, with hashes and key-value indices, and the + second, dense, the key-value pairs. + +- Iteration is very fast since it is on the dense key-values. +- Removal is fast since it moves memory areas only in the first vector, + and uses a single swap in the second vector. +- Lookup is fast-ish because the hashes and indices are densely stored. + Lookup also is slow-ish since hashes and key-value pairs are stored in + separate places. (Visible when cpu caches size is limiting.) + +- In practice, ``IndexMap`` has been tested out as the hashmap in rustc in PR45282_ and + the performance was roughly on par across the whole workload. +- If you want the properties of ``IndexMap``, or its strongest performance points + fits your workload, it might be the best hash table implementation. + +.. 
_PR45282: https://github.com/rust-lang/rust/pull/45282 + +Interesting Features +-------------------- + +- Insertion order is preserved (``.swap_remove()`` perturbs the order, like the method name says). +- Implements ``.pop() -> Option<(K, V)>`` in O(1) time. +- ``IndexMap::new()`` is empty and uses no allocation until you insert something. +- Lookup key-value pairs by index and vice versa. +- No ``unsafe``. +- Supports ``IndexMut``. + + +Where to go from here? +---------------------- + +- Ideas and PRs for how to implement insertion-order preserving remove (for example tombstones) + are welcome. The plan is to split the crate into two hash table implementations + a) the current compact index space version and b) the full insertion order version. + + +Ideas that we already did +------------------------- + +- It can be an *indexable* ordered map in the current fashion + (This was implemented in 0.2.0, for potential use as a graph datastructure). + +- Idea for more cache efficient lookup (This was implemented in 0.1.2). + + Current ``indices: Vec``. ``Pos`` is interpreted as ``(u32, u32)`` more + or less when ``.raw_capacity()`` fits in 32 bits. ``Pos`` then stores both the lower + half of the hash and the entry index. + This means that the hash values in ``Bucket`` don't need to be accessed + while scanning for an entry. + + +Recent Changes +============== + +- 1.0.1 + + - Document Rust version policy for the crate (see rustdoc) + +- 1.0.0 + + - This is the 1.0 release for ``indexmap``! (the crate and datastructure + formerly known as “ordermap”) + - ``OccupiedEntry::insert`` changed its signature, to use ``&mut self`` for + the method receiver, matching the equivalent method for a standard + ``HashMap``. Thanks to @dtolnay for finding this bug. + - The deprecated old names from ordermap were removed: ``OrderMap``, + ``OrderSet``, ``ordermap!{}``, ``orderset!{}``. Use the new ``IndexMap`` + etc names instead. 
+ +- 0.4.1 + + - Renamed crate to ``indexmap``; the ``ordermap`` crate is now deprecated + and the types ``OrderMap/Set`` now have a deprecation notice. + +- 0.4.0 + + - This is the last release series for this ``ordermap`` under that name, + because the crate is **going to be renamed** to ``indexmap`` (with types + ``IndexMap``, ``IndexSet``) and no change in functionality! + - The map and its associated structs moved into the ``map`` submodule of the + crate, so that the map and set are symmetric + + + The iterators, ``Entry`` and other structs are now under ``ordermap::map::`` + + - Internally refactored ``OrderMap`` so that all the main algorithms + (insertion, lookup, removal etc) that don't use the ``S`` parameter (the + hasher) are compiled without depending on ``S``, which reduces generics bloat. + + - ``Entry`` no longer has a type parameter ``S``, which is just like + the standard ``HashMap``'s entry. + + - Minimum Rust version requirement increased to Rust 1.18 + +- 0.3.5 + + - Documentation improvements + +- 0.3.4 + + - The ``.retain()`` methods for ``OrderMap`` and ``OrderSet`` now + traverse the elements in order, and the retained elements **keep their order** + - Added new methods ``.sort_by()``, ``.sort_keys()`` to ``OrderMap`` and + ``.sort_by()``, ``.sort()`` to ``OrderSet``. These methods allow you to + sort the maps in place efficiently. + +- 0.3.3 + + - Document insertion behaviour better by @lucab + - Updated dependences (no feature changes) by @ignatenkobrain + +- 0.3.2 + + - Add ``OrderSet`` by @cuviper! + - ``OrderMap::drain`` is now (too) a double ended iterator. + +- 0.3.1 + + - In all ordermap iterators, forward the ``collect`` method to the underlying + iterator as well. + - Add crates.io categories. + +- 0.3.0 + + - The methods ``get_pair``, ``get_pair_index`` were both replaced by + ``get_full`` (and the same for the mutable case). + - Method ``swap_remove_pair`` replaced by ``swap_remove_full``. 
+ - Add trait ``MutableKeys`` for opt-in mutable key access. Mutable key access + is only possible through the methods of this extension trait. + - Add new trait ``Equivalent`` for key equivalence. This extends the + ``Borrow`` trait mechanism for ``OrderMap::get`` in a backwards compatible + way, just some minor type inference related issues may become apparent. + See `#10`__ for more information. + - Implement ``Extend<(&K, &V)>`` by @xfix. + +__ https://github.com/bluss/ordermap/pull/10 + +- 0.2.13 + + - Fix deserialization to support custom hashers by @Techcable. + - Add methods ``.index()`` on the entry types by @garro95. + +- 0.2.12 + + - Add methods ``.with_hasher()``, ``.hasher()``. + +- 0.2.11 + + - Support ``ExactSizeIterator`` for the iterators. By @Binero. + - Use ``Box<[Pos]>`` internally, saving a word in the ``OrderMap`` struct. + - Serde support, with crate feature ``"serde-1"``. By @xfix. + +- 0.2.10 + + - Add iterator ``.drain(..)`` by @stevej. + +- 0.2.9 + + - Add method ``.is_empty()`` by @overvenus. + - Implement ``PartialEq, Eq`` by @overvenus. + - Add method ``.sorted_by()``. + +- 0.2.8 + + - Add iterators ``.values()`` and ``.values_mut()``. + - Fix compatibility with 32-bit platforms. + +- 0.2.7 + + - Add ``.retain()``. + +- 0.2.6 + + - Add ``OccupiedEntry::remove_entry`` and other minor entry methods, + so that it now has all the features of ``HashMap``'s entries. + +- 0.2.5 + + - Improved ``.pop()`` slightly. + +- 0.2.4 + + - Improved performance of ``.insert()`` (`#3`__) by @pczarn. + +__ https://github.com/bluss/ordermap/pull/3 + +- 0.2.3 + + - Generalize ``Entry`` for now, so that it works on hashmaps with non-default + hasher. However, there's a lingering compat issue since libstd ``HashMap`` + does not parameterize its entries by the hasher (``S`` typarm). + - Special case some iterator methods like ``.nth()``. + +- 0.2.2 + + - Disable the verbose ``Debug`` impl by default. + +- 0.2.1 + + - Fix doc links and clarify docs. 
+ +- 0.2.0 + + - Add more ``HashMap`` methods & compat with its API. + - Experimental support for ``.entry()`` (the simplest parts of the API). + - Add ``.reserve()`` (placeholder impl). + - Add ``.remove()`` as synonym for ``.swap_remove()``. + - Changed ``.insert()`` to swap value if the entry already exists, and + return ``Option``. + - Experimental support as an *indexed* hash map! Added methods + ``.get_index()``, ``.get_index_mut()``, ``.swap_remove_index()``, + ``.get_pair_index()``, ``.get_pair_index_mut()``. + +- 0.1.2 + + - Implement the 32/32 split idea for ``Pos`` which improves cache utilization + and lookup performance. + +- 0.1.1 + + - Initial release. diff --git a/third_party/rust/indexmap/benches/bench.rs b/third_party/rust/indexmap/benches/bench.rs new file mode 100644 index 000000000000..b3e9915f1349 --- /dev/null +++ b/third_party/rust/indexmap/benches/bench.rs @@ -0,0 +1,745 @@ +#![feature(test)] +extern crate test; +extern crate rand; +extern crate fnv; +#[macro_use] +extern crate lazy_static; + +use std::hash::Hash; +use fnv::FnvHasher; +use std::hash::BuildHasherDefault; +type FnvBuilder = BuildHasherDefault; + +use test::Bencher; +use test::black_box; + +extern crate indexmap; + +use indexmap::IndexMap; + +use std::collections::HashMap; +use std::iter::FromIterator; + +use rand::{weak_rng, Rng}; + +#[bench] +fn new_hashmap(b: &mut Bencher) { + b.iter(|| { + HashMap::::new() + }); +} + +#[bench] +fn new_orderedmap(b: &mut Bencher) { + b.iter(|| { + IndexMap::::new() + }); +} + +#[bench] +fn with_capacity_10e5_hashmap(b: &mut Bencher) { + b.iter(|| { + HashMap::::with_capacity(10_000) + }); +} + +#[bench] +fn with_capacity_10e5_orderedmap(b: &mut Bencher) { + b.iter(|| { + IndexMap::::with_capacity(10_000) + }); +} + +#[bench] +fn insert_hashmap_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn 
insert_orderedmap_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn insert_orderedmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_str_10_000(b: &mut Bencher) { + let c = 10_000; + let ss = Vec::from_iter((0..c).map(|x| x.to_string())); + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for key in &ss { + map.insert(&key[..], ()); + } + map + }); +} + +#[bench] +fn insert_orderedmap_str_10_000(b: &mut Bencher) { + let c = 10_000; + let ss = Vec::from_iter((0..c).map(|x| x.to_string())); + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for key in &ss { + map.insert(&key[..], ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { + let c = 10_000; + let value = [0u64; 10]; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for i in 0..c { + map.insert(i, value); + } + map + }); +} + +#[bench] +fn insert_orderedmap_int_bigvalue_10_000(b: &mut Bencher) { + let c = 10_000; + let value = [0u64; 10]; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for i in 0..c { + map.insert(i, value); + } + map + }); +} + +#[bench] +fn insert_hashmap_100_000(b: &mut Bencher) { + let c = 100_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_orderedmap_100_000(b: &mut Bencher) { + let c = 100_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn 
insert_hashmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_orderedmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn entry_hashmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.entry(x).or_insert(()); + } + map + }); +} + +#[bench] +fn entry_orderedmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.entry(x).or_insert(()); + } + map + }); +} + +#[bench] +fn iter_sum_hashmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let len = c - c/10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| { + map.keys().sum::() + }); +} + +#[bench] +fn iter_sum_orderedmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let len = c - c/10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| { + map.keys().sum::() + }); +} + +#[bench] +fn iter_black_box_hashmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let len = c - c/10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| { + for &key in map.keys() { + black_box(key); + } + }); +} + +#[bench] +fn iter_black_box_orderedmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let len = c - c/10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| { + for &key in map.keys() { + black_box(key); + } + }); +} + +fn shuffled_keys(iter: I) -> Vec + where I: IntoIterator +{ + let mut v = Vec::from_iter(iter); + let mut rng = weak_rng(); + 
rng.shuffle(&mut v); + v +} + +#[bench] +fn lookup_hashmap_10_000_exist(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in 5000..c { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in c..15000 { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_orderedmap_10_000_exist(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in 5000..c { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_orderedmap_10_000_noexist(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in c..15000 { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +// number of items to look up +const LOOKUP_MAP_SIZE: u32 = 100_000_u32; +const LOOKUP_SAMPLE_SIZE: u32 = 5000; +const SORT_MAP_SIZE: usize = 10_000; + + +// use lazy_static so that comparison benchmarks use the exact same inputs +lazy_static! { + static ref KEYS: Vec = { + shuffled_keys(0..LOOKUP_MAP_SIZE) + }; +} + +lazy_static! { + static ref HMAP_100K: HashMap = { + let c = LOOKUP_MAP_SIZE; + let mut map = HashMap::with_capacity(c as usize); + let keys = &*KEYS; + for &key in keys { + map.insert(key, key); + } + map + }; +} + +lazy_static! 
{ + static ref OMAP_100K: IndexMap = { + let c = LOOKUP_MAP_SIZE; + let mut map = IndexMap::with_capacity(c as usize); + let keys = &*KEYS; + for &key in keys { + map.insert(key, key); + } + map + }; +} + +lazy_static! { + static ref OMAP_SORT_U32: IndexMap = { + let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); + for &key in &KEYS[..SORT_MAP_SIZE] { + map.insert(key, key); + } + map + }; +} +lazy_static! { + static ref OMAP_SORT_S: IndexMap = { + let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); + for &key in &KEYS[..SORT_MAP_SIZE] { + map.insert(format!("{:^16x}", &key), String::new()); + } + map + }; +} + +#[bench] +fn lookup_hashmap_100_000_multi(b: &mut Bencher) { + let map = &*HMAP_100K; + b.iter(|| { + let mut found = 0; + for key in 0..LOOKUP_SAMPLE_SIZE { + found += map.get(&key).is_some() as u32; + } + found + }); +} + + +#[bench] +fn lookup_ordermap_100_000_multi(b: &mut Bencher) { + let map = &*OMAP_100K; + b.iter(|| { + let mut found = 0; + for key in 0..LOOKUP_SAMPLE_SIZE { + found += map.get(&key).is_some() as u32; + } + found + }); +} + +// inorder: Test looking up keys in the same order as they were inserted +#[bench] +fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { + let map = &*HMAP_100K; + let keys = &*KEYS; + b.iter(|| { + let mut found = 0; + for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { + found += map.get(key).is_some() as u32; + } + found + }); +} + + +#[bench] +fn lookup_ordermap_100_000_inorder_multi(b: &mut Bencher) { + let map = &*OMAP_100K; + let keys = &*KEYS; + b.iter(|| { + let mut found = 0; + for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { + found += map.get(key).is_some() as u32; + } + found + }); +} + +#[bench] +fn lookup_hashmap_100_000_single(b: &mut Bencher) { + let map = &*HMAP_100K; + let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); + b.iter(|| { + let key = iter.next().unwrap(); + map.get(&key).is_some() + }); +} + + +#[bench] +fn lookup_ordermap_100_000_single(b: &mut Bencher) { 
+ let map = &*OMAP_100K; + let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); + b.iter(|| { + let key = iter.next().unwrap(); + map.get(&key).is_some() + }); +} + +const GROW_SIZE: usize = 100_000; +type GrowKey = u32; + +// Test grow/resize without preallocation +#[bench] +fn grow_fnv_hashmap_100_000(b: &mut Bencher) { + b.iter(|| { + let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); + for x in 0..GROW_SIZE { + map.insert(x as GrowKey, x as GrowKey); + } + map + }); +} + +#[bench] +fn grow_fnv_ordermap_100_000(b: &mut Bencher) { + b.iter(|| { + let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); + for x in 0..GROW_SIZE { + map.insert(x as GrowKey, x as GrowKey); + } + map + }); +} + + +const MERGE: u64 = 10_000; +#[bench] +fn hashmap_merge_simple(b: &mut Bencher) { + let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + b.iter(|| { + let mut merged = first_map.clone(); + merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); + merged + }); +} + +#[bench] +fn hashmap_merge_shuffle(b: &mut Bencher) { + let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + let mut v = Vec::new(); + let mut rng = weak_rng(); + b.iter(|| { + let mut merged = first_map.clone(); + v.extend(second_map.iter().map(|(&k, &v)| (k, v))); + rng.shuffle(&mut v); + merged.extend(v.drain(..)); + + merged + }); +} + +#[bench] +fn ordermap_merge_simple(b: &mut Bencher) { + let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + b.iter(|| { + let mut merged = first_map.clone(); + merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); + merged + }); +} + +#[bench] +fn ordermap_merge_shuffle(b: &mut Bencher) { + let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: IndexMap = 
(MERGE..MERGE * 2).map(|i| (i, ())).collect(); + let mut v = Vec::new(); + let mut rng = weak_rng(); + b.iter(|| { + let mut merged = first_map.clone(); + v.extend(second_map.iter().map(|(&k, &v)| (k, v))); + rng.shuffle(&mut v); + merged.extend(v.drain(..)); + + merged + }); +} + +#[bench] +fn remove_ordermap_100_000(b: &mut Bencher) { + let map = OMAP_100K.clone(); + let mut keys = Vec::from_iter(map.keys().cloned()); + weak_rng().shuffle(&mut keys); + + b.iter(|| { + let mut map = map.clone(); + for key in &keys { + map.remove(key).is_some(); + } + assert_eq!(map.len(), 0); + map + }); +} + +#[bench] +fn pop_ordermap_100_000(b: &mut Bencher) { + let map = OMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + while map.len() > 0 { + map.pop(); + } + assert_eq!(map.len(), 0); + map + }); +} + +#[bench] +fn few_retain_ordermap_100_000(b: &mut Bencher) { + let map = OMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 7 == 0); + map + }); +} + +#[bench] +fn few_retain_hashmap_100_000(b: &mut Bencher) { + let map = HMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 7 == 0); + map + }); +} + +#[bench] +fn half_retain_ordermap_100_000(b: &mut Bencher) { + let map = OMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 2 == 0); + map + }); +} + +#[bench] +fn half_retain_hashmap_100_000(b: &mut Bencher) { + let map = HMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 2 == 0); + map + }); +} + +#[bench] +fn many_retain_ordermap_100_000(b: &mut Bencher) { + let map = OMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 100 != 0); + map + }); +} + +#[bench] +fn many_retain_hashmap_100_000(b: &mut Bencher) { + let map = HMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 100 != 0); + map + }); +} + + +// simple sort impl for comparison +pub fn 
simple_sort(m: &mut IndexMap) { + let mut ordered: Vec<_> = m.drain(..).collect(); + ordered.sort_by(|left, right| left.0.cmp(&right.0)); + m.extend(ordered); +} + + +#[bench] +fn ordermap_sort_s(b: &mut Bencher) { + let map = OMAP_SORT_S.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + map.sort_keys(); + map + }); +} + +#[bench] +fn ordermap_simple_sort_s(b: &mut Bencher) { + let map = OMAP_SORT_S.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + simple_sort(&mut map); + map + }); +} + +#[bench] +fn ordermap_sort_u32(b: &mut Bencher) { + let map = OMAP_SORT_U32.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + map.sort_keys(); + map + }); +} + +#[bench] +fn ordermap_simple_sort_u32(b: &mut Bencher) { + let map = OMAP_SORT_U32.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + simple_sort(&mut map); + map + }); +} + +// measure the fixed overhead of cloning in sort benchmarks +#[bench] +fn ordermap_clone_for_sort_s(b: &mut Bencher) { + let map = OMAP_SORT_S.clone(); + + b.iter(|| { + map.clone() + }); +} + +#[bench] +fn ordermap_clone_for_sort_u32(b: &mut Bencher) { + let map = OMAP_SORT_U32.clone(); + + b.iter(|| { + map.clone() + }); +} + diff --git a/third_party/rust/indexmap/benches/faststring.rs b/third_party/rust/indexmap/benches/faststring.rs new file mode 100644 index 000000000000..65e780134dfb --- /dev/null +++ b/third_party/rust/indexmap/benches/faststring.rs @@ -0,0 +1,183 @@ +#![feature(test)] +extern crate test; +extern crate rand; +extern crate lazy_static; + +use test::Bencher; + +extern crate indexmap; + +use indexmap::IndexMap; + +use std::collections::HashMap; +use std::iter::FromIterator; + +use rand::{weak_rng, Rng}; + +use std::hash::{Hash, 
Hasher}; + +use std::borrow::Borrow; +use std::ops::Deref; +use std::mem; + +#[derive(PartialEq, Eq, Copy, Clone)] +pub struct OneShot(pub T); + +impl Hash for OneShot +{ + fn hash(&self, h: &mut H) { + h.write(self.0.as_bytes()) + } +} + +impl<'a, S> From<&'a S> for &'a OneShot + where S: AsRef +{ + fn from(s: &'a S) -> Self { + let s: &str = s.as_ref(); + unsafe { + mem::transmute(s) + } + } +} + +impl Hash for OneShot +{ + fn hash(&self, h: &mut H) { + h.write(self.0.as_bytes()) + } +} + +impl Borrow> for OneShot +{ + fn borrow(&self) -> &OneShot { + <&OneShot>::from(&self.0) + } +} + +impl Deref for OneShot +{ + type Target = T; + fn deref(&self) -> &T { + &self.0 + } +} + + +fn shuffled_keys(iter: I) -> Vec + where I: IntoIterator +{ + let mut v = Vec::from_iter(iter); + let mut rng = weak_rng(); + rng.shuffle(&mut v); + v +} + + +#[bench] +fn insert_hashmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(OneShot(x.to_string()), ()); + } + map + }); +} + +#[bench] +fn insert_orderedmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key.to_string(), 1); + } + let lookups = (5000..c).map(|x| x.to_string()).collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { + let c = 10_000; 
+ let mut map = HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(OneShot(key.to_string()), 1); + } + let lookups = (5000..c).map(|x| OneShot(x.to_string())).collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_ordermap_10_000_exist_string(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key.to_string(), 1); + } + let lookups = (5000..c).map(|x| x.to_string()).collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_ordermap_10_000_exist_string_oneshot(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(OneShot(key.to_string()), 1); + } + let lookups = (5000..c).map(|x| OneShot(x.to_string())).collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} diff --git a/third_party/rust/indexmap/src/equivalent.rs b/third_party/rust/indexmap/src/equivalent.rs new file mode 100644 index 000000000000..d72b2ef3a202 --- /dev/null +++ b/third_party/rust/indexmap/src/equivalent.rs @@ -0,0 +1,27 @@ + +use std::borrow::Borrow; + +/// Key equivalence trait. +/// +/// This trait allows hash table lookup to be customized. +/// It has one blanket implementation that uses the regular `Borrow` solution, +/// just like `HashMap` and `BTreeMap` do, so that you can pass `&str` to lookup +/// into a map with `String` keys and so on. +/// +/// # Contract +/// +/// The implementor **must** hash like `K`, if it is hashable. +pub trait Equivalent { + /// Compare self to `key` and return `true` if they are equal. 
+ fn equivalent(&self, key: &K) -> bool; +} + +impl Equivalent for Q + where Q: Eq, + K: Borrow, +{ + #[inline] + fn equivalent(&self, key: &K) -> bool { + *self == *key.borrow() + } +} diff --git a/third_party/rust/indexmap/src/lib.rs b/third_party/rust/indexmap/src/lib.rs new file mode 100644 index 000000000000..1a0655e3f66b --- /dev/null +++ b/third_party/rust/indexmap/src/lib.rs @@ -0,0 +1,79 @@ + +#![deny(unsafe_code)] +#![doc(html_root_url = "https://docs.rs/indexmap/1/")] + +//! [`IndexMap`] is a hash table where the iteration order of the key-value +//! pairs is independent of the hash values of the keys. +//! +//! [`IndexSet`] is a corresponding hash set using the same implementation and +//! with similar properties. +//! +//! [`IndexMap`]: map/struct.IndexMap.html +//! [`IndexSet`]: set/struct.IndexSet.html +//! +//! +//! ## Rust Version +//! +//! This version of indexmap requires Rust 1.18 or later. +//! +//! The indexmap 1.x release series will use a carefully considered version +//! upgrade policy, where in a later 1.x version, we will raise the minimum +//! required Rust version. + +#[macro_use] +mod macros; +#[cfg(feature = "serde-1")] +mod serde; +mod util; +mod equivalent; +mod mutable_keys; + +pub mod set; +pub mod map; + +pub use equivalent::Equivalent; +pub use map::IndexMap; +pub use set::IndexSet; + +// shared private items + +/// Hash value newtype. Not larger than usize, since anything larger +/// isn't used for selecting position anyway. 
+#[derive(Copy, Debug)] +struct HashValue(usize); + +impl HashValue { + #[inline(always)] + fn get(self) -> usize { self.0 } +} + +impl Clone for HashValue { + #[inline] + fn clone(&self) -> Self { *self } +} +impl PartialEq for HashValue { + #[inline] + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +#[derive(Copy, Clone, Debug)] +struct Bucket { + hash: HashValue, + key: K, + value: V, +} + +impl Bucket { + // field accessors -- used for `f` instead of closures in `.map(f)` + fn key_ref(&self) -> &K { &self.key } + fn value_ref(&self) -> &V { &self.value } + fn value_mut(&mut self) -> &mut V { &mut self.value } + fn key(self) -> K { self.key } + fn key_value(self) -> (K, V) { (self.key, self.value) } + fn refs(&self) -> (&K, &V) { (&self.key, &self.value) } + fn ref_mut(&mut self) -> (&K, &mut V) { (&self.key, &mut self.value) } + fn muts(&mut self) -> (&mut K, &mut V) { (&mut self.key, &mut self.value) } +} + diff --git a/third_party/rust/indexmap/src/macros.rs b/third_party/rust/indexmap/src/macros.rs new file mode 100644 index 000000000000..b8c6f9b1fc7f --- /dev/null +++ b/third_party/rust/indexmap/src/macros.rs @@ -0,0 +1,122 @@ + +#[macro_export] +/// Create an `IndexMap` from a list of key-value pairs +/// +/// ## Example +/// +/// ``` +/// #[macro_use] extern crate indexmap; +/// # fn main() { +/// +/// let map = indexmap!{ +/// "a" => 1, +/// "b" => 2, +/// }; +/// assert_eq!(map["a"], 1); +/// assert_eq!(map["b"], 2); +/// assert_eq!(map.get("c"), None); +/// +/// // "a" is the first key +/// assert_eq!(map.keys().next(), Some(&"a")); +/// # } +/// ``` +macro_rules! 
indexmap { + (@single $($x:tt)*) => (()); + (@count $($rest:expr),*) => (<[()]>::len(&[$(indexmap!(@single $rest)),*])); + + ($($key:expr => $value:expr,)+) => { indexmap!($($key => $value),+) }; + ($($key:expr => $value:expr),*) => { + { + let _cap = indexmap!(@count $($key),*); + let mut _map = $crate::IndexMap::with_capacity(_cap); + $( + _map.insert($key, $value); + )* + _map + } + }; +} + +#[macro_export] +/// Create an `IndexSet` from a list of values +/// +/// ## Example +/// +/// ``` +/// #[macro_use] extern crate indexmap; +/// # fn main() { +/// +/// let set = indexset!{ +/// "a", +/// "b", +/// }; +/// assert!(set.contains("a")); +/// assert!(set.contains("b")); +/// assert!(!set.contains("c")); +/// +/// // "a" is the first value +/// assert_eq!(set.iter().next(), Some(&"a")); +/// # } +/// ``` +macro_rules! indexset { + (@single $($x:tt)*) => (()); + (@count $($rest:expr),*) => (<[()]>::len(&[$(indexset!(@single $rest)),*])); + + ($($value:expr,)+) => { indexset!($($value),+) }; + ($($value:expr),*) => { + { + let _cap = indexset!(@count $($value),*); + let mut _set = $crate::IndexSet::with_capacity(_cap); + $( + _set.insert($value); + )* + _set + } + }; +} + +// generate all the Iterator methods by just forwarding to the underlying +// self.iter and mapping its element. +macro_rules! 
iterator_methods { + // $map_elt is the mapping function from the underlying iterator's element + // same mapping function for both options and iterators + ($map_elt:expr) => { + fn next(&mut self) -> Option { + self.iter.next().map($map_elt) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn count(self) -> usize { + self.iter.len() + } + + fn nth(&mut self, n: usize) -> Option { + self.iter.nth(n).map($map_elt) + } + + fn last(mut self) -> Option { + self.next_back() + } + + fn collect(self) -> C + where C: FromIterator + { + // NB: forwarding this directly to standard iterators will + // allow it to leverage unstable traits like `TrustedLen`. + self.iter.map($map_elt).collect() + } + } +} + +macro_rules! double_ended_iterator_methods { + // $map_elt is the mapping function from the underlying iterator's element + // same mapping function for both options and iterators + ($map_elt:expr) => { + fn next_back(&mut self) -> Option { + self.iter.next_back().map($map_elt) + } + } +} diff --git a/third_party/rust/indexmap/src/map.rs b/third_party/rust/indexmap/src/map.rs new file mode 100644 index 000000000000..f138a4091618 --- /dev/null +++ b/third_party/rust/indexmap/src/map.rs @@ -0,0 +1,1941 @@ +//! `IndexMap` is a hash table where the iteration order of the key-value +//! pairs is independent of the hash values of the keys. + +pub use mutable_keys::MutableKeys; + +use std::hash::Hash; +use std::hash::BuildHasher; +use std::hash::Hasher; +use std::iter::FromIterator; +use std::collections::hash_map::RandomState; +use std::ops::RangeFull; + +use std::cmp::{max, Ordering}; +use std::fmt; +use std::mem::{replace}; +use std::marker::PhantomData; + +use util::{third, ptrdistance, enumerate}; +use equivalent::Equivalent; +use { + Bucket, + HashValue, +}; + +fn hash_elem_using(build: &B, k: &K) -> HashValue { + let mut h = build.build_hasher(); + k.hash(&mut h); + HashValue(h.finish() as usize) +} + +/// A possibly truncated hash value. 
+/// +#[derive(Debug)] +struct ShortHash(usize, PhantomData); + +impl ShortHash { + /// Pretend this is a full HashValue, which + /// is completely ok w.r.t determining bucket index + /// + /// - Sz = u32: 32-bit hash is enough to select bucket index + /// - Sz = u64: hash is not truncated + fn into_hash(self) -> HashValue { + HashValue(self.0) + } +} + +impl Copy for ShortHash { } +impl Clone for ShortHash { + #[inline] + fn clone(&self) -> Self { *self } +} + +impl PartialEq for ShortHash { + #[inline] + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +// Compare ShortHash == HashValue by truncating appropriately +// if applicable before the comparison +impl PartialEq for ShortHash where Sz: Size { + #[inline] + fn eq(&self, rhs: &HashValue) -> bool { + if Sz::is_64_bit() { + self.0 == rhs.0 + } else { + lo32(self.0 as u64) == lo32(rhs.0 as u64) + } + } +} +impl From> for HashValue { + fn from(x: ShortHash) -> Self { HashValue(x.0) } +} + +/// `Pos` is stored in the `indices` array and it points to the index of a +/// `Bucket` in self.core.entries. +/// +/// Pos can be interpreted either as a 64-bit index, or as a 32-bit index and +/// a 32-bit hash. +/// +/// Storing the truncated hash next to the index saves loading the hash from the +/// entry, increasing the cache efficiency. +/// +/// Note that the lower 32 bits of the hash is enough to compute desired +/// position and probe distance in a hash map with less than 2**32 buckets. +/// +/// The IndexMap will simply query its **current raw capacity** to see what its +/// current size class is, and dispatch to the 32-bit or 64-bit lookup code as +/// appropriate. 
Only the growth code needs some extra logic to handle the +/// transition from one class to another +#[derive(Copy)] +struct Pos { + index: u64, +} + +impl Clone for Pos { + #[inline(always)] + fn clone(&self) -> Self { *self } +} + +impl fmt::Debug for Pos { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.pos() { + Some(i) => write!(f, "Pos({} / {:x})", i, self.index), + None => write!(f, "Pos(None)"), + } + } +} + +impl Pos { + #[inline] + fn none() -> Self { Pos { index: !0 } } + + #[inline] + fn is_none(&self) -> bool { self.index == !0 } + + /// Return the index part of the Pos value inside `Some(_)` if the position + /// is not none, otherwise return `None`. + #[inline] + fn pos(&self) -> Option { + if self.index == !0 { None } else { Some(lo32(self.index as u64)) } + } + + /// Set the index part of the Pos value to `i` + #[inline] + fn set_pos(&mut self, i: usize) + where Sz: Size, + { + debug_assert!(!self.is_none()); + if Sz::is_64_bit() { + self.index = i as u64; + } else { + self.index = i as u64 | ((self.index >> 32) << 32) + } + } + + #[inline] + fn with_hash(i: usize, hash: HashValue) -> Self + where Sz: Size + { + if Sz::is_64_bit() { + Pos { + index: i as u64, + } + } else { + Pos { + index: i as u64 | ((hash.0 as u64) << 32) + } + } + } + + /// “Resolve” the Pos into a combination of its index value and + /// a proxy value to the hash (whether it contains the hash or not + /// depends on the size class of the hash map). + #[inline] + fn resolve(&self) -> Option<(usize, ShortHashProxy)> + where Sz: Size + { + if Sz::is_64_bit() { + if !self.is_none() { + Some((self.index as usize, ShortHashProxy::new(0))) + } else { + None + } + } else { + if !self.is_none() { + let (i, hash) = split_lo_hi(self.index); + Some((i as usize, ShortHashProxy::new(hash as usize))) + } else { + None + } + } + } + + /// Like resolve, but the Pos **must** be non-none. Return its index. 
+ #[inline] + fn resolve_existing_index(&self) -> usize + where Sz: Size + { + debug_assert!(!self.is_none(), "datastructure inconsistent: none where valid Pos expected"); + if Sz::is_64_bit() { + self.index as usize + } else { + let (i, _) = split_lo_hi(self.index); + i as usize + } + } + +} + +#[inline] +fn lo32(x: u64) -> usize { (x & 0xFFFF_FFFF) as usize } + +// split into low, hi parts +#[inline] +fn split_lo_hi(x: u64) -> (u32, u32) { (x as u32, (x >> 32) as u32) } + +// Possibly contains the truncated hash value for an entry, depending on +// the size class. +struct ShortHashProxy(usize, PhantomData); + +impl ShortHashProxy + where Sz: Size +{ + fn new(x: usize) -> Self { + ShortHashProxy(x, PhantomData) + } + + /// Get the hash from either `self` or from a lookup into `entries`, + /// depending on `Sz`. + fn get_short_hash(&self, entries: &[Bucket], index: usize) -> ShortHash { + if Sz::is_64_bit() { + ShortHash(entries[index].hash.0, PhantomData) + } else { + ShortHash(self.0, PhantomData) + } + } +} + +/// A hash table where the iteration order of the key-value pairs is independent +/// of the hash values of the keys. +/// +/// The interface is closely compatible with the standard `HashMap`, but also +/// has additional features. +/// +/// # Order +/// +/// The key-value pairs have a consistent order that is determined by +/// the sequence of insertion and removal calls on the map. The order does +/// not depend on the keys or the hash function at all. +/// +/// All iterators traverse the map in *the order*. +/// +/// The insertion order is preserved, with **notable exceptions** like the +/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of +/// course result in a new order, depending on the sorting order. +/// +/// # Indices +/// +/// The key-value pairs are indexed in a compact range without holes in the +/// range `0..self.len()`. 
For example, the method `.get_full` looks up the +/// index for a key, and the method `.get_index` looks up the key-value pair by +/// index. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// // count the frequency of each letter in a sentence. +/// let mut letters = IndexMap::new(); +/// for ch in "a short treatise on fungi".chars() { +/// *letters.entry(ch).or_insert(0) += 1; +/// } +/// +/// assert_eq!(letters[&'s'], 2); +/// assert_eq!(letters[&'t'], 3); +/// assert_eq!(letters[&'u'], 1); +/// assert_eq!(letters.get(&'y'), None); +/// ``` +#[derive(Clone)] +pub struct IndexMap { + core: OrderMapCore, + hash_builder: S, +} + +// core of the map that does not depend on S +#[derive(Clone)] +struct OrderMapCore { + pub(crate) mask: usize, + /// indices are the buckets. indices.len() == raw capacity + pub(crate) indices: Box<[Pos]>, + /// entries is a dense vec of entries in their order. entries.len() == len + pub(crate) entries: Vec>, +} + +#[inline(always)] +fn desired_pos(mask: usize, hash: HashValue) -> usize { + hash.0 & mask +} + +/// The number of steps that `current` is forward of the desired position for hash +#[inline(always)] +fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { + current.wrapping_sub(desired_pos(mask, hash)) & mask +} + +enum Inserted { + Done, + Swapped { prev_value: V }, + RobinHood { + probe: usize, + old_pos: Pos, + } +} + +impl fmt::Debug for IndexMap + where K: fmt::Debug + Hash + Eq, + V: fmt::Debug, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + try!(f.debug_map().entries(self.iter()).finish()); + if cfg!(not(feature = "test_debug")) { + return Ok(()); + } + try!(writeln!(f, "")); + for (i, index) in enumerate(&*self.core.indices) { + try!(write!(f, "{}: {:?}", i, index)); + if let Some(pos) = index.pos() { + let hash = self.core.entries[pos].hash; + let key = &self.core.entries[pos].key; + let desire = desired_pos(self.core.mask, hash); + 
try!(write!(f, ", desired={}, probe_distance={}, key={:?}", + desire, + probe_distance(self.core.mask, hash, i), + key)); + } + try!(writeln!(f, "")); + } + try!(writeln!(f, "cap={}, raw_cap={}, entries.cap={}", + self.capacity(), + self.raw_capacity(), + self.core.entries.capacity())); + Ok(()) + } +} + +#[inline] +fn usable_capacity(cap: usize) -> usize { + cap - cap / 4 +} + +#[inline] +fn to_raw_capacity(n: usize) -> usize { + n + n / 3 +} + +// this could not be captured in an efficient iterator +macro_rules! probe_loop { + ($probe_var: ident < $len: expr, $body: expr) => { + loop { + if $probe_var < $len { + $body + $probe_var += 1; + } else { + $probe_var = 0; + } + } + } +} + +impl IndexMap { + /// Create a new map. (Does not allocate.) + pub fn new() -> Self { + Self::with_capacity(0) + } + + /// Create a new map with capacity for `n` key-value pairs. (Does not + /// allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + pub fn with_capacity(n: usize) -> Self { + Self::with_capacity_and_hasher(n, <_>::default()) + } +} + +impl IndexMap +{ + /// Create a new map with capacity for `n` key-value pairs. (Does not + /// allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self + where S: BuildHasher + { + if n == 0 { + IndexMap { + core: OrderMapCore { + mask: 0, + indices: Box::new([]), + entries: Vec::new(), + }, + hash_builder: hash_builder, + } + } else { + let raw = to_raw_capacity(n); + let raw_cap = max(raw.next_power_of_two(), 8); + IndexMap { + core: OrderMapCore { + mask: raw_cap.wrapping_sub(1), + indices: vec![Pos::none(); raw_cap].into_boxed_slice(), + entries: Vec::with_capacity(usable_capacity(raw_cap)), + }, + hash_builder: hash_builder, + } + } + } + + /// Return the number of key-value pairs in the map. + /// + /// Computes in **O(1)** time. + pub fn len(&self) -> usize { self.core.len() } + + /// Returns true if the map contains no elements. 
+ /// + /// Computes in **O(1)** time. + pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// Create a new map with `hash_builder` + pub fn with_hasher(hash_builder: S) -> Self + where S: BuildHasher + { + Self::with_capacity_and_hasher(0, hash_builder) + } + + /// Return a reference to the map's `BuildHasher`. + pub fn hasher(&self) -> &S + where S: BuildHasher + { + &self.hash_builder + } + + /// Computes in **O(1)** time. + pub fn capacity(&self) -> usize { + self.core.capacity() + } + + #[inline] + fn size_class_is_64bit(&self) -> bool { + self.core.size_class_is_64bit() + } + + #[inline(always)] + fn raw_capacity(&self) -> usize { + self.core.raw_capacity() + } +} + +impl OrderMapCore { + // Return whether we need 32 or 64 bits to specify a bucket or entry index + #[cfg(not(feature = "test_low_transition_point"))] + fn size_class_is_64bit(&self) -> bool { + usize::max_value() > u32::max_value() as usize && + self.raw_capacity() >= u32::max_value() as usize + } + + // for testing + #[cfg(feature = "test_low_transition_point")] + fn size_class_is_64bit(&self) -> bool { + self.raw_capacity() >= 64 + } + + #[inline(always)] + fn raw_capacity(&self) -> usize { + self.indices.len() + } +} + +/// Trait for the "size class". Either u32 or u64 depending on the index +/// size needed to address an entry's indes in self.core.entries. +trait Size { + fn is_64_bit() -> bool; + fn is_same_size() -> bool { + Self::is_64_bit() == T::is_64_bit() + } +} + +impl Size for u32 { + #[inline] + fn is_64_bit() -> bool { false } +} + +impl Size for u64 { + #[inline] + fn is_64_bit() -> bool { true } +} + +/// Call self.method(args) with `::` or `::` depending on `self` +/// size class. +/// +/// The u32 or u64 is *prepended* to the type parameter list! +macro_rules! dispatch_32_vs_64 { + // self.methodname with other explicit type params, + // size is prepended + ($self_:ident . 
$method:ident::<$($t:ty),*>($($arg:expr),*)) => { + if $self_.size_class_is_64bit() { + $self_.$method::($($arg),*) + } else { + $self_.$method::($($arg),*) + } + }; + // self.methodname with only one type param, the size. + ($self_:ident . $method:ident ($($arg:expr),*)) => { + if $self_.size_class_is_64bit() { + $self_.$method::($($arg),*) + } else { + $self_.$method::($($arg),*) + } + }; + // functionname with size_class_is_64bit as the first argument, only one + // type param, the size. + ($self_:ident => $function:ident ($($arg:expr),*)) => { + if $self_.size_class_is_64bit() { + $function::($($arg),*) + } else { + $function::($($arg),*) + } + }; +} + +/// Entry for an existing key-value pair or a vacant location to +/// insert one. +pub enum Entry<'a, K: 'a, V: 'a> { + /// Existing slot with equivalent key. + Occupied(OccupiedEntry<'a, K, V>), + /// Vacant slot (no equivalent key in the map). + Vacant(VacantEntry<'a, K, V>), +} + +impl<'a, K, V> Entry<'a, K, V> { + /// Computes in **O(1)** time (amortized average). + pub fn or_insert(self, default: V) -> &'a mut V { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default), + } + } + + /// Computes in **O(1)** time (amortized average). + pub fn or_insert_with(self, call: F) -> &'a mut V + where F: FnOnce() -> V, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(call()), + } + } + + pub fn key(&self) -> &K { + match *self { + Entry::Occupied(ref entry) => entry.key(), + Entry::Vacant(ref entry) => entry.key(), + } + } + + /// Return the index where the key-value pair exists or will be inserted. 
+ pub fn index(&self) -> usize { + match *self { + Entry::Occupied(ref entry) => entry.index(), + Entry::Vacant(ref entry) => entry.index(), + } + } +} + +pub struct OccupiedEntry<'a, K: 'a, V: 'a> { + map: &'a mut OrderMapCore, + key: K, + probe: usize, + index: usize, +} + +impl<'a, K, V> OccupiedEntry<'a, K, V> { + pub fn key(&self) -> &K { &self.key } + pub fn get(&self) -> &V { + &self.map.entries[self.index].value + } + pub fn get_mut(&mut self) -> &mut V { + &mut self.map.entries[self.index].value + } + + /// Put the new key in the occupied entry's key slot + pub(crate) fn replace_key(self) -> K { + let old_key = &mut self.map.entries[self.index].key; + replace(old_key, self.key) + } + + /// Return the index of the key-value pair + pub fn index(&self) -> usize { + self.index + } + pub fn into_mut(self) -> &'a mut V { + &mut self.map.entries[self.index].value + } + + /// Sets the value of the entry to `value`, and returns the entry's old value. + pub fn insert(&mut self, value: V) -> V { + replace(self.get_mut(), value) + } + + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Remove and return the key, value pair stored in the map for this entry + pub fn remove_entry(self) -> (K, V) { + self.map.remove_found(self.probe, self.index) + } +} + + +pub struct VacantEntry<'a, K: 'a, V: 'a> { + map: &'a mut OrderMapCore, + key: K, + hash: HashValue, + probe: usize, +} + +impl<'a, K, V> VacantEntry<'a, K, V> { + pub fn key(&self) -> &K { &self.key } + pub fn into_key(self) -> K { self.key } + /// Return the index where the key-value pair will be inserted. 
+ pub fn index(&self) -> usize { self.map.len() } + pub fn insert(self, value: V) -> &'a mut V { + if self.map.size_class_is_64bit() { + self.insert_impl::(value) + } else { + self.insert_impl::(value) + } + } + + fn insert_impl(self, value: V) -> &'a mut V + where Sz: Size + { + let index = self.map.entries.len(); + self.map.entries.push(Bucket { hash: self.hash, key: self.key, value: value }); + let old_pos = Pos::with_hash::(index, self.hash); + self.map.insert_phase_2::(self.probe, old_pos); + &mut {self.map}.entries[index].value + } +} + +impl IndexMap + where K: Hash + Eq, + S: BuildHasher, +{ + // FIXME: reduce duplication (compare with insert) + fn entry_phase_1(&mut self, key: K) -> Entry + where Sz: Size + { + let hash = hash_elem_using(&self.hash_builder, &key); + self.core.entry_phase_1::(hash, key) + } + + /// Remove all key-value pairs in the map, while preserving its capacity. + /// + /// Computes in **O(n)** time. + pub fn clear(&mut self) { + self.core.clear(); + } + + /// Reserve capacity for `additional` more key-value pairs. + /// + /// FIXME Not implemented fully yet. + pub fn reserve(&mut self, additional: usize) { + if additional > 0 { + self.reserve_one(); + } + } + + // First phase: Look for the preferred location for key. + // + // We will know if `key` is already in the map, before we need to insert it. + // When we insert they key, it might be that we need to continue displacing + // entries (robin hood hashing), in which case Inserted::RobinHood is returned + fn insert_phase_1(&mut self, key: K, value: V) -> Inserted + where Sz: Size + { + let hash = hash_elem_using(&self.hash_builder, &key); + self.core.insert_phase_1::(hash, key, value) + } + + fn reserve_one(&mut self) { + if self.len() == self.capacity() { + dispatch_32_vs_64!(self.double_capacity()); + } + } + fn double_capacity(&mut self) + where Sz: Size, + { + self.core.double_capacity::(); + } + + /// Insert a key-value pair in the map. 
+ /// + /// If an equivalent key already exists in the map: the key remains and + /// retains in its place in the order, its corresponding value is updated + /// with `value` and the older value is returned inside `Some(_)`. + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted, last in order, and `None` is returned. + /// + /// Computes in **O(1)** time (amortized average). + /// + /// See also [`entry`](#method.entry) if you you want to insert *or* modify + /// or if you need to get the index of the corresponding key-value pair. + pub fn insert(&mut self, key: K, value: V) -> Option { + self.reserve_one(); + if self.size_class_is_64bit() { + match self.insert_phase_1::(key, value) { + Inserted::Swapped { prev_value } => Some(prev_value), + Inserted::Done => None, + Inserted::RobinHood { probe, old_pos } => { + self.core.insert_phase_2::(probe, old_pos); + None + } + } + } else { + match self.insert_phase_1::(key, value) { + Inserted::Swapped { prev_value } => Some(prev_value), + Inserted::Done => None, + Inserted::RobinHood { probe, old_pos } => { + self.core.insert_phase_2::(probe, old_pos); + None + } + } + } + } + + /// Get the given key’s corresponding entry in the map for insertion and/or + /// in-place manipulation. + /// + /// Computes in **O(1)** time (amortized average). 
+ pub fn entry(&mut self, key: K) -> Entry { + self.reserve_one(); + dispatch_32_vs_64!(self.entry_phase_1(key)) + } + + + /// Return an iterator over the key-value pairs of the map, in their order + pub fn iter(&self) -> Iter { + Iter { + iter: self.core.entries.iter() + } + } + + /// Return an iterator over the key-value pairs of the map, in their order + pub fn iter_mut(&mut self) -> IterMut { + IterMut { + iter: self.core.entries.iter_mut() + } + } + + /// Return an iterator over the keys of the map, in their order + pub fn keys(&self) -> Keys { + Keys { + iter: self.core.entries.iter() + } + } + + /// Return an iterator over the values of the map, in their order + pub fn values(&self) -> Values { + Values { + iter: self.core.entries.iter() + } + } + + /// Return an iterator over mutable references to the the values of the map, + /// in their order + pub fn values_mut(&mut self) -> ValuesMut { + ValuesMut { + iter: self.core.entries.iter_mut() + } + } + + /// Return `true` if an equivalent to `key` exists in the map. + /// + /// Computes in **O(1)** time (average). + pub fn contains_key(&self, key: &Q) -> bool + where Q: Hash + Equivalent, + { + self.find(key).is_some() + } + + /// Return a reference to the value stored for `key`, if it is present, + /// else `None`. + /// + /// Computes in **O(1)** time (average). 
+ pub fn get(&self, key: &Q) -> Option<&V> + where Q: Hash + Equivalent, + { + self.get_full(key).map(third) + } + + /// Return item index, key and value + pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> + where Q: Hash + Equivalent, + { + if let Some((_, found)) = self.find(key) { + let entry = &self.core.entries[found]; + Some((found, &entry.key, &entry.value)) + } else { + None + } + } + + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where Q: Hash + Equivalent, + { + self.get_full_mut(key).map(third) + } + + pub fn get_full_mut(&mut self, key: &Q) + -> Option<(usize, &K, &mut V)> + where Q: Hash + Equivalent, + { + self.get_full_mut2_impl(key).map(|(i, k, v)| (i, &*k, v)) + } + + + pub(crate) fn get_full_mut2_impl(&mut self, key: &Q) + -> Option<(usize, &mut K, &mut V)> + where Q: Hash + Equivalent, + { + if let Some((_, found)) = self.find(key) { + let entry = &mut self.core.entries[found]; + Some((found, &mut entry.key, &mut entry.value)) + } else { + None + } + } + + + /// Return probe (indices) and position (entries) + pub(crate) fn find(&self, key: &Q) -> Option<(usize, usize)> + where Q: Hash + Equivalent, + { + if self.len() == 0 { return None; } + let h = hash_elem_using(&self.hash_builder, key); + self.core.find_using(h, move |entry| { Q::equivalent(key, &entry.key) }) + } + + /// NOTE: Same as .swap_remove + /// + /// Computes in **O(1)** time (average). + pub fn remove(&mut self, key: &Q) -> Option + where Q: Hash + Equivalent, + { + self.swap_remove(key) + } + + /// Remove the key-value pair equivalent to `key` and return + /// its value. + /// + /// Like `Vec::swap_remove`, the pair is removed by swapping it with the + /// last element of the map and popping it off. **This perturbs + /// the postion of what used to be the last element!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(1)** time (average). 
+ pub fn swap_remove(&mut self, key: &Q) -> Option + where Q: Hash + Equivalent, + { + self.swap_remove_full(key).map(third) + } + + /// Remove the key-value pair equivalent to `key` and return it and + /// the index it had. + /// + /// Like `Vec::swap_remove`, the pair is removed by swapping it with the + /// last element of the map and popping it off. **This perturbs + /// the postion of what used to be the last element!** + /// + /// Return `None` if `key` is not in map. + pub fn swap_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> + where Q: Hash + Equivalent, + { + let (probe, found) = match self.find(key) { + None => return None, + Some(t) => t, + }; + let (k, v) = self.core.remove_found(probe, found); + Some((found, k, v)) + } + + /// Remove the last key-value pair + /// + /// Computes in **O(1)** time (average). + pub fn pop(&mut self) -> Option<(K, V)> { + self.core.pop_impl() + } + + /// Scan through each key-value pair in the map and keep those where the + /// closure `keep` returns `true`. + /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + pub fn retain(&mut self, mut keep: F) + where F: FnMut(&K, &mut V) -> bool, + { + self.retain_mut(move |k, v| keep(k, v)); + } + + pub(crate) fn retain_mut(&mut self, keep: F) + where F: FnMut(&mut K, &mut V) -> bool, + { + dispatch_32_vs_64!(self.retain_mut_sz::<_>(keep)); + } + + fn retain_mut_sz(&mut self, keep: F) + where F: FnMut(&mut K, &mut V) -> bool, + Sz: Size, + { + self.core.retain_in_order_impl::(keep); + } + + /// Sort the map’s key-value pairs by the default ordering of the keys. + /// + /// See `sort_by` for details. + pub fn sort_keys(&mut self) + where K: Ord, + { + self.core.sort_by(key_cmp) + } + + /// Sort the map’s key-value pairs in place using the comparison + /// function `compare`. 
+ /// + /// The comparison function receives two key and value pairs to compare (you + /// can sort by keys or values or their combination as needed). + /// + /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is + /// the length of the map and *c* the capacity. The sort is stable. + pub fn sort_by(&mut self, compare: F) + where F: FnMut(&K, &V, &K, &V) -> Ordering, + { + self.core.sort_by(compare) + } + + /// Sort the key-value pairs of the map and return a by value iterator of + /// the key-value pairs with the result. + /// + /// The sort is stable. + pub fn sorted_by(mut self, mut cmp: F) -> IntoIter + where F: FnMut(&K, &V, &K, &V) -> Ordering + { + self.core.entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + self.into_iter() + } + + /// Clears the `IndexMap`, returning all key-value pairs as a drain iterator. + /// Keeps the allocated memory for reuse. + pub fn drain(&mut self, range: RangeFull) -> Drain { + self.core.clear_indices(); + + Drain { + iter: self.core.entries.drain(range), + } + } +} + +fn key_cmp(k1: &K, _v1: &V, k2: &K, _v2: &V) -> Ordering + where K: Ord +{ + Ord::cmp(k1, k2) +} + +impl IndexMap { + /// Get a key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time. + pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { + self.core.entries.get(index).map(Bucket::refs) + } + + /// Get a key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time. + pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> { + self.core.entries.get_mut(index).map(Bucket::muts) + } + + /// Remove the key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time (average). 
+ pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { + let (probe, found) = match self.core.entries.get(index) + .map(|e| self.core.find_existing_entry(e)) + { + None => return None, + Some(t) => t, + }; + Some(self.core.remove_found(probe, found)) + } +} + +// Methods that don't use any properties (Hash / Eq) of K. +// +// It's cleaner to separate them out, then the compiler checks that we are not +// using Hash + Eq at all in these methods. +// +// However, we should probably not let this show in the public API or docs. +impl OrderMapCore { + fn len(&self) -> usize { self.entries.len() } + + fn capacity(&self) -> usize { + usable_capacity(self.raw_capacity()) + } + + fn clear(&mut self) { + self.entries.clear(); + self.clear_indices(); + } + + // clear self.indices to the same state as "no elements" + fn clear_indices(&mut self) { + for pos in self.indices.iter_mut() { + *pos = Pos::none(); + } + } + + fn first_allocation(&mut self) { + debug_assert_eq!(self.len(), 0); + let raw_cap = 8usize; + self.mask = raw_cap.wrapping_sub(1); + self.indices = vec![Pos::none(); raw_cap].into_boxed_slice(); + self.entries = Vec::with_capacity(usable_capacity(raw_cap)); + } + + #[inline(never)] + // `Sz` is *current* Size class, before grow + fn double_capacity(&mut self) + where Sz: Size + { + debug_assert!(self.raw_capacity() == 0 || self.len() > 0); + if self.raw_capacity() == 0 { + return self.first_allocation(); + } + + // find first ideally placed element -- start of cluster + let mut first_ideal = 0; + for (i, index) in enumerate(&*self.indices) { + if let Some(pos) = index.pos() { + if 0 == probe_distance(self.mask, self.entries[pos].hash, i) { + first_ideal = i; + break; + } + } + } + + // visit the entries in an order where we can simply reinsert them + // into self.indices without any bucket stealing. 
+ let new_raw_cap = self.indices.len() * 2; + let old_indices = replace(&mut self.indices, vec![Pos::none(); new_raw_cap].into_boxed_slice()); + self.mask = new_raw_cap.wrapping_sub(1); + + // `Sz` is the old size class, and either u32 or u64 is the new + for &pos in &old_indices[first_ideal..] { + dispatch_32_vs_64!(self.reinsert_entry_in_order::(pos)); + } + + for &pos in &old_indices[..first_ideal] { + dispatch_32_vs_64!(self.reinsert_entry_in_order::(pos)); + } + let more = self.capacity() - self.len(); + self.entries.reserve_exact(more); + } + + // write to self.indices + // read from self.entries at `pos` + // + // reinserting rewrites all `Pos` entries anyway. This handles transitioning + // from u32 to u64 size class if needed by using the two type parameters. + fn reinsert_entry_in_order(&mut self, pos: Pos) + where SzNew: Size, + SzOld: Size, + { + if let Some((i, hash_proxy)) = pos.resolve::() { + // only if the size class is conserved can we use the short hash + let entry_hash = if SzOld::is_same_size::() { + hash_proxy.get_short_hash(&self.entries, i).into_hash() + } else { + self.entries[i].hash + }; + // find first empty bucket and insert there + let mut probe = desired_pos(self.mask, entry_hash); + probe_loop!(probe < self.indices.len(), { + if let Some(_) = self.indices[probe].resolve::() { + /* nothing */ + } else { + // empty bucket, insert here + self.indices[probe] = Pos::with_hash::(i, entry_hash); + return; + } + }); + } + } + + fn pop_impl(&mut self) -> Option<(K, V)> { + let (probe, found) = match self.entries.last() + .map(|e| self.find_existing_entry(e)) + { + None => return None, + Some(t) => t, + }; + debug_assert_eq!(found, self.entries.len() - 1); + Some(self.remove_found(probe, found)) + } + + // FIXME: reduce duplication (compare with insert) + fn entry_phase_1(&mut self, hash: HashValue, key: K) -> Entry + where Sz: Size, + K: Eq, + { + let mut probe = desired_pos(self.mask, hash); + let mut dist = 0; + debug_assert!(self.len() < 
self.raw_capacity()); + probe_loop!(probe < self.indices.len(), { + if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { + let entry_hash = hash_proxy.get_short_hash(&self.entries, i); + // if existing element probed less than us, swap + let their_dist = probe_distance(self.mask, entry_hash.into_hash(), probe); + if their_dist < dist { + // robin hood: steal the spot if it's better for us + return Entry::Vacant(VacantEntry { + map: self, + hash: hash, + key: key, + probe: probe, + }); + } else if entry_hash == hash && self.entries[i].key == key { + return Entry::Occupied(OccupiedEntry { + map: self, + key: key, + probe: probe, + index: i, + }); + } + } else { + // empty bucket, insert here + return Entry::Vacant(VacantEntry { + map: self, + hash: hash, + key: key, + probe: probe, + }); + } + dist += 1; + }); + } + + // First phase: Look for the preferred location for key. + // + // We will know if `key` is already in the map, before we need to insert it. + // When we insert they key, it might be that we need to continue displacing + // entries (robin hood hashing), in which case Inserted::RobinHood is returned + fn insert_phase_1(&mut self, hash: HashValue, key: K, value: V) -> Inserted + where Sz: Size, + K: Eq, + { + let mut probe = desired_pos(self.mask, hash); + let mut dist = 0; + let insert_kind; + debug_assert!(self.len() < self.raw_capacity()); + probe_loop!(probe < self.indices.len(), { + let pos = &mut self.indices[probe]; + if let Some((i, hash_proxy)) = pos.resolve::() { + let entry_hash = hash_proxy.get_short_hash(&self.entries, i); + // if existing element probed less than us, swap + let their_dist = probe_distance(self.mask, entry_hash.into_hash(), probe); + if their_dist < dist { + // robin hood: steal the spot if it's better for us + let index = self.entries.len(); + insert_kind = Inserted::RobinHood { + probe: probe, + old_pos: Pos::with_hash::(index, hash), + }; + break; + } else if entry_hash == hash && self.entries[i].key == key { + 
return Inserted::Swapped { + prev_value: replace(&mut self.entries[i].value, value), + }; + } + } else { + // empty bucket, insert here + let index = self.entries.len(); + *pos = Pos::with_hash::(index, hash); + insert_kind = Inserted::Done; + break; + } + dist += 1; + }); + self.entries.push(Bucket { hash: hash, key: key, value: value }); + insert_kind + } + + + /// phase 2 is post-insert where we forward-shift `Pos` in the indices. + fn insert_phase_2(&mut self, mut probe: usize, mut old_pos: Pos) + where Sz: Size + { + probe_loop!(probe < self.indices.len(), { + let pos = &mut self.indices[probe]; + if pos.is_none() { + *pos = old_pos; + break; + } else { + old_pos = replace(pos, old_pos); + } + }); + } + + + /// Return probe (indices) and position (entries) + fn find_using(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)> + where F: Fn(&Bucket) -> bool, + { + dispatch_32_vs_64!(self.find_using_impl::<_>(hash, key_eq)) + } + + fn find_using_impl(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)> + where F: Fn(&Bucket) -> bool, + Sz: Size, + { + debug_assert!(self.len() > 0); + let mut probe = desired_pos(self.mask, hash); + let mut dist = 0; + probe_loop!(probe < self.indices.len(), { + if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { + let entry_hash = hash_proxy.get_short_hash(&self.entries, i); + if dist > probe_distance(self.mask, entry_hash.into_hash(), probe) { + // give up when probe distance is too long + return None; + } else if entry_hash == hash && key_eq(&self.entries[i]) { + return Some((probe, i)); + } + } else { + return None; + } + dist += 1; + }); + } + + /// Find `entry` which is already placed inside self.entries; + /// return its probe and entry index. 
+ fn find_existing_entry(&self, entry: &Bucket) -> (usize, usize) + { + debug_assert!(self.len() > 0); + + let hash = entry.hash; + let actual_pos = ptrdistance(&self.entries[0], entry); + let probe = dispatch_32_vs_64!(self => + find_existing_entry_at(&self.indices, hash, self.mask, actual_pos)); + (probe, actual_pos) + } + + fn remove_found(&mut self, probe: usize, found: usize) -> (K, V) { + dispatch_32_vs_64!(self.remove_found_impl(probe, found)) + } + + fn remove_found_impl(&mut self, probe: usize, found: usize) -> (K, V) + where Sz: Size + { + // index `probe` and entry `found` is to be removed + // use swap_remove, but then we need to update the index that points + // to the other entry that has to move + self.indices[probe] = Pos::none(); + let entry = self.entries.swap_remove(found); + + // correct index that points to the entry that had to swap places + if let Some(entry) = self.entries.get(found) { + // was not last element + // examine new element in `found` and find it in indices + let mut probe = desired_pos(self.mask, entry.hash); + probe_loop!(probe < self.indices.len(), { + if let Some((i, _)) = self.indices[probe].resolve::() { + if i >= self.entries.len() { + // found it + self.indices[probe] = Pos::with_hash::(found, entry.hash); + break; + } + } + }); + } + + self.backward_shift_after_removal::(probe); + + (entry.key, entry.value) + } + + fn backward_shift_after_removal(&mut self, probe_at_remove: usize) + where Sz: Size + { + // backward shift deletion in self.indices + // after probe, shift all non-ideally placed indices backward + let mut last_probe = probe_at_remove; + let mut probe = probe_at_remove + 1; + probe_loop!(probe < self.indices.len(), { + if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { + let entry_hash = hash_proxy.get_short_hash(&self.entries, i); + if probe_distance(self.mask, entry_hash.into_hash(), probe) > 0 { + self.indices[last_probe] = self.indices[probe]; + self.indices[probe] = Pos::none(); + } else { 
+ break; + } + } else { + break; + } + last_probe = probe; + }); + } + + fn retain_in_order_impl(&mut self, mut keep: F) + where F: FnMut(&mut K, &mut V) -> bool, + Sz: Size, + { + // Like Vec::retain in self.entries; for each removed key-value pair, + // we clear its corresponding spot in self.indices, and run the + // usual backward shift in self.indices. + let len = self.entries.len(); + let mut n_deleted = 0; + for i in 0..len { + let will_keep; + let hash; + { + let ent = &mut self.entries[i]; + hash = ent.hash; + will_keep = keep(&mut ent.key, &mut ent.value); + }; + let probe = find_existing_entry_at::(&self.indices, hash, self.mask, i); + if !will_keep { + n_deleted += 1; + self.indices[probe] = Pos::none(); + self.backward_shift_after_removal::(probe); + } else if n_deleted > 0 { + self.indices[probe].set_pos::(i - n_deleted); + self.entries.swap(i - n_deleted, i); + } + } + self.entries.truncate(len - n_deleted); + } + + fn sort_by(&mut self, mut compare: F) + where F: FnMut(&K, &V, &K, &V) -> Ordering, + { + // Temporarily use the hash field in a bucket to store the old index. + // Save the old hash values in `side_index`. Then we can sort + // `self.entries` in place. + let mut side_index = Vec::from_iter(enumerate(&mut self.entries).map(|(i, elt)| { + replace(&mut elt.hash, HashValue(i)).get() + })); + + self.entries.sort_by(move |ei, ej| compare(&ei.key, &ei.value, &ej.key, &ej.value)); + + // Write back the hash values from side_index and fill `side_index` with + // a mapping from the old to the new index instead. 
+ for (i, ent) in enumerate(&mut self.entries) { + let old_index = ent.hash.get(); + ent.hash = HashValue(replace(&mut side_index[old_index], i)); + } + + // Apply new index to self.indices + dispatch_32_vs_64!(self => apply_new_index(&mut self.indices, &side_index)); + + fn apply_new_index(indices: &mut [Pos], new_index: &[usize]) + where Sz: Size + { + for pos in indices { + if let Some((i, _)) = pos.resolve::() { + pos.set_pos::(new_index[i]); + } + } + } + } +} + +/// Find, in the indices, an entry that already exists at a known position +/// inside self.entries in the IndexMap. +/// +/// This is effectively reverse lookup, from the entries into the hash buckets. +/// +/// Return the probe index (into self.indices) +/// +/// + indices: The self.indices of the map, +/// + hash: The full hash value from the bucket +/// + mask: self.mask. +/// + entry_index: The index of the entry in self.entries +fn find_existing_entry_at(indices: &[Pos], hash: HashValue, + mask: usize, entry_index: usize) -> usize + where Sz: Size, +{ + let mut probe = desired_pos(mask, hash); + probe_loop!(probe < indices.len(), { + // the entry *must* be present; if we hit a Pos::none this was not true + // and there is a debug assertion in resolve_existing_index for that. 
+ let i = indices[probe].resolve_existing_index::(); + if i == entry_index { return probe; } + }); +} + +use std::slice::Iter as SliceIter; +use std::slice::IterMut as SliceIterMut; +use std::vec::IntoIter as VecIntoIter; + +pub struct Keys<'a, K: 'a, V: 'a> { + pub(crate) iter: SliceIter<'a, Bucket>, +} + +impl<'a, K, V> Iterator for Keys<'a, K, V> { + type Item = &'a K; + + iterator_methods!(Bucket::key_ref); +} + +impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> { + fn next_back(&mut self) -> Option<&'a K> { + self.iter.next_back().map(Bucket::key_ref) + } +} + +impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct Values<'a, K: 'a, V: 'a> { + iter: SliceIter<'a, Bucket>, +} + +impl<'a, K, V> Iterator for Values<'a, K, V> { + type Item = &'a V; + + iterator_methods!(Bucket::value_ref); +} + +impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::value_ref) + } +} + +impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct ValuesMut<'a, K: 'a, V: 'a> { + iter: SliceIterMut<'a, Bucket>, +} + +impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { + type Item = &'a mut V; + + iterator_methods!(Bucket::value_mut); +} + +impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::value_mut) + } +} + +impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct Iter<'a, K: 'a, V: 'a> { + iter: SliceIter<'a, Bucket>, +} + +impl<'a, K, V> Iterator for Iter<'a, K, V> { + type Item = (&'a K, &'a V); + + iterator_methods!(Bucket::refs); +} + +impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::refs) + } +} + +impl<'a, K, V> ExactSizeIterator for Iter<'a, 
K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct IterMut<'a, K: 'a, V: 'a> { + iter: SliceIterMut<'a, Bucket>, +} + +impl<'a, K, V> Iterator for IterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + iterator_methods!(Bucket::ref_mut); +} + +impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::ref_mut) + } +} + +impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct IntoIter { + pub(crate) iter: VecIntoIter>, +} + +impl Iterator for IntoIter { + type Item = (K, V); + + iterator_methods!(Bucket::key_value); +} + +impl<'a, K, V> DoubleEndedIterator for IntoIter { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::key_value) + } +} + +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct Drain<'a, K, V> where K: 'a, V: 'a { + pub(crate) iter: ::std::vec::Drain<'a, Bucket> +} + +impl<'a, K, V> Iterator for Drain<'a, K, V> { + type Item = (K, V); + + iterator_methods!(Bucket::key_value); +} + +impl<'a, K, V> DoubleEndedIterator for Drain<'a, K, V> { + double_ended_iterator_methods!(Bucket::key_value); +} + + +impl<'a, K, V, S> IntoIterator for &'a IndexMap + where K: Hash + Eq, + S: BuildHasher, +{ + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut IndexMap + where K: Hash + Eq, + S: BuildHasher, +{ + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for IndexMap + where K: Hash + Eq, + S: BuildHasher, +{ + type Item = (K, V); + type IntoIter = IntoIter; + fn into_iter(self) -> Self::IntoIter { + IntoIter { + iter: self.core.entries.into_iter(), + } + } +} + +use std::ops::{Index, IndexMut}; + +impl<'a, 
K, V, Q: ?Sized, S> Index<&'a Q> for IndexMap + where Q: Hash + Equivalent, + K: Hash + Eq, + S: BuildHasher, +{ + type Output = V; + + /// ***Panics*** if `key` is not present in the map. + fn index(&self, key: &'a Q) -> &V { + if let Some(v) = self.get(key) { + v + } else { + panic!("IndexMap: key not found") + } + } +} + +/// Mutable indexing allows changing / updating values of key-value +/// pairs that are already present. +/// +/// You can **not** insert new pairs with index syntax, use `.insert()`. +impl<'a, K, V, Q: ?Sized, S> IndexMut<&'a Q> for IndexMap + where Q: Hash + Equivalent, + K: Hash + Eq, + S: BuildHasher, +{ + /// ***Panics*** if `key` is not present in the map. + fn index_mut(&mut self, key: &'a Q) -> &mut V { + if let Some(v) = self.get_mut(key) { + v + } else { + panic!("IndexMap: key not found") + } + } +} + +impl FromIterator<(K, V)> for IndexMap + where K: Hash + Eq, + S: BuildHasher + Default, +{ + /// Create an `IndexMap` from the sequence of key-value pairs in the + /// iterable. + /// + /// `from_iter` uses the same logic as `extend`. See + /// [`extend`](#method.extend) for more details. + fn from_iter>(iterable: I) -> Self { + let iter = iterable.into_iter(); + let (low, _) = iter.size_hint(); + let mut map = Self::with_capacity_and_hasher(low, <_>::default()); + map.extend(iter); + map + } +} + +impl Extend<(K, V)> for IndexMap + where K: Hash + Eq, + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// This is equivalent to calling [`insert`](#method.insert) for each of + /// them in order, which means that for keys that already existed + /// in the map, their value is updated but it keeps the existing order. + /// + /// New keys are inserted inserted in the order in the sequence. If + /// equivalents of a key occur more than once, the last corresponding value + /// prevails. 
+ fn extend>(&mut self, iterable: I) { + for (k, v) in iterable { self.insert(k, v); } + } +} + +impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap + where K: Hash + Eq + Copy, + V: Copy, + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// See the first extend method for more details. + fn extend>(&mut self, iterable: I) { + self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))); + } +} + +impl Default for IndexMap + where S: BuildHasher + Default, +{ + /// Return an empty `IndexMap` + fn default() -> Self { + Self::with_capacity_and_hasher(0, S::default()) + } +} + +impl PartialEq> for IndexMap + where K: Hash + Eq, + V1: PartialEq, + S1: BuildHasher, + S2: BuildHasher +{ + fn eq(&self, other: &IndexMap) -> bool { + if self.len() != other.len() { + return false; + } + + self.iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl Eq for IndexMap + where K: Eq + Hash, + V: Eq, + S: BuildHasher +{ +} + +#[cfg(test)] +mod tests { + use super::*; + use util::enumerate; + + #[test] + fn it_works() { + let mut map = IndexMap::new(); + assert_eq!(map.is_empty(), true); + map.insert(1, ()); + map.insert(1, ()); + assert_eq!(map.len(), 1); + assert!(map.get(&1).is_some()); + assert_eq!(map.is_empty(), false); + } + + #[test] + fn new() { + let map = IndexMap::::new(); + println!("{:?}", map); + assert_eq!(map.capacity(), 0); + assert_eq!(map.len(), 0); + assert_eq!(map.is_empty(), true); + } + + #[test] + fn insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in enumerate(&insert) { + assert_eq!(map.len(), i); + map.insert(elt, elt); + assert_eq!(map.len(), i + 1); + assert_eq!(map.get(&elt), Some(&elt)); + assert_eq!(map[&elt], elt); + } + println!("{:?}", map); + + for &elt in ¬_present { + assert!(map.get(&elt).is_none()); + } + } + + #[test] + fn insert_2() { + let 
mut map = IndexMap::with_capacity(16); + + let mut keys = vec![]; + keys.extend(0..16); + keys.extend(128..267); + + for &i in &keys { + let old_map = map.clone(); + map.insert(i, ()); + for key in old_map.keys() { + if !map.get(key).is_some() { + println!("old_map: {:?}", old_map); + println!("map: {:?}", map); + panic!("did not find {} in map", key); + } + } + } + + for &i in &keys { + assert!(map.get(&i).is_some(), "did not find {}", i); + } + } + + #[test] + fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, ()); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().zip(map.keys()) { + assert_eq!(a, b); + } + for (i, k) in (0..insert.len()).zip(map.keys()) { + assert_eq!(map.get_index(i).unwrap().0, k); + } + } + + #[test] + fn grow() { + let insert = [0, 4, 2, 12, 8, 7, 11]; + let not_present = [1, 3, 6, 9, 10]; + let mut map = IndexMap::with_capacity(insert.len()); + + + for (i, &elt) in enumerate(&insert) { + assert_eq!(map.len(), i); + map.insert(elt, elt); + assert_eq!(map.len(), i + 1); + assert_eq!(map.get(&elt), Some(&elt)); + assert_eq!(map[&elt], elt); + } + + println!("{:?}", map); + for &elt in &insert { + map.insert(elt * 10, elt); + } + for &elt in &insert { + map.insert(elt * 100, elt); + } + for (i, &elt) in insert.iter().cycle().enumerate().take(100) { + map.insert(elt * 100 + i as i32, elt); + } + println!("{:?}", map); + for &elt in ¬_present { + assert!(map.get(&elt).is_none()); + } + } + + #[test] + fn remove() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, elt); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().zip(map.keys()) { + assert_eq!(a, b); + } + + let remove_fail = [99, 77]; + let 
remove = [4, 12, 8, 7]; + + for &key in &remove_fail { + assert!(map.swap_remove_full(&key).is_none()); + } + println!("{:?}", map); + for &key in &remove { + //println!("{:?}", map); + let index = map.get_full(&key).unwrap().0; + assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); + } + println!("{:?}", map); + + for key in &insert { + assert_eq!(map.get(key).is_some(), !remove.contains(key)); + } + assert_eq!(map.len(), insert.len() - remove.len()); + assert_eq!(map.keys().count(), insert.len() - remove.len()); + } + + #[test] + fn remove_to_empty() { + let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 }; + map.swap_remove(&5).unwrap(); + map.swap_remove(&4).unwrap(); + map.swap_remove(&0).unwrap(); + assert!(map.is_empty()); + } + + #[test] + fn swap_remove_index() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, elt * 2); + } + + let mut vector = insert.to_vec(); + let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; + + // check that the same swap remove sequence on vec and map + // have the same result. 
+ for &rm in remove_sequence { + let out_vec = vector.swap_remove(rm); + let (out_map, _) = map.swap_remove_index(rm).unwrap(); + assert_eq!(out_vec, out_map); + } + assert_eq!(vector.len(), map.len()); + for (a, b) in vector.iter().zip(map.keys()) { + assert_eq!(a, b); + } + } + + #[test] + fn partial_eq_and_eq() { + let mut map_a = IndexMap::new(); + map_a.insert(1, "1"); + map_a.insert(2, "2"); + let mut map_b = map_a.clone(); + assert_eq!(map_a, map_b); + map_b.remove(&1); + assert_ne!(map_a, map_b); + + let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.to_owned())).collect(); + assert_ne!(map_a, map_c); + assert_ne!(map_c, map_a); + } + + #[test] + fn extend() { + let mut map = IndexMap::new(); + map.extend(vec![(&1, &2), (&3, &4)]); + map.extend(vec![(5, 6)]); + assert_eq!(map.into_iter().collect::>(), vec![(1, 2), (3, 4), (5, 6)]); + } + + #[test] + fn entry() { + let mut map = IndexMap::new(); + + map.insert(1, "1"); + map.insert(2, "2"); + { + let e = map.entry(3); + assert_eq!(e.index(), 2); + let e = e.or_insert("3"); + assert_eq!(e, &"3"); + } + + let e = map.entry(2); + assert_eq!(e.index(), 1); + assert_eq!(e.key(), &2); + match e { + Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), + Entry::Vacant(_) => panic!() + } + assert_eq!(e.or_insert("4"), &"2"); + } +} diff --git a/third_party/rust/indexmap/src/mutable_keys.rs b/third_party/rust/indexmap/src/mutable_keys.rs new file mode 100644 index 000000000000..9291f96f2802 --- /dev/null +++ b/third_party/rust/indexmap/src/mutable_keys.rs @@ -0,0 +1,71 @@ + +use std::hash::Hash; +use std::hash::BuildHasher; + +use super::{IndexMap, Equivalent}; + +pub struct PrivateMarker { } + +/// Opt-in mutable access to keys. +/// +/// These methods expose `&mut K`, mutable references to the key as it is stored +/// in the map. +/// You are allowed to modify the keys in the hashmap **if the modifcation +/// does not change the key’s hash and equality**. 
+/// +/// If keys are modified erronously, you can no longer look them up. +/// This is sound (memory safe) but a logical error hazard (just like +/// implementing PartialEq, Eq, or Hash incorrectly would be). +/// +/// `use` this trait to enable its methods for `IndexMap`. +pub trait MutableKeys { + type Key; + type Value; + + /// Return item index, mutable reference to key and value + fn get_full_mut2(&mut self, key: &Q) + -> Option<(usize, &mut Self::Key, &mut Self::Value)> + where Q: Hash + Equivalent; + + /// Scan through each key-value pair in the map and keep those where the + /// closure `keep` returns `true`. + /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + fn retain2(&mut self, keep: F) + where F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; + + /// This method is not useful in itself – it is there to “seal” the trait + /// for external implementation, so that we can add methods without + /// causing breaking changes. + fn __private_marker(&self) -> PrivateMarker; +} + +/// Opt-in mutable access to keys. +/// +/// See [`MutableKeys`](trait.MutableKeys.html) for more information. 
+impl MutableKeys for IndexMap + where K: Eq + Hash, + S: BuildHasher, +{ + type Key = K; + type Value = V; + fn get_full_mut2(&mut self, key: &Q) + -> Option<(usize, &mut K, &mut V)> + where Q: Hash + Equivalent, + { + self.get_full_mut2_impl(key) + } + + fn retain2(&mut self, keep: F) + where F: FnMut(&mut K, &mut V) -> bool, + { + self.retain_mut(keep) + } + + fn __private_marker(&self) -> PrivateMarker { + PrivateMarker { } + } +} diff --git a/third_party/rust/indexmap/src/serde.rs b/third_party/rust/indexmap/src/serde.rs new file mode 100644 index 000000000000..bd082a7c313d --- /dev/null +++ b/third_party/rust/indexmap/src/serde.rs @@ -0,0 +1,123 @@ + +extern crate serde; + +use self::serde::ser::{Serialize, Serializer, SerializeMap, SerializeSeq}; +use self::serde::de::{Deserialize, Deserializer, MapAccess, SeqAccess, Visitor}; + +use std::fmt::{self, Formatter}; +use std::hash::{BuildHasher, Hash}; +use std::marker::PhantomData; + +use IndexMap; + +/// Requires crate feature `"serde-1"` +impl Serialize for IndexMap + where K: Serialize + Hash + Eq, + V: Serialize, + S: BuildHasher +{ + fn serialize(&self, serializer: T) -> Result + where T: Serializer + { + let mut map_serializer = try!(serializer.serialize_map(Some(self.len()))); + for (key, value) in self { + try!(map_serializer.serialize_entry(key, value)); + } + map_serializer.end() + } +} + +struct OrderMapVisitor(PhantomData<(K, V, S)>); + +impl<'de, K, V, S> Visitor<'de> for OrderMapVisitor + where K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher +{ + type Value = IndexMap; + + fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "a map") + } + + fn visit_map(self, mut map: A) -> Result + where A: MapAccess<'de> + { + let mut values = IndexMap::with_capacity_and_hasher(map.size_hint().unwrap_or(0), S::default()); + + while let Some((key, value)) = try!(map.next_entry()) { + values.insert(key, value); + } + + Ok(values) + } +} + +/// 
Requires crate feature `"serde-1"` +impl<'de, K, V, S> Deserialize<'de> for IndexMap + where K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher +{ + fn deserialize(deserializer: D) -> Result + where D: Deserializer<'de> + { + deserializer.deserialize_map(OrderMapVisitor(PhantomData)) + } +} + + +use IndexSet; + +/// Requires crate feature `"serde-1"` +impl Serialize for IndexSet + where T: Serialize + Hash + Eq, + S: BuildHasher +{ + fn serialize(&self, serializer: Se) -> Result + where Se: Serializer + { + let mut set_serializer = try!(serializer.serialize_seq(Some(self.len()))); + for value in self { + try!(set_serializer.serialize_element(value)); + } + set_serializer.end() + } +} + +struct OrderSetVisitor(PhantomData<(T, S)>); + +impl<'de, T, S> Visitor<'de> for OrderSetVisitor + where T: Deserialize<'de> + Eq + Hash, + S: Default + BuildHasher +{ + type Value = IndexSet; + + fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "a set") + } + + fn visit_seq(self, mut seq: A) -> Result + where A: SeqAccess<'de> + { + let mut values = IndexSet::with_capacity_and_hasher(seq.size_hint().unwrap_or(0), S::default()); + + while let Some(value) = try!(seq.next_element()) { + values.insert(value); + } + + Ok(values) + } +} + +/// Requires crate feature `"serde-1"` +impl<'de, T, S> Deserialize<'de> for IndexSet + where T: Deserialize<'de> + Eq + Hash, + S: Default + BuildHasher +{ + fn deserialize(deserializer: D) -> Result + where D: Deserializer<'de> + { + deserializer.deserialize_seq(OrderSetVisitor(PhantomData)) + } +} diff --git a/third_party/rust/indexmap/src/set.rs b/third_party/rust/indexmap/src/set.rs new file mode 100644 index 000000000000..e75f5169cfd0 --- /dev/null +++ b/third_party/rust/indexmap/src/set.rs @@ -0,0 +1,1135 @@ +//! 
A hash set implemented using `IndexMap` + +use std::cmp::Ordering; +use std::collections::hash_map::RandomState; +use std::fmt; +use std::iter::{FromIterator, Chain}; +use std::hash::{Hash, BuildHasher}; +use std::ops::RangeFull; +use std::ops::{BitAnd, BitOr, BitXor, Sub}; +use std::slice; +use std::vec; + +use super::{IndexMap, Equivalent}; + +type Bucket = super::Bucket; + +/// A hash set where the iteration order of the values is independent of their +/// hash values. +/// +/// The interface is closely compatible with the standard `HashSet`, but also +/// has additional features. +/// +/// # Order +/// +/// The values have a consistent order that is determined by the sequence of +/// insertion and removal calls on the set. The order does not depend on the +/// values or the hash function at all. Note that insertion order and value +/// are not affected if a re-insertion is attempted once an element is +/// already present. +/// +/// All iterators traverse the set *in order*. Set operation iterators like +/// `union` produce a concatenated order, as do their matching "bitwise" +/// operators. See their documentation for specifics. +/// +/// The insertion order is preserved, with **notable exceptions** like the +/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of +/// course result in a new order, depending on the sorting order. +/// +/// # Indices +/// +/// The values are indexed in a compact range without holes in the range +/// `0..self.len()`. For example, the method `.get_full` looks up the index for +/// a value, and the method `.get_index` looks up the value by index. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexSet; +/// +/// // Collects which letters appear in a sentence. 
+/// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect(); +/// +/// assert!(letters.contains(&'s')); +/// assert!(letters.contains(&'t')); +/// assert!(letters.contains(&'u')); +/// assert!(!letters.contains(&'y')); +/// ``` +#[derive(Clone)] +pub struct IndexSet { + map: IndexMap, +} + +impl fmt::Debug for IndexSet + where T: fmt::Debug + Hash + Eq, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if cfg!(not(feature = "test_debug")) { + f.debug_set().entries(self.iter()).finish() + } else { + // Let the inner `IndexMap` print all of its details + f.debug_struct("IndexSet").field("map", &self.map).finish() + } + } +} + +impl IndexSet { + /// Create a new set. (Does not allocate.) + pub fn new() -> Self { + IndexSet { map: IndexMap::new() } + } + + /// Create a new set with capacity for `n` elements. + /// (Does not allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + pub fn with_capacity(n: usize) -> Self { + IndexSet { map: IndexMap::with_capacity(n) } + } +} + +impl IndexSet { + /// Create a new set with capacity for `n` elements. + /// (Does not allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self + where S: BuildHasher + { + IndexSet { map: IndexMap::with_capacity_and_hasher(n, hash_builder) } + } + + /// Return the number of elements in the set. + /// + /// Computes in **O(1)** time. + pub fn len(&self) -> usize { + self.map.len() + } + + /// Returns true if the set contains no elements. + /// + /// Computes in **O(1)** time. + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Create a new set with `hash_builder` + pub fn with_hasher(hash_builder: S) -> Self + where S: BuildHasher + { + IndexSet { map: IndexMap::with_hasher(hash_builder) } + } + + /// Return a reference to the set's `BuildHasher`. 
+ pub fn hasher(&self) -> &S + where S: BuildHasher + { + self.map.hasher() + } + + /// Computes in **O(1)** time. + pub fn capacity(&self) -> usize { + self.map.capacity() + } +} + +impl IndexSet + where T: Hash + Eq, + S: BuildHasher, +{ + /// Remove all elements in the set, while preserving its capacity. + /// + /// Computes in **O(n)** time. + pub fn clear(&mut self) { + self.map.clear(); + } + + /// FIXME Not implemented fully yet + pub fn reserve(&mut self, additional: usize) { + self.map.reserve(additional); + } + + /// Insert the value into the set. + /// + /// If an equivalent item already exists in the set, it returns + /// `false` leaving the original value in the set and without + /// altering its insertion order. Otherwise, it inserts the new + /// item and returns `true`. + /// + /// Computes in **O(1)** time (amortized average). + pub fn insert(&mut self, value: T) -> bool { + self.map.insert(value, ()).is_none() + } + + /// Return an iterator over the values of the set, in their order + pub fn iter(&self) -> Iter { + Iter { + iter: self.map.keys().iter + } + } + + /// Return an iterator over the values that are in `self` but not `other`. + /// + /// Values are produced in the same order that they appear in `self`. + pub fn difference<'a, S2>(&'a self, other: &'a IndexSet) -> Difference<'a, T, S2> + where S2: BuildHasher + { + Difference { + iter: self.iter(), + other: other, + } + } + + /// Return an iterator over the values that are in `self` or `other`, + /// but not in both. + /// + /// Values from `self` are produced in their original order, followed by + /// values from `other` in their original order. + pub fn symmetric_difference<'a, S2>(&'a self, other: &'a IndexSet) + -> SymmetricDifference<'a, T, S, S2> + where S2: BuildHasher + { + SymmetricDifference { + iter: self.difference(other).chain(other.difference(self)), + } + } + + /// Return an iterator over the values that are in both `self` and `other`. 
+ /// + /// Values are produced in the same order that they appear in `self`. + pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet) -> Intersection<'a, T, S2> + where S2: BuildHasher + { + Intersection { + iter: self.iter(), + other: other, + } + } + + /// Return an iterator over all values that are in `self` or `other`. + /// + /// Values from `self` are produced in their original order, followed by + /// values that are unique to `other` in their original order. + pub fn union<'a, S2>(&'a self, other: &'a IndexSet) -> Union<'a, T, S> + where S2: BuildHasher + { + Union { + iter: self.iter().chain(other.difference(self)), + } + } + + /// Return `true` if an equivalent to `value` exists in the set. + /// + /// Computes in **O(1)** time (average). + pub fn contains(&self, value: &Q) -> bool + where Q: Hash + Equivalent, + { + self.map.contains_key(value) + } + + /// Return a reference to the value stored in the set, if it is present, + /// else `None`. + /// + /// Computes in **O(1)** time (average). + pub fn get(&self, value: &Q) -> Option<&T> + where Q: Hash + Equivalent, + { + self.map.get_full(value).map(|(_, x, &())| x) + } + + /// Return item index and value + pub fn get_full(&self, value: &Q) -> Option<(usize, &T)> + where Q: Hash + Equivalent, + { + self.map.get_full(value).map(|(i, x, &())| (i, x)) + } + + /// Adds a value to the set, replacing the existing value, if any, that is + /// equal to the given one. Returns the replaced value. + /// + /// Computes in **O(1)** time (average). + pub fn replace(&mut self, value: T) -> Option + { + use super::map::Entry::*; + + match self.map.entry(value) { + Vacant(e) => { e.insert(()); None }, + Occupied(e) => Some(e.replace_key()), + } + } + + /// FIXME Same as .swap_remove + /// + /// Computes in **O(1)** time (average). + pub fn remove(&mut self, value: &Q) -> bool + where Q: Hash + Equivalent, + { + self.swap_remove(value) + } + + /// Remove the value from the set, and return `true` if it was present. 
+ /// + /// Like `Vec::swap_remove`, the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the postion of what used to be the last element!** + /// + /// Return `false` if `value` was not in the set. + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove(&mut self, value: &Q) -> bool + where Q: Hash + Equivalent, + { + self.map.swap_remove(value).is_some() + } + + /// FIXME Same as .swap_take + /// + /// Computes in **O(1)** time (average). + pub fn take(&mut self, value: &Q) -> Option + where Q: Hash + Equivalent, + { + self.swap_take(value) + } + + /// Removes and returns the value in the set, if any, that is equal to the + /// given one. + /// + /// Like `Vec::swap_remove`, the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the postion of what used to be the last element!** + /// + /// Return `None` if `value` was not in the set. + /// + /// Computes in **O(1)** time (average). + pub fn swap_take(&mut self, value: &Q) -> Option + where Q: Hash + Equivalent, + { + self.map.swap_remove_full(value).map(|(_, x, ())| x) + } + + /// Remove the value from the set return it and the index it had. + /// + /// Like `Vec::swap_remove`, the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the postion of what used to be the last element!** + /// + /// Return `None` if `value` was not in the set. + pub fn swap_remove_full(&mut self, value: &Q) -> Option<(usize, T)> + where Q: Hash + Equivalent, + { + self.map.swap_remove_full(value).map(|(i, x, ())| (i, x)) + } + + /// Remove the last value + /// + /// Computes in **O(1)** time (average). + pub fn pop(&mut self) -> Option { + self.map.pop().map(|(x, ())| x) + } + + /// Scan through each value in the set and keep those where the + /// closure `keep` returns `true`. 
+ /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + pub fn retain(&mut self, mut keep: F) + where F: FnMut(&T) -> bool, + { + self.map.retain(move |x, &mut ()| keep(x)) + } + + /// Sort the set’s values by their default ordering. + /// + /// See `sort_by` for details. + pub fn sort(&mut self) + where T: Ord, + { + self.map.sort_keys() + } + + /// Sort the set’s values in place using the comparison function `compare`. + /// + /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. + pub fn sort_by(&mut self, mut compare: F) + where F: FnMut(&T, &T) -> Ordering, + { + self.map.sort_by(move |a, _, b, _| compare(a, b)); + } + + /// Sort the values of the set and return a by value iterator of + /// the values with the result. + /// + /// The sort is stable. + pub fn sorted_by(self, mut cmp: F) -> IntoIter + where F: FnMut(&T, &T) -> Ordering + { + IntoIter { + iter: self.map.sorted_by(move |a, &(), b, &()| cmp(a, b)).iter, + } + } + + /// Clears the `IndexSet`, returning all values as a drain iterator. + /// Keeps the allocated memory for reuse. + pub fn drain(&mut self, range: RangeFull) -> Drain { + Drain { + iter: self.map.drain(range).iter, + } + } +} + +impl IndexSet { + /// Get a value by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time. + pub fn get_index(&self, index: usize) -> Option<&T> { + self.map.get_index(index).map(|(x, &())| x) + } + + /// Remove the key-value pair by index + /// + /// Valid indices are *0 <= index < self.len()* + /// + /// Computes in **O(1)** time (average). 
+ pub fn swap_remove_index(&mut self, index: usize) -> Option { + self.map.swap_remove_index(index).map(|(x, ())| x) + } +} + + +pub struct IntoIter { + iter: vec::IntoIter>, +} + +impl Iterator for IntoIter { + type Item = T; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for IntoIter { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::key) + } +} + +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { + self.iter.len() + } +} + + +pub struct Iter<'a, T: 'a> { + iter: slice::Iter<'a, Bucket>, +} + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; + + iterator_methods!(Bucket::key_ref); +} + +impl<'a, T> DoubleEndedIterator for Iter<'a, T> { + fn next_back(&mut self) -> Option { + self.iter.next_back().map(Bucket::key_ref) + } +} + +impl<'a, T> ExactSizeIterator for Iter<'a, T> { + fn len(&self) -> usize { + self.iter.len() + } +} + +pub struct Drain<'a, T: 'a> { + iter: vec::Drain<'a, Bucket>, +} + +impl<'a, T> Iterator for Drain<'a, T> { + type Item = T; + + iterator_methods!(Bucket::key); +} + +impl<'a, T> DoubleEndedIterator for Drain<'a, T> { + double_ended_iterator_methods!(Bucket::key); +} + +impl<'a, T, S> IntoIterator for &'a IndexSet + where T: Hash + Eq, + S: BuildHasher, +{ + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for IndexSet + where T: Hash + Eq, + S: BuildHasher, +{ + type Item = T; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { + iter: self.map.into_iter().iter, + } + } +} + +impl FromIterator for IndexSet + where T: Hash + Eq, + S: BuildHasher + Default, +{ + fn from_iter>(iterable: I) -> Self { + let iter = iterable.into_iter().map(|x| (x, ())); + IndexSet { map: IndexMap::from_iter(iter) } + } +} + +impl Extend for IndexSet + where T: Hash + Eq, + S: BuildHasher, +{ + fn extend>(&mut self, iterable: I) { + let iter = iterable.into_iter().map(|x| (x, 
())); + self.map.extend(iter); + } +} + +impl<'a, T, S> Extend<&'a T> for IndexSet + where T: Hash + Eq + Copy, + S: BuildHasher, +{ + fn extend>(&mut self, iterable: I) { + let iter = iterable.into_iter().map(|&x| x); + self.extend(iter); + } +} + + +impl Default for IndexSet + where S: BuildHasher + Default, +{ + /// Return an empty `IndexSet` + fn default() -> Self { + IndexSet { map: IndexMap::default() } + } +} + +impl PartialEq> for IndexSet + where T: Hash + Eq, + S1: BuildHasher, + S2: BuildHasher +{ + fn eq(&self, other: &IndexSet) -> bool { + self.len() == other.len() && self.is_subset(other) + } +} + +impl Eq for IndexSet + where T: Eq + Hash, + S: BuildHasher +{ +} + +impl IndexSet + where T: Eq + Hash, + S: BuildHasher +{ + /// Returns `true` if `self` has no elements in common with `other`. + pub fn is_disjoint(&self, other: &IndexSet) -> bool + where S2: BuildHasher + { + if self.len() <= other.len() { + self.iter().all(move |value| !other.contains(value)) + } else { + other.iter().all(move |value| !self.contains(value)) + } + } + + /// Returns `true` if all elements of `self` are contained in `other`. + pub fn is_subset(&self, other: &IndexSet) -> bool + where S2: BuildHasher + { + self.len() <= other.len() && self.iter().all(move |value| other.contains(value)) + } + + /// Returns `true` if all elements of `other` are contained in `self`. 
+ pub fn is_superset(&self, other: &IndexSet) -> bool + where S2: BuildHasher + { + other.is_subset(self) + } +} + + +pub struct Difference<'a, T: 'a, S: 'a> { + iter: Iter<'a, T>, + other: &'a IndexSet, +} + +impl<'a, T, S> Iterator for Difference<'a, T, S> + where T: Eq + Hash, + S: BuildHasher +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl<'a, T, S> DoubleEndedIterator for Difference<'a, T, S> + where T: Eq + Hash, + S: BuildHasher +{ + fn next_back(&mut self) -> Option { + while let Some(item) = self.iter.next_back() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } +} + + +pub struct Intersection<'a, T: 'a, S: 'a> { + iter: Iter<'a, T>, + other: &'a IndexSet, +} + +impl<'a, T, S> Iterator for Intersection<'a, T, S> + where T: Eq + Hash, + S: BuildHasher +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl<'a, T, S> DoubleEndedIterator for Intersection<'a, T, S> + where T: Eq + Hash, + S: BuildHasher +{ + fn next_back(&mut self) -> Option { + while let Some(item) = self.iter.next_back() { + if self.other.contains(item) { + return Some(item); + } + } + None + } +} + + +pub struct SymmetricDifference<'a, T: 'a, S1: 'a, S2: 'a> { + iter: Chain, Difference<'a, T, S1>>, +} + +impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> + where T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: B, f: F) -> B + where F: FnMut(B, 
Self::Item) -> B + { + self.iter.fold(init, f) + } +} + +impl<'a, T, S1, S2> DoubleEndedIterator for SymmetricDifference<'a, T, S1, S2> + where T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } +} + + +pub struct Union<'a, T: 'a, S: 'a> { + iter: Chain, Difference<'a, T, S>>, +} + +impl<'a, T, S> Iterator for Union<'a, T, S> + where T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: B, f: F) -> B + where F: FnMut(B, Self::Item) -> B + { + self.iter.fold(init, f) + } +} + +impl<'a, T, S> DoubleEndedIterator for Union<'a, T, S> + where T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } +} + + +impl<'a, 'b, T, S1, S2> BitAnd<&'b IndexSet> for &'a IndexSet + where T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set intersection, cloned into a new set. + /// + /// Values are collected in the same order that they appear in `self`. + fn bitand(self, other: &'b IndexSet) -> Self::Output { + self.intersection(other).cloned().collect() + } +} + +impl<'a, 'b, T, S1, S2> BitOr<&'b IndexSet> for &'a IndexSet + where T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set union, cloned into a new set. + /// + /// Values from `self` are collected in their original order, followed by + /// values that are unique to `other` in their original order. 
+ fn bitor(self, other: &'b IndexSet) -> Self::Output { + self.union(other).cloned().collect() + } +} + +impl<'a, 'b, T, S1, S2> BitXor<&'b IndexSet> for &'a IndexSet + where T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set symmetric-difference, cloned into a new set. + /// + /// Values from `self` are collected in their original order, followed by + /// values from `other` in their original order. + fn bitxor(self, other: &'b IndexSet) -> Self::Output { + self.symmetric_difference(other).cloned().collect() + } +} + +impl<'a, 'b, T, S1, S2> Sub<&'b IndexSet> for &'a IndexSet + where T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set difference, cloned into a new set. + /// + /// Values are collected in the same order that they appear in `self`. + fn sub(self, other: &'b IndexSet) -> Self::Output { + self.difference(other).cloned().collect() + } +} + + +#[cfg(test)] +mod tests { + use super::*; + use util::enumerate; + + #[test] + fn it_works() { + let mut set = IndexSet::new(); + assert_eq!(set.is_empty(), true); + set.insert(1); + set.insert(1); + assert_eq!(set.len(), 1); + assert!(set.get(&1).is_some()); + assert_eq!(set.is_empty(), false); + } + + #[test] + fn new() { + let set = IndexSet::::new(); + println!("{:?}", set); + assert_eq!(set.capacity(), 0); + assert_eq!(set.len(), 0); + assert_eq!(set.is_empty(), true); + } + + #[test] + fn insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(insert.len()); + + for (i, &elt) in enumerate(&insert) { + assert_eq!(set.len(), i); + set.insert(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + println!("{:?}", set); + + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } + } + + #[test] + fn insert_2() { + let mut set = IndexSet::with_capacity(16); + + 
let mut values = vec![]; + values.extend(0..16); + values.extend(128..267); + + for &i in &values { + let old_set = set.clone(); + set.insert(i); + for value in old_set.iter() { + if !set.get(value).is_some() { + println!("old_set: {:?}", old_set); + println!("set: {:?}", set); + panic!("did not find {} in set", value); + } + } + } + + for &i in &values { + assert!(set.get(&i).is_some(), "did not find {}", i); + } + } + + #[test] + fn insert_dup() { + let mut elements = vec![0, 2, 4, 6, 8]; + let mut set: IndexSet = elements.drain(..).collect(); + { + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + { + let inserted = set.insert(0); + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(inserted, false); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + } + + #[test] + fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().zip(set.iter()) { + assert_eq!(a, b); + } + for (i, v) in (0..insert.len()).zip(set.iter()) { + assert_eq!(set.get_index(i).unwrap(), v); + } + } + + #[test] + fn grow() { + let insert = [0, 4, 2, 12, 8, 7, 11]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(insert.len()); + + + for (i, &elt) in enumerate(&insert) { + assert_eq!(set.len(), i); + set.insert(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + + println!("{:?}", set); + for &elt in &insert { + set.insert(elt * 10); + } + for &elt in &insert { + set.insert(elt * 100); + } + for (i, &elt) in insert.iter().cycle().enumerate().take(100) { + set.insert(elt * 100 + i as i32); + } + println!("{:?}", set); + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } + } + + #[test] + fn remove() { + let 
insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().zip(set.iter()) { + assert_eq!(a, b); + } + + let remove_fail = [99, 77]; + let remove = [4, 12, 8, 7]; + + for &value in &remove_fail { + assert!(set.swap_remove_full(&value).is_none()); + } + println!("{:?}", set); + for &value in &remove { + //println!("{:?}", set); + let index = set.get_full(&value).unwrap().0; + assert_eq!(set.swap_remove_full(&value), Some((index, value))); + } + println!("{:?}", set); + + for value in &insert { + assert_eq!(set.get(value).is_some(), !remove.contains(value)); + } + assert_eq!(set.len(), insert.len() - remove.len()); + assert_eq!(set.iter().count(), insert.len() - remove.len()); + } + + #[test] + fn swap_remove_index() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + let mut vector = insert.to_vec(); + let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; + + // check that the same swap remove sequence on vec and set + // have the same result. 
+ for &rm in remove_sequence { + let out_vec = vector.swap_remove(rm); + let out_set = set.swap_remove_index(rm).unwrap(); + assert_eq!(out_vec, out_set); + } + assert_eq!(vector.len(), set.len()); + for (a, b) in vector.iter().zip(set.iter()) { + assert_eq!(a, b); + } + } + + #[test] + fn partial_eq_and_eq() { + let mut set_a = IndexSet::new(); + set_a.insert(1); + set_a.insert(2); + let mut set_b = set_a.clone(); + assert_eq!(set_a, set_b); + set_b.remove(&1); + assert_ne!(set_a, set_b); + + let set_c: IndexSet<_> = set_b.into_iter().collect(); + assert_ne!(set_a, set_c); + assert_ne!(set_c, set_a); + } + + #[test] + fn extend() { + let mut set = IndexSet::new(); + set.extend(vec![&1, &2, &3, &4]); + set.extend(vec![5, 6]); + assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn comparisons() { + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).collect(); + + assert!(!set_a.is_disjoint(&set_a)); + assert!(set_a.is_subset(&set_a)); + assert!(set_a.is_superset(&set_a)); + + assert!(set_a.is_disjoint(&set_b)); + assert!(set_b.is_disjoint(&set_a)); + assert!(!set_a.is_subset(&set_b)); + assert!(!set_b.is_subset(&set_a)); + assert!(!set_a.is_superset(&set_b)); + assert!(!set_b.is_superset(&set_a)); + + assert!(!set_a.is_disjoint(&set_c)); + assert!(!set_c.is_disjoint(&set_a)); + assert!(set_a.is_subset(&set_c)); + assert!(!set_c.is_subset(&set_a)); + assert!(!set_a.is_superset(&set_c)); + assert!(set_c.is_superset(&set_a)); + + assert!(!set_c.is_disjoint(&set_d)); + assert!(!set_d.is_disjoint(&set_c)); + assert!(!set_c.is_subset(&set_d)); + assert!(!set_d.is_subset(&set_c)); + assert!(!set_c.is_superset(&set_d)); + assert!(!set_d.is_superset(&set_c)); + } + + #[test] + fn iter_comparisons() { + use std::iter::empty; + + fn check<'a, I1, I2>(iter1: I1, iter2: I2) + where I1: Iterator, + I2: Iterator, + { + 
assert!(iter1.cloned().eq(iter2)); + } + + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + check(set_a.difference(&set_a), empty()); + check(set_a.symmetric_difference(&set_a), empty()); + check(set_a.intersection(&set_a), 0..3); + check(set_a.union(&set_a), 0..3); + + check(set_a.difference(&set_b), 0..3); + check(set_b.difference(&set_a), 3..6); + check(set_a.symmetric_difference(&set_b), 0..6); + check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); + check(set_a.intersection(&set_b), empty()); + check(set_b.intersection(&set_a), empty()); + check(set_a.union(&set_b), 0..6); + check(set_b.union(&set_a), (3..6).chain(0..3)); + + check(set_a.difference(&set_c), empty()); + check(set_c.difference(&set_a), 3..6); + check(set_a.symmetric_difference(&set_c), 3..6); + check(set_c.symmetric_difference(&set_a), 3..6); + check(set_a.intersection(&set_c), 0..3); + check(set_c.intersection(&set_a), 0..3); + check(set_a.union(&set_c), 0..6); + check(set_c.union(&set_a), 0..6); + + check(set_c.difference(&set_d), 0..3); + check(set_d.difference(&set_c), (6..9).rev()); + check(set_c.symmetric_difference(&set_d), (0..3).chain((6..9).rev())); + check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); + check(set_c.intersection(&set_d), 3..6); + check(set_d.intersection(&set_c), (3..6).rev()); + check(set_c.union(&set_d), (0..6).chain((6..9).rev())); + check(set_d.union(&set_c), (3..9).rev().chain(0..3)); + } + + #[test] + fn ops() { + let empty = IndexSet::::new(); + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + assert_eq!(&set_a & &set_a, set_a); + assert_eq!(&set_a | &set_a, set_a); + assert_eq!(&set_a ^ &set_a, empty); + assert_eq!(&set_a - &set_a, empty); + + assert_eq!(&set_a 
& &set_b, empty); + assert_eq!(&set_b & &set_a, empty); + assert_eq!(&set_a | &set_b, set_c); + assert_eq!(&set_b | &set_a, set_c); + assert_eq!(&set_a ^ &set_b, set_c); + assert_eq!(&set_b ^ &set_a, set_c); + assert_eq!(&set_a - &set_b, set_a); + assert_eq!(&set_b - &set_a, set_b); + + assert_eq!(&set_a & &set_c, set_a); + assert_eq!(&set_c & &set_a, set_a); + assert_eq!(&set_a | &set_c, set_c); + assert_eq!(&set_c | &set_a, set_c); + assert_eq!(&set_a ^ &set_c, set_b); + assert_eq!(&set_c ^ &set_a, set_b); + assert_eq!(&set_a - &set_c, empty); + assert_eq!(&set_c - &set_a, set_b); + + assert_eq!(&set_c & &set_d, set_b); + assert_eq!(&set_d & &set_c, set_b); + assert_eq!(&set_c | &set_d, &set_a | &set_d); + assert_eq!(&set_d | &set_c, &set_a | &set_d); + assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); + assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); + assert_eq!(&set_c - &set_d, set_a); + assert_eq!(&set_d - &set_c, &set_d - &set_b); + } +} diff --git a/third_party/rust/indexmap/src/util.rs b/third_party/rust/indexmap/src/util.rs new file mode 100644 index 000000000000..90d3e7e33491 --- /dev/null +++ b/third_party/rust/indexmap/src/util.rs @@ -0,0 +1,17 @@ + +use std::iter::Enumerate; +use std::mem::size_of; + +pub fn third(t: (A, B, C)) -> C { t.2 } + +pub fn enumerate(iterable: I) -> Enumerate + where I: IntoIterator +{ + iterable.into_iter().enumerate() +} + +/// return the number of steps from a to b +pub fn ptrdistance(a: *const T, b: *const T) -> usize { + debug_assert!(a as usize <= b as usize); + (b as usize - a as usize) / size_of::() +} diff --git a/third_party/rust/indexmap/tests/equivalent_trait.rs b/third_party/rust/indexmap/tests/equivalent_trait.rs new file mode 100644 index 000000000000..8b79e20a19b2 --- /dev/null +++ b/third_party/rust/indexmap/tests/equivalent_trait.rs @@ -0,0 +1,55 @@ + +#[macro_use] extern crate indexmap; + +use indexmap::Equivalent; + +use std::hash::Hash; + +#[derive(Debug, Hash)] +pub struct Pair(pub A, 
pub B); + +impl PartialEq<(A, B)> for Pair + where C: PartialEq, + D: PartialEq, +{ + fn eq(&self, rhs: &(A, B)) -> bool { + self.0 == rhs.0 && + self.1 == rhs.1 && + true + } +} + +impl Equivalent for Pair + where Pair: PartialEq, + A: Hash + Eq, + B: Hash + Eq, +{ + fn equivalent(&self, other: &X) -> bool { + *self == *other + } +} + +#[test] +fn test_lookup() { + let s = String::from; + let map = indexmap! { + (s("a"), s("b")) => 1, + (s("a"), s("x")) => 2, + }; + + assert!(map.contains_key(&Pair("a", "b"))); + assert!(!map.contains_key(&Pair("b", "a"))); +} + +#[test] +fn test_string_str() { + let s = String::from; + let mut map = indexmap! { + s("a") => 1, s("b") => 2, + s("x") => 3, s("y") => 4, + }; + + assert!(map.contains_key("a")); + assert!(!map.contains_key("z")); + assert_eq!(map.remove("b"), Some(2)); +} diff --git a/third_party/rust/indexmap/tests/quick.rs b/third_party/rust/indexmap/tests/quick.rs new file mode 100644 index 000000000000..14f267cff194 --- /dev/null +++ b/third_party/rust/indexmap/tests/quick.rs @@ -0,0 +1,365 @@ + +extern crate indexmap; +extern crate itertools; +#[macro_use] +extern crate quickcheck; + +extern crate fnv; + +use indexmap::IndexMap; +use itertools::Itertools; + +use quickcheck::Arbitrary; +use quickcheck::Gen; + +use fnv::FnvHasher; +use std::hash::{BuildHasher, BuildHasherDefault}; +type FnvBuilder = BuildHasherDefault; +type OrderMapFnv = IndexMap; + +use std::collections::HashSet; +use std::collections::HashMap; +use std::iter::FromIterator; +use std::hash::Hash; +use std::fmt::Debug; +use std::ops::Deref; +use std::cmp::min; + + +use indexmap::map::Entry as OEntry; +use std::collections::hash_map::Entry as HEntry; + + +fn set<'a, T: 'a, I>(iter: I) -> HashSet + where I: IntoIterator, + T: Copy + Hash + Eq +{ + iter.into_iter().cloned().collect() +} + +fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap + where I: IntoIterator, + T: Copy + Hash + Eq, +{ + IndexMap::from_iter(iter.into_iter().cloned().map(|k| (k, ()))) 
+} + +quickcheck! { + fn contains(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + insert.iter().all(|&key| map.get(&key).is_some()) + } + + fn contains_not(insert: Vec, not: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + let nots = &set(¬) - &set(&insert); + nots.iter().all(|&key| map.get(&key).is_none()) + } + + fn insert_remove(insert: Vec, remove: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + for &key in &remove { + map.swap_remove(&key); + } + let elements = &set(&insert) - &set(&remove); + map.len() == elements.len() && map.iter().count() == elements.len() && + elements.iter().all(|k| map.get(k).is_some()) + } + + fn insertion_order(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + itertools::assert_equal(insert.iter().unique(), map.keys()); + true + } + + fn pop(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + let mut pops = Vec::new(); + while let Some((key, _v)) = map.pop() { + pops.push(key); + } + pops.reverse(); + + itertools::assert_equal(insert.iter().unique(), &pops); + true + } + + fn with_cap(cap: usize) -> bool { + let map: IndexMap = IndexMap::with_capacity(cap); + println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize); + map.capacity() >= cap + } + + fn drain(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + let mut clone = map.clone(); + let drained = clone.drain(..); + for (key, _) in drained { + map.remove(&key); + } + map.is_empty() + } +} + +use Op::*; +#[derive(Copy, Clone, Debug)] +enum Op { + Add(K, V), + Remove(K), + AddEntry(K, V), + RemoveEntry(K), +} + +impl Arbitrary for Op + where K: Arbitrary, + V: Arbitrary, +{ + fn arbitrary(g: &mut G) -> 
Self { + match g.gen::() % 4 { + 0 => Add(K::arbitrary(g), V::arbitrary(g)), + 1 => AddEntry(K::arbitrary(g), V::arbitrary(g)), + 2 => Remove(K::arbitrary(g)), + _ => RemoveEntry(K::arbitrary(g)), + } + } +} + +fn do_ops(ops: &[Op], a: &mut IndexMap, b: &mut HashMap) + where K: Hash + Eq + Clone, + V: Clone, + S: BuildHasher, +{ + for op in ops { + match *op { + Add(ref k, ref v) => { + a.insert(k.clone(), v.clone()); + b.insert(k.clone(), v.clone()); + } + AddEntry(ref k, ref v) => { + a.entry(k.clone()).or_insert(v.clone()); + b.entry(k.clone()).or_insert(v.clone()); + } + Remove(ref k) => { + a.swap_remove(k); + b.remove(k); + } + RemoveEntry(ref k) => { + match a.entry(k.clone()) { + OEntry::Occupied(ent) => { ent.remove_entry(); }, + _ => { } + } + match b.entry(k.clone()) { + HEntry::Occupied(ent) => { ent.remove_entry(); }, + _ => { } + } + } + } + //println!("{:?}", a); + } +} + +fn assert_maps_equivalent(a: &IndexMap, b: &HashMap) -> bool + where K: Hash + Eq + Debug, + V: Eq + Debug, +{ + assert_eq!(a.len(), b.len()); + assert_eq!(a.iter().next().is_some(), b.iter().next().is_some()); + for key in a.keys() { + assert!(b.contains_key(key), "b does not contain {:?}", key); + } + for key in b.keys() { + assert!(a.get(key).is_some(), "a does not contain {:?}", key); + } + for key in a.keys() { + assert_eq!(a[key], b[key]); + } + true +} + +quickcheck! 
{ + fn operations_i8(ops: Large>>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + assert_maps_equivalent(&map, &reference) + } + + fn operations_string(ops: Vec>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + assert_maps_equivalent(&map, &reference) + } + + fn keys_values(ops: Large>>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + let mut visit = IndexMap::new(); + for (k, v) in map.keys().zip(map.values()) { + assert_eq!(&map[k], v); + assert!(!visit.contains_key(k)); + visit.insert(*k, *v); + } + assert_eq!(visit.len(), reference.len()); + true + } + + fn keys_values_mut(ops: Large>>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + let mut visit = IndexMap::new(); + let keys = Vec::from_iter(map.keys().cloned()); + for (k, v) in keys.iter().zip(map.values_mut()) { + assert_eq!(&reference[k], v); + assert!(!visit.contains_key(k)); + visit.insert(*k, *v); + } + assert_eq!(visit.len(), reference.len()); + true + } + + fn equality(ops1: Vec>, removes: Vec) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops1, &mut map, &mut reference); + let mut ops2 = ops1.clone(); + for &r in &removes { + if !ops2.is_empty() { + let i = r % ops2.len(); + ops2.remove(i); + } + } + let mut map2 = OrderMapFnv::default(); + let mut reference2 = HashMap::new(); + do_ops(&ops2, &mut map2, &mut reference2); + assert_eq!(map == map2, reference == reference2); + true + } + + fn retain_ordered(keys: Large>, remove: Large>) -> () { + let mut map = indexmap(keys.iter()); + let initial_map = map.clone(); // deduplicated in-order input + let remove_map = indexmap(remove.iter()); + let keys_s = set(keys.iter()); + let remove_s = 
set(remove.iter()); + let answer = &keys_s - &remove_s; + map.retain(|k, _| !remove_map.contains_key(k)); + + // check the values + assert_eq!(map.len(), answer.len()); + for key in &answer { + assert!(map.contains_key(key)); + } + // check the order + itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k))); + } + + fn sort_1(keyvals: Large>) -> () { + let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); + let mut answer = keyvals.0; + answer.sort_by_key(|t| t.0); + + // reverse dedup: Because IndexMap::from_iter keeps the last value for + // identical keys + answer.reverse(); + answer.dedup_by_key(|t| t.0); + answer.reverse(); + + map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2)); + + // check it contains all the values it should + for &(key, val) in &answer { + assert_eq!(map[&key], val); + } + + // check the order + + let mapv = Vec::from_iter(map); + assert_eq!(answer, mapv); + + } + + fn sort_2(keyvals: Large>) -> () { + let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); + map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2)); + assert_sorted_by_key(map, |t| t.1); + } +} + +fn assert_sorted_by_key(iterable: I, key: Key) + where I: IntoIterator, + I::Item: Ord + Clone + Debug, + Key: Fn(&I::Item) -> X, + X: Ord, +{ + let input = Vec::from_iter(iterable); + let mut sorted = input.clone(); + sorted.sort_by_key(key); + assert_eq!(input, sorted); +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +struct Alpha(String); + +impl Deref for Alpha { + type Target = String; + fn deref(&self) -> &String { &self.0 } +} + +const ALPHABET: &'static [u8] = b"abcdefghijklmnopqrstuvwxyz"; + +impl Arbitrary for Alpha { + fn arbitrary(g: &mut G) -> Self { + let len = g.next_u32() % g.size() as u32; + let len = min(len, 16); + Alpha((0..len).map(|_| { + ALPHABET[g.next_u32() as usize % ALPHABET.len()] as char + }).collect()) + } + + fn shrink(&self) -> Box> { + Box::new((**self).shrink().map(Alpha)) + } +} + +/// 
quickcheck Arbitrary adaptor -- make a larger vec +#[derive(Clone, Debug)] +struct Large(T); + +impl Deref for Large { + type Target = T; + fn deref(&self) -> &T { &self.0 } +} + +impl Arbitrary for Large> + where T: Arbitrary +{ + fn arbitrary(g: &mut G) -> Self { + let len = g.next_u32() % (g.size() * 10) as u32; + Large((0..len).map(|_| T::arbitrary(g)).collect()) + } + + fn shrink(&self) -> Box> { + Box::new((**self).shrink().map(Large)) + } +} diff --git a/third_party/rust/indexmap/tests/serde.rs b/third_party/rust/indexmap/tests/serde.rs new file mode 100644 index 000000000000..dbb23573e4c2 --- /dev/null +++ b/third_party/rust/indexmap/tests/serde.rs @@ -0,0 +1,59 @@ +#![cfg(feature = "serde-1")] + +#[macro_use] +extern crate indexmap; +extern crate serde_test; +extern crate fnv; + +use serde_test::{Token, assert_tokens}; + +#[test] +fn test_serde() { + let map = indexmap! { 1 => 2, 3 => 4 }; + assert_tokens(&map, + &[Token::Map { len: Some(2) }, + Token::I32(1), + Token::I32(2), + Token::I32(3), + Token::I32(4), + Token::MapEnd]); +} + +#[test] +fn test_serde_set() { + let set = indexset! 
{ 1, 2, 3, 4 }; + assert_tokens(&set, + &[Token::Seq { len: Some(4) }, + Token::I32(1), + Token::I32(2), + Token::I32(3), + Token::I32(4), + Token::SeqEnd]); +} + +#[test] +fn test_serde_fnv_hasher() { + let mut map: ::indexmap::IndexMap = Default::default(); + map.insert(1, 2); + map.insert(3, 4); + assert_tokens(&map, + &[Token::Map { len: Some(2) }, + Token::I32(1), + Token::I32(2), + Token::I32(3), + Token::I32(4), + Token::MapEnd]); +} + +#[test] +fn test_serde_map_fnv_hasher() { + let mut set: ::indexmap::IndexSet = Default::default(); + set.extend(1..5); + assert_tokens(&set, + &[Token::Seq { len: Some(4) }, + Token::I32(1), + Token::I32(2), + Token::I32(3), + Token::I32(4), + Token::SeqEnd]); +} diff --git a/third_party/rust/indexmap/tests/tests.rs b/third_party/rust/indexmap/tests/tests.rs new file mode 100644 index 000000000000..4a7f4db7a493 --- /dev/null +++ b/third_party/rust/indexmap/tests/tests.rs @@ -0,0 +1,32 @@ + +#[macro_use] +extern crate indexmap; +extern crate itertools; + + +#[test] +fn test_sort() { + let m = indexmap! { + 1 => 2, + 7 => 1, + 2 => 2, + 3 => 3, + }; + + itertools::assert_equal(m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)), + vec![(7, 1), (1, 2), (2, 2), (3, 3)]); +} + + +#[test] +fn test_sort_set() { + let s = indexset! 
{ + 1, + 7, + 2, + 3, + }; + + itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), + vec![1, 2, 3, 7]); +} diff --git a/third_party/rust/iovec/.cargo-checksum.json b/third_party/rust/iovec/.cargo-checksum.json index faab3d0d2072..2c854870c345 100644 --- a/third_party/rust/iovec/.cargo-checksum.json +++ b/third_party/rust/iovec/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"647d4a5498a3bef693b45603933e604b6ccf7aba8823a33b764ca1833797768b","Cargo.toml":"1c82b292358c72b779205f71efaef3fa343b42143e2cd1845fc44ffd95527a77","LICENSE-APACHE":"01b5abb4a95cc87b220efbd67a1e99c74bef3d744806dd44b4d57e81db814962","LICENSE-MIT":"d4784f55731ba75b77ad73a52808914b26b2f93b69dd4c03249528a75afbd946","README.md":"247302d4c1dc621f150bc06fc0d37f7ad5a4f2dcf1aafe25f8dfe8eb4fe35921","appveyor.yml":"8c309c2779904317005c7f7404470daf2aad344571168a37da214e37833be2a9","src/lib.rs":"aab60277edb10e3b93a5f1a307054fd78c263f3a597b5088e5d7280378c7b028","src/sys/mod.rs":"4c3765602032675d6d236a25b99c00f20515f7e86b7f8afa3148aeaaef58def1","src/sys/unix.rs":"bbf6c36a4a4d48342581ae6c17f8d7ef95d22f4958cf71193429ce53ec4555c2","src/sys/windows.rs":"f0690442f4842b0f0e8fc34739397f0dca8912fde424563d8540d954868f64c7","src/unix.rs":"76e76333e31dd53d1ea6704a880f4188014af09fe8be3cecd5239003b2a1fe7c","src/windows.rs":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"package":"29d062ee61fccdf25be172e70f34c9f6efc597e1fb8f6526e8437b2046ab26be"} \ No newline at end of file 
+{"files":{".travis.yml":"9134f9b0ca77f5646a3d0871a95c6aa4a9c778e37326a3dd87686fcb866c4636","CHANGELOG.md":"6f1bd582e0ed8f66e723cca4deabfc23fed4455e7469eaa0b51633ef88a14af6","Cargo.toml":"ac8f3603c5e24b4326aea402d8199c21d0c74602ef2c99a6befe210bbb63ed1a","LICENSE-APACHE":"01b5abb4a95cc87b220efbd67a1e99c74bef3d744806dd44b4d57e81db814962","LICENSE-MIT":"d4784f55731ba75b77ad73a52808914b26b2f93b69dd4c03249528a75afbd946","README.md":"247302d4c1dc621f150bc06fc0d37f7ad5a4f2dcf1aafe25f8dfe8eb4fe35921","appveyor.yml":"8c309c2779904317005c7f7404470daf2aad344571168a37da214e37833be2a9","src/lib.rs":"5f2e0b694ef4a24f83dbb9f73b7572d4b59478d9bb8cd724bbd982cd3b4e00df","src/sys/mod.rs":"b19af7b93fd8d7a6f46234e0bb93ddbb12b175e57eff9cb0c0c195ea89ec56b4","src/sys/unix.rs":"8265b65eee3672c96460f4aae8b7b61179e17c8ca80646be3994bc05951228ae","src/sys/unknown.rs":"5057c208dcb309ec1b46a76b922948358ceb727958fb8bde4908948a3890057d","src/sys/windows.rs":"90f2a0b93d2b322fb991daacb39e5ac7cef6dd90dac2de7660698c6097ec0c88","src/unix.rs":"76e76333e31dd53d1ea6704a880f4188014af09fe8be3cecd5239003b2a1fe7c","src/windows.rs":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"package":"dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"} \ No newline at end of file diff --git a/third_party/rust/iovec/.travis.yml b/third_party/rust/iovec/.travis.yml index 771acfb9bf1e..f76806b7df7a 100644 --- a/third_party/rust/iovec/.travis.yml +++ b/third_party/rust/iovec/.travis.yml @@ -2,18 +2,19 @@ language: rust sudo: false -rust: - - stable - -os: - - linux - - osx - matrix: include: - - os: linux - # Oldest supported Rust (this should track Mio) - rust: 1.9.0 + # Oldest supported Rust (this should track Mio) + - rust: 1.9.0 + - rust: stable + # OS X support + - rust: stable + os: osx + # WebAssembly support. 
+ - rust: beta + script: + - rustup target add wasm32-unknown-unknown + - cargo build --target=wasm32-unknown-unknown script: - cargo build diff --git a/third_party/rust/iovec/CHANGELOG.md b/third_party/rust/iovec/CHANGELOG.md new file mode 100644 index 000000000000..0e2dfa3d1d44 --- /dev/null +++ b/third_party/rust/iovec/CHANGELOG.md @@ -0,0 +1,11 @@ +# 0.1.2 (January 26th, 2018) + +* Add support for non-windows/unix targets (#10) + +# 0.1.1 (October 5th, 2017) + +* Fix soundness bug: Assert slice lengths are always > 0 (#5) + +# 0.1.0 (March 14th, 2017) + +* Initial release diff --git a/third_party/rust/iovec/Cargo.toml b/third_party/rust/iovec/Cargo.toml index c87b8c114b3a..19fb6aac7e43 100644 --- a/third_party/rust/iovec/Cargo.toml +++ b/third_party/rust/iovec/Cargo.toml @@ -1,20 +1,28 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "iovec" -version = "0.1.0" +version = "0.1.2" authors = ["Carl Lerche "] -license = "MIT/Apache-2.0" -readme = "README.md" -keywords = ["scatter", "gather", "vectored", "io", "networking"] -repository = "https://github.com/carllerche/iovec" +description = "Portable buffer type for scatter/gather I/O operations\n" homepage = "https://github.com/carllerche/iovec" documentation = "https://docs.rs/iovec" -description = """ -Portable buffer type for scatter/gather I/O operations -""" +readme = "README.md" +keywords = ["scatter", "gather", "vectored", "io", "networking"] categories = ["network-programming", "api-bindings"] - -[target.'cfg(unix)'.dependencies] -libc = "0.2" - -[target.'cfg(windows)'.dependencies] -winapi = "0.2" +license = "MIT/Apache-2.0" +repository = "https://github.com/carllerche/iovec" +[target."cfg(unix)".dependencies.libc] +version = "0.2" +[target."cfg(windows)".dependencies.winapi] +version = "0.2" diff --git a/third_party/rust/iovec/src/lib.rs b/third_party/rust/iovec/src/lib.rs index f4e7fa75ec9a..af36e72cf674 100644 --- a/third_party/rust/iovec/src/lib.rs +++ b/third_party/rust/iovec/src/lib.rs @@ -24,14 +24,16 @@ pub const MAX_LENGTH: usize = sys::MAX_LENGTH; /// A specialized byte slice type for performing vectored I/O operations. /// -/// On all systems, the types needed to peform vectored I/O systems have the -/// same size as Rust's [slice]. However, the layout is not necessarily the +/// On all systems, the types needed to perform vectored I/O systems have the +/// same size as Rust's [`slice`]. However, the layout is not necessarily the /// same. `IoVec` provides a portable compatibility layer. /// -/// The `IoVec` behaves like like a Rust [slice], providing the same functions. +/// The `IoVec` behaves like a Rust [`slice`], providing the same functions. 
/// It also provides conversion functions to and from the OS specific vectored /// types. /// +/// [`slice`]: https://doc.rust-lang.org/std/primitive.slice.html +/// /// # Examples /// /// ``` @@ -47,8 +49,8 @@ pub const MAX_LENGTH: usize = sys::MAX_LENGTH; /// /// # Panics /// -/// Attempting to convert a slice longer than [`MAX_LENGTH`] to an `IoVec` will -/// result in a panic. +/// Attempting to convert a zero-length slice or a slice longer than +/// [`MAX_LENGTH`] to an `IoVec` will result in a panic. /// /// [`MAX_LENGTH`]: constant.MAX_LENGTH.html pub struct IoVec { @@ -56,6 +58,26 @@ pub struct IoVec { } impl IoVec { + pub fn from_bytes(slice: &[u8]) -> Option<&IoVec> { + if slice.len() == 0 { + return None + } + unsafe { + let iovec: &sys::IoVec = slice.into(); + Some(mem::transmute(iovec)) + } + } + + pub fn from_bytes_mut(slice: &mut [u8]) -> Option<&mut IoVec> { + if slice.len() == 0 { + return None + } + unsafe { + let iovec: &mut sys::IoVec = slice.into(); + Some(mem::transmute(iovec)) + } + } + #[deprecated(since = "0.1.0", note = "deref instead")] #[doc(hidden)] pub fn as_bytes(&self) -> &[u8] { @@ -83,35 +105,43 @@ impl ops::DerefMut for IoVec { } } +#[doc(hidden)] impl<'a> From<&'a [u8]> for &'a IoVec { fn from(bytes: &'a [u8]) -> &'a IoVec { - unsafe { - let iovec: &sys::IoVec = bytes.into(); - mem::transmute(iovec) - } + IoVec::from_bytes(bytes) + .expect("this crate accidentally accepted 0-sized slices \ + originally but this was since discovered as a soundness \ + hole, it's recommended to use the `from_bytes` \ + function instead") } } +#[doc(hidden)] impl<'a> From<&'a mut [u8]> for &'a mut IoVec { fn from(bytes: &'a mut [u8]) -> &'a mut IoVec { - unsafe { - let iovec: &mut sys::IoVec = bytes.into(); - mem::transmute(iovec) - } + IoVec::from_bytes_mut(bytes) + .expect("this crate accidentally accepted 0-sized slices \ + originally but this was since discovered as a soundness \ + hole, it's recommended to use the `from_bytes_mut` \ + function 
instead") } } +#[doc(hidden)] impl<'a> Default for &'a IoVec { fn default() -> Self { - let b: &[u8] = Default::default(); - b.into() + panic!("this implementation was accidentally provided but is \ + unfortunately unsound, it's recommended to stop using \ + `IoVec::default` or construct a vector with a nonzero length"); } } +#[doc(hidden)] impl<'a> Default for &'a mut IoVec { fn default() -> Self { - let b: &mut [u8] = Default::default(); - b.into() + panic!("this implementation was accidentally provided but is \ + unfortunately unsound, it's recommended to stop using \ + `IoVec::default` or construct a vector with a nonzero length"); } } diff --git a/third_party/rust/iovec/src/sys/mod.rs b/third_party/rust/iovec/src/sys/mod.rs index e15893ef92c6..3e0efc99e876 100644 --- a/third_party/rust/iovec/src/sys/mod.rs +++ b/third_party/rust/iovec/src/sys/mod.rs @@ -15,3 +15,12 @@ pub use self::windows::{ IoVec, MAX_LENGTH, }; + +#[cfg(not(any(windows, unix)))] +mod unknown; + +#[cfg(not(any(windows, unix)))] +pub use self::unknown::{ + IoVec, + MAX_LENGTH, +}; diff --git a/third_party/rust/iovec/src/sys/unix.rs b/third_party/rust/iovec/src/sys/unix.rs index 01e5edb21420..4dbc0674f85b 100644 --- a/third_party/rust/iovec/src/sys/unix.rs +++ b/third_party/rust/iovec/src/sys/unix.rs @@ -29,6 +29,7 @@ impl IoVec { impl<'a> From<&'a [u8]> for &'a IoVec { fn from(src: &'a [u8]) -> Self { + assert!(src.len() > 0); unsafe { mem::transmute(libc::iovec { iov_base: src.as_ptr() as *mut _, @@ -40,6 +41,7 @@ impl<'a> From<&'a [u8]> for &'a IoVec { impl<'a> From<&'a mut [u8]> for &'a mut IoVec { fn from(src: &'a mut [u8]) -> Self { + assert!(src.len() > 0); unsafe { mem::transmute(libc::iovec { iov_base: src.as_ptr() as *mut _, diff --git a/third_party/rust/iovec/src/sys/unknown.rs b/third_party/rust/iovec/src/sys/unknown.rs new file mode 100644 index 000000000000..37acedd78bc6 --- /dev/null +++ b/third_party/rust/iovec/src/sys/unknown.rs @@ -0,0 +1,57 @@ +use std::{mem, slice, usize}; 
+ +#[derive(Clone)] +pub struct WasmIoVec { + ptr: *const u8, + len: usize, +} + +pub struct IoVec { + inner: [u8], +} + +pub const MAX_LENGTH: usize = usize::MAX; + +impl IoVec { + pub fn as_ref(&self) -> &[u8] { + unsafe { + let vec = self.iovec(); + slice::from_raw_parts(vec.ptr as *const u8, vec.len) + } + } + + pub fn as_mut(&mut self) -> &mut [u8] { + unsafe { + let vec = self.iovec(); + slice::from_raw_parts_mut(vec.ptr as *mut u8, vec.len) + } + } + + unsafe fn iovec(&self) -> WasmIoVec { + mem::transmute(&self.inner) + } +} + +impl<'a> From<&'a [u8]> for &'a IoVec { + fn from(src: &'a [u8]) -> Self { + assert!(src.len() > 0); + unsafe { + mem::transmute(WasmIoVec { + ptr: src.as_ptr() as *mut _, + len: src.len(), + }) + } + } +} + +impl<'a> From<&'a mut [u8]> for &'a mut IoVec { + fn from(src: &'a mut [u8]) -> Self { + assert!(src.len() > 0); + unsafe { + mem::transmute(WasmIoVec { + ptr: src.as_ptr() as *mut _, + len: src.len(), + }) + } + } +} diff --git a/third_party/rust/iovec/src/sys/windows.rs b/third_party/rust/iovec/src/sys/windows.rs index a442498b5f1b..ccc5f35d967e 100644 --- a/third_party/rust/iovec/src/sys/windows.rs +++ b/third_party/rust/iovec/src/sys/windows.rs @@ -29,6 +29,7 @@ impl IoVec { impl<'a> From<&'a [u8]> for &'a IoVec { fn from(src: &'a [u8]) -> Self { + assert!(src.len() > 0); assert!(src.len() <= MAX_LENGTH); unsafe { @@ -42,6 +43,7 @@ impl<'a> From<&'a [u8]> for &'a IoVec { impl<'a> From<&'a mut [u8]> for &'a mut IoVec { fn from(src: &'a mut [u8]) -> Self { + assert!(src.len() > 0); assert!(src.len() <= MAX_LENGTH); unsafe { diff --git a/third_party/rust/language-tags/.cargo-checksum.json b/third_party/rust/language-tags/.cargo-checksum.json deleted file mode 100644 index 16230f9ae295..000000000000 --- a/third_party/rust/language-tags/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{"Cargo.toml":"0e4e3b87c348cc5738de9bebdfcb070ac1ab2a93b81b77c074b030d9e27580c4","LICENSE":"e03e58ea9205f51989b7a50f450051b24e6516cc1f0b920222dcda992072be99","src/lib.rs":"0b839c7b379606a064a094b9973c31c634426c41719a1c0963ae8fae1676a987","tests/tests.rs":"cf9f805fdadee5e2f3c4e9e66e8184c3099f7e699d5e34b708849132f1b300ce"},"package":"a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"} \ No newline at end of file diff --git a/third_party/rust/language-tags/Cargo.toml b/third_party/rust/language-tags/Cargo.toml deleted file mode 100644 index be2e04b20534..000000000000 --- a/third_party/rust/language-tags/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "language-tags" -version = "0.2.2" -authors = ["Pyfisch "] -exclude = ["README.md", ".travis.yml", ".gitignore"] - -description = "Language tags for Rust" -license = "MIT" -repository = "https://github.com/pyfisch/rust-language-tags" -documentation = "http://pyfisch.github.io/rust-language-tags/language_tags/" -keywords = ["language", "internationalization", "translation", "http", "html"] - -[dependencies.heapsize] -version = ">=0.2.2, <0.4" -optional = true - -[dependencies.heapsize_plugin] -version = "0.1.2" -optional = true - -[features] -heap_size = ["heapsize", "heapsize_plugin"] diff --git a/third_party/rust/language-tags/LICENSE b/third_party/rust/language-tags/LICENSE deleted file mode 100644 index b1b75fa62cf5..000000000000 --- a/third_party/rust/language-tags/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Pyfisch - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above 
copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/third_party/rust/language-tags/src/lib.rs b/third_party/rust/language-tags/src/lib.rs deleted file mode 100644 index 10141bef240e..000000000000 --- a/third_party/rust/language-tags/src/lib.rs +++ /dev/null @@ -1,640 +0,0 @@ -#![deny(missing_docs)] -#![cfg_attr(test, deny(warnings))] -#![cfg_attr(feature = "heap_size", feature(custom_derive, plugin))] -#![cfg_attr(feature = "heap_size", plugin(heapsize_plugin))] - -//! Language tags can be used identify human languages, scripts e.g. Latin script, countries and -//! other regions. -//! -//! Language tags are defined in [BCP47](http://tools.ietf.org/html/bcp47), an introduction is -//! ["Language tags in HTML and XML"](http://www.w3.org/International/articles/language-tags/) by -//! the W3C. They are commonly used in HTML and HTTP `Content-Language` and `Accept-Language` -//! header fields. -//! -//! This package currently supports parsing (fully conformant parser), formatting and comparing -//! language tags. -//! -//! # Examples -//! Create a simple language tag representing the French language as spoken -//! in Belgium and print it: -//! -//! ```rust -//! use language_tags::LanguageTag; -//! let mut langtag: LanguageTag = Default::default(); -//! langtag.language = Some("fr".to_owned()); -//! langtag.region = Some("BE".to_owned()); -//! assert_eq!(format!("{}", langtag), "fr-BE"); -//! ``` -//! -//! 
Parse a tag representing a special type of English specified by private agreement: -//! -//! ```rust -//! use language_tags::LanguageTag; -//! let langtag: LanguageTag = "en-x-twain".parse().unwrap(); -//! assert_eq!(format!("{}", langtag.language.unwrap()), "en"); -//! assert_eq!(format!("{:?}", langtag.privateuse), "[\"twain\"]"); -//! ``` -//! -//! You can check for equality, but more often you should test if two tags match. -//! -//! ```rust -//! use language_tags::LanguageTag; -//! let mut langtag_server: LanguageTag = Default::default(); -//! langtag_server.language = Some("de".to_owned()); -//! langtag_server.region = Some("AT".to_owned()); -//! let mut langtag_user: LanguageTag = Default::default(); -//! langtag_user.language = Some("de".to_owned()); -//! assert!(langtag_user.matches(&langtag_server)); -//! ``` -//! -//! There is also the `langtag!` macro for creating language tags. - -#[cfg(feature = "heap_size")] -extern crate heapsize; - -use std::ascii::AsciiExt; -use std::cmp::Ordering; -use std::collections::{BTreeMap, BTreeSet}; -use std::error::Error as ErrorTrait; -use std::fmt::{self, Display}; -use std::iter::FromIterator; - -fn is_alphabetic(s: &str) -> bool { - s.chars().all(|x| x >= 'A' && x <= 'Z' || x >= 'a' && x <= 'z') -} - -fn is_numeric(s: &str) -> bool { - s.chars().all(|x| x >= '0' && x <= '9') -} - -fn is_alphanumeric_or_dash(s: &str) -> bool { - s.chars() - .all(|x| x >= 'A' && x <= 'Z' || x >= 'a' && x <= 'z' || x >= '0' && x <= '9' || x == '-') -} - -/// Defines an Error type for langtags. -/// -/// Errors occur mainly during parsing of language tags. -#[derive(Debug, Eq, PartialEq)] -pub enum Error { - /// The same extension subtag is only allowed once in a tag before the private use part. - DuplicateExtension, - /// If an extension subtag is present, it must not be empty. - EmptyExtension, - /// If the `x` subtag is present, it must not be empty. 
- EmptyPrivateUse, - /// The langtag contains a char that is not A-Z, a-z, 0-9 or the dash. - ForbiddenChar, - /// A subtag fails to parse, it does not match any other subtags. - InvalidSubtag, - /// The given language subtag is invalid. - InvalidLanguage, - /// A subtag may be eight characters in length at maximum. - SubtagTooLong, - /// At maximum three extlangs are allowed, but zero to one extlangs are preferred. - TooManyExtlangs, -} - -impl ErrorTrait for Error { - fn description(&self) -> &str { - match *self { - Error::DuplicateExtension => "The same extension subtag is only allowed once in a tag", - Error::EmptyExtension => "If an extension subtag is present, it must not be empty", - Error::EmptyPrivateUse => "If the `x` subtag is present, it must not be empty", - Error::ForbiddenChar => "The langtag contains a char not allowed", - Error::InvalidSubtag => "A subtag fails to parse, it does not match any other subtags", - Error::InvalidLanguage => "The given language subtag is invalid", - Error::SubtagTooLong => "A subtag may be eight characters in length at maximum", - Error::TooManyExtlangs => "At maximum three extlangs are allowed", - } - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.description()) - } -} - -/// Result type used for this library. -pub type Result = ::std::result::Result; - -/// Contains all grandfathered tags. 
-pub const GRANDFATHERED: [(&'static str, Option<&'static str>); 26] = [("art-lojban", Some("jbo")), - ("cel-gaulish", None), - ("en-GB-oed", - Some("en-GB-oxendict")), - ("i-ami", Some("ami")), - ("i-bnn", Some("bnn")), - ("i-default", None), - ("i-enochian", None), - ("i-hak", Some("hak")), - ("i-klingon", Some("tlh")), - ("i-lux", Some("lb")), - ("i-mingo", None), - ("i-navajo", Some("nv")), - ("i-pwn", Some("pwn")), - ("i-tao", Some("tao")), - ("i-tay", Some("tay")), - ("i-tsu", Some("tsu")), - ("no-bok", Some("nb")), - ("no-nyn", Some("nn")), - ("sgn-BE-FR", Some("sfb")), - ("sgn-BE-NL", Some("vgt")), - ("sgn-CH-DE", Some("sgg")), - ("zh-guoyu", Some("cmn")), - ("zh-hakka", Some("hak")), - ("zh-min", None), - ("zh-min-nan", Some("nan")), - ("zh-xiang", Some("hsn"))]; - -const DEPRECATED_LANGUAGE: [(&'static str, &'static str); 53] = [("in", "id"), - ("iw", "he"), - ("ji", "yi"), - ("jw", "jv"), - ("mo", "ro"), - ("aam", "aas"), - ("adp", "dz"), - ("aue", "ktz"), - ("ayx", "nun"), - ("bjd", "drl"), - ("ccq", "rki"), - ("cjr", "mom"), - ("cka", "cmr"), - ("cmk", "xch"), - ("drh", "khk"), - ("drw", "prs"), - ("gav", "dev"), - ("gfx", "vaj"), - ("gti", "nyc"), - ("hrr", "jal"), - ("ibi", "opa"), - ("ilw", "gal"), - ("kgh", "kml"), - ("koj", "kwv"), - ("kwq", "yam"), - ("kxe", "tvd"), - ("lii", "raq"), - ("lmm", "rmx"), - ("meg", "cir"), - ("mst", "mry"), - ("mwj", "vaj"), - ("myt", "mry"), - ("nnx", "ngv"), - ("oun", "vaj"), - ("pcr", "adx"), - ("pmu", "phr"), - ("ppr", "lcq"), - ("puz", "pub"), - ("sca", "hle"), - ("thx", "oyb"), - ("tie", "ras"), - ("tkk", "twm"), - ("tlw", "weo"), - ("tnf", "prs"), - ("tsf", "taj"), - ("uok", "ema"), - ("xia", "acn"), - ("xsj", "suj"), - ("ybd", "rki"), - ("yma", "lrr"), - ("ymt", "mtm"), - ("yos", "zom"), - ("yuu", "yug")]; - -const DEPRECATED_REGION: [(&'static str, &'static str); 6] = [("BU", "MM"), - ("DD", "DE"), - ("FX", "FR"), - ("TP", "TL"), - ("YD", "YE"), - ("ZR", "CD")]; - -/// A language tag as described in 
[BCP47](http://tools.ietf.org/html/bcp47). -/// -/// Language tags are used to help identify languages, whether spoken, -/// written, signed, or otherwise signaled, for the purpose of -/// communication. This includes constructed and artificial languages -/// but excludes languages not intended primarily for human -/// communication, such as programming languages. -#[derive(Debug, Default, Eq, Clone)] -#[cfg_attr(feature = "heap_size", derive(HeapSizeOf))] -pub struct LanguageTag { - /// Language subtags are used to indicate the language, ignoring all - /// other aspects such as script, region or spefic invariants. - pub language: Option, - /// Extended language subtags are used to identify certain specially - /// selected languages that, for various historical and compatibility - /// reasons, are closely identified with or tagged using an existing - /// primary language subtag. - pub extlangs: Vec, - /// Script subtags are used to indicate the script or writing system - /// variations that distinguish the written forms of a language or its - /// dialects. - pub script: Option, - /// Region subtags are used to indicate linguistic variations associated - /// with or appropriate to a specific country, territory, or region. - /// Typically, a region subtag is used to indicate variations such as - /// regional dialects or usage, or region-specific spelling conventions. - /// It can also be used to indicate that content is expressed in a way - /// that is appropriate for use throughout a region, for instance, - /// Spanish content tailored to be useful throughout Latin America. - pub region: Option, - /// Variant subtags are used to indicate additional, well-recognized - /// variations that define a language or its dialects that are not - /// covered by other available subtags. - pub variants: Vec, - /// Extensions provide a mechanism for extending language tags for use in - /// various applications. 
They are intended to identify information that - /// is commonly used in association with languages or language tags but - /// that is not part of language identification. - pub extensions: BTreeMap>, - /// Private use subtags are used to indicate distinctions in language - /// that are important in a given context by private agreement. - pub privateuse: Vec, -} - -impl LanguageTag { - /// Matches language tags. The first language acts as a language range, the second one is used - /// as a normal language tag. None fields in the language range are ignored. If the language - /// tag has more extlangs than the range these extlangs are ignored. Matches are - /// case-insensitive. `*` in language ranges are represented using `None` values. The language - /// range `*` that matches language tags is created by the default language tag: - /// `let wildcard: LanguageTag = Default::default();.` - /// - /// For example the range `en-GB` matches only `en-GB` and `en-Arab-GB` but not `en`. - /// The range `en` matches all language tags starting with `en` including `en`, `en-GB`, - /// `en-Arab` and `en-Arab-GB`. - /// - /// # Panics - /// If the language range has extensions or private use tags. 
- /// - /// # Examples - /// ``` - /// # #[macro_use] extern crate language_tags; - /// # fn main() { - /// let range_italian = langtag!(it); - /// let tag_german = langtag!(de); - /// let tag_italian_switzerland = langtag!(it;;;CH); - /// assert!(!range_italian.matches(&tag_german)); - /// assert!(range_italian.matches(&tag_italian_switzerland)); - /// - /// let range_spanish_brazil = langtag!(es;;;BR); - /// let tag_spanish = langtag!(es); - /// assert!(!range_spanish_brazil.matches(&tag_spanish)); - /// # } - /// ``` - pub fn matches(&self, other: &LanguageTag) -> bool { - fn matches_option(a: &Option, b: &Option) -> bool { - match (a, b) { - (&Some(ref a), &Some(ref b)) => a.eq_ignore_ascii_case(b), - (&None, _) => true, - (_, &None) => false, - } - } - fn matches_vec(a: &[String], b: &[String]) -> bool { - a.iter().zip(b.iter()).all(|(x, y)| x.eq_ignore_ascii_case(y)) - } - assert!(self.is_language_range()); - matches_option(&self.language, &other.language) && - matches_vec(&self.extlangs, &other.extlangs) && - matches_option(&self.script, &other.script) && - matches_option(&self.region, &other.region) && - matches_vec(&self.variants, &other.variants) - } - - /// Checks if it is a language range, meaning that there are no extension and privateuse tags. - pub fn is_language_range(&self) -> bool { - self.extensions.is_empty() && self.privateuse.is_empty() - } - - /// Returns the canonical version of the language tag. - /// - /// It currently applies the following steps: - /// - /// * Grandfathered tags are replaced with the canonical version if possible. - /// * Extension languages are promoted to primary language. - /// * Deprecated languages are replaced with modern equivalents. - /// * Deprecated regions are replaced with new country names. - /// * The `heploc` variant is replaced with `alalc97`. - /// - /// The returned language tags may not be completly canonical and they are - /// not validated. 
- pub fn canonicalize(&self) -> LanguageTag { - if let Some(ref language) = self.language { - if let Some(&(_, Some(tag))) = GRANDFATHERED.iter().find(|&&(x, _)| { - x.eq_ignore_ascii_case(&language) - }) { - return tag.parse().expect("GRANDFATHERED list must contain only valid tags."); - } - } - let mut tag = self.clone(); - if !self.extlangs.is_empty() { - tag.language = Some(self.extlangs[0].clone()); - tag.extlangs = Vec::new(); - } - if let Some(ref language) = self.language { - if let Some(&(_, l)) = DEPRECATED_LANGUAGE.iter().find(|&&(x, _)| { - x.eq_ignore_ascii_case(&language) - }) { - tag.language = Some(l.to_owned()); - }; - } - if let Some(ref region) = self.region { - if let Some(&(_, r)) = DEPRECATED_REGION.iter().find(|&&(x, _)| { - x.eq_ignore_ascii_case(®ion) - }) { - tag.region = Some(r.to_owned()); - }; - } - tag.variants = self.variants - .iter() - .map(|variant| { - if "heploc".eq_ignore_ascii_case(variant) { - "alalc97".to_owned() - } else { - variant.clone() - } - }) - .collect(); - tag - } -} - -impl PartialEq for LanguageTag { - fn eq(&self, other: &LanguageTag) -> bool { - fn eq_option(a: &Option, b: &Option) -> bool { - match (a, b) { - (&Some(ref a), &Some(ref b)) => a.eq_ignore_ascii_case(b), - (&None, &None) => true, - _ => false, - } - } - fn eq_vec(a: &[String], b: &[String]) -> bool { - a.len() == b.len() && a.iter().zip(b.iter()).all(|(x, y)| x.eq_ignore_ascii_case(y)) - } - eq_option(&self.language, &other.language) && eq_vec(&self.extlangs, &other.extlangs) && - eq_option(&self.script, &other.script) && - eq_option(&self.region, &other.region) && eq_vec(&self.variants, &other.variants) && - BTreeSet::from_iter(&self.extensions) == BTreeSet::from_iter(&other.extensions) && - self.extensions.keys().all(|a| eq_vec(&self.extensions[a], &other.extensions[a])) && - eq_vec(&self.privateuse, &other.privateuse) - } -} - -/// Handles normal tags. -/// The parser has a position from 0 to 6. 
Bigger positions reepresent the ASCII codes of -/// single character extensions -/// language-extlangs-script-region-variant-extension-privateuse -/// --- 0 -- -- 1 -- -- 2 - -- 3 - -- 4 -- --- x --- ---- 6 --- -fn parse_language_tag(langtag: &mut LanguageTag, t: &str) -> Result { - let mut position: u8 = 0; - for subtag in t.split('-') { - if subtag.len() > 8 { - // All subtags have a maximum length of eight characters. - return Err(Error::SubtagTooLong); - } - if position == 6 { - langtag.privateuse.push(subtag.to_owned()); - } else if subtag.eq_ignore_ascii_case("x") { - position = 6; - } else if position == 0 { - // Primary language - if subtag.len() < 2 || !is_alphabetic(subtag) { - return Err(Error::InvalidLanguage); - } - langtag.language = Some(subtag.to_owned()); - if subtag.len() < 4 { - // extlangs are only allowed for short language tags - position = 1; - } else { - position = 2; - } - } else if position == 1 && subtag.len() == 3 && is_alphabetic(subtag) { - // extlangs - langtag.extlangs.push(subtag.to_owned()); - } else if position <= 2 && subtag.len() == 4 && is_alphabetic(subtag) { - // Script - langtag.script = Some(subtag.to_owned()); - position = 3; - } else if position <= 3 && - (subtag.len() == 2 && is_alphabetic(subtag) || subtag.len() == 3 && is_numeric(subtag)) { - langtag.region = Some(subtag.to_owned()); - position = 4; - } else if position <= 4 && - (subtag.len() >= 5 && is_alphabetic(&subtag[0..1]) || - subtag.len() >= 4 && is_numeric(&subtag[0..1])) { - // Variant - langtag.variants.push(subtag.to_owned()); - position = 4; - } else if subtag.len() == 1 { - position = subtag.as_bytes()[0] as u8; - if langtag.extensions.contains_key(&position) { - return Err(Error::DuplicateExtension); - } - langtag.extensions.insert(position, Vec::new()); - } else if position > 6 { - langtag.extensions - .get_mut(&position) - .expect("no entry found for key") - .push(subtag.to_owned()); - } else { - return Err(Error::InvalidSubtag); - } - } - 
Ok(position) -} - -impl std::str::FromStr for LanguageTag { - type Err = Error; - fn from_str(s: &str) -> Result { - let t = s.trim(); - if !is_alphanumeric_or_dash(t) { - return Err(Error::ForbiddenChar); - } - let mut langtag: LanguageTag = Default::default(); - // Handle grandfathered tags - if let Some(&(tag, _)) = GRANDFATHERED.iter().find(|&&(x, _)| x.eq_ignore_ascii_case(t)) { - langtag.language = Some((*tag).to_owned()); - return Ok(langtag); - } - let position = try!(parse_language_tag(&mut langtag, t)); - if langtag.extensions.values().any(|x| x.is_empty()) { - // Extensions and privateuse must not be empty if present - return Err(Error::EmptyExtension); - } - if position == 6 && langtag.privateuse.is_empty() { - return Err(Error::EmptyPrivateUse); - } - if langtag.extlangs.len() > 2 { - // maximum 3 extlangs - return Err(Error::TooManyExtlangs); - } - Ok(langtag) - } -} - -impl fmt::Display for LanguageTag { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fn cmp_ignore_ascii_case(a: &u8, b: &u8) -> Ordering { - fn byte_to_uppercase(x: u8) -> u8 { - if x > 96 { - x - 32 - } else { - x - } - } - let x: u8 = byte_to_uppercase(*a); - let y: u8 = byte_to_uppercase(*b); - x.cmp(&y) - } - if let Some(ref x) = self.language { - try!(Display::fmt(&x.to_ascii_lowercase()[..], f)) - } - for x in &self.extlangs { - try!(write!(f, "-{}", x.to_ascii_lowercase())); - } - if let Some(ref x) = self.script { - let y: String = x.chars() - .enumerate() - .map(|(i, c)| { - if i == 0 { - c.to_ascii_uppercase() - } else { - c.to_ascii_lowercase() - } - }) - .collect(); - try!(write!(f, "-{}", y)); - } - if let Some(ref x) = self.region { - try!(write!(f, "-{}", x.to_ascii_uppercase())); - } - for x in &self.variants { - try!(write!(f, "-{}", x.to_ascii_lowercase())); - } - let mut extensions: Vec<(&u8, &Vec)> = self.extensions.iter().collect(); - extensions.sort_by(|&(a, _), &(b, _)| cmp_ignore_ascii_case(a, b)); - for (raw_key, values) in extensions { - let mut key 
= String::new(); - key.push(*raw_key as char); - try!(write!(f, "-{}", key)); - for value in values { - try!(write!(f, "-{}", value)); - } - } - if !self.privateuse.is_empty() { - if self.language.is_none() { - try!(f.write_str("x")); - } else { - try!(f.write_str("-x")); - } - for value in &self.privateuse { - try!(write!(f, "-{}", value)); - } - } - Ok(()) - } -} - -#[macro_export] -/// Utility for creating simple language tags. -/// -/// The macro supports the language, exlang, script and region parts of language tags, -/// they are separated by semicolons, omitted parts are denoted with mulitple semicolons. -/// -/// # Examples -/// * `it`: `langtag!(it)` -/// * `it-LY`: `langtag!(it;;;LY)` -/// * `it-Arab-LY`: `langtag!(it;;Arab;LY)` -/// * `ar-afb`: `langtag!(ar;afb)` -/// * `i-enochian`: `langtag!(i-enochian)` -macro_rules! langtag { - ( $language:expr ) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: Vec::new(), - script: None, - region: None, - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;;;$region:expr ) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: Vec::new(), - script: None, - region: Some(stringify!($region).to_owned()), - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;;$script:expr ) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: Vec::new(), - script: Some(stringify!($script).to_owned()), - region: None, - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;;$script:expr;$region:expr ) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: Vec::new(), - script: Some(stringify!($script).to_owned()), - region: Some(stringify!($region).to_owned()), 
- variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;$extlangs:expr) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: vec![stringify!($extlangs).to_owned()], - script: None, - region: None, - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;$extlangs:expr;$script:expr) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: vec![stringify!($extlangs).to_owned()], - script: Some(stringify!($script).to_owned()), - region: None, - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;$extlangs:expr;;$region:expr ) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: vec![stringify!($extlangs).to_owned()], - script: None, - region: Some(stringify!($region).to_owned()), - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; - ( $language:expr;$extlangs:expr;$script:expr;$region:expr ) => { - $crate::LanguageTag { - language: Some(stringify!($language).to_owned()), - extlangs: vec![stringify!($extlangs).to_owned()], - script: Some(stringify!($script).to_owned()), - region: Some(stringify!($region).to_owned()), - variants: Vec::new(), - extensions: ::std::collections::BTreeMap::new(), - privateuse: Vec::new(), - } - }; -} diff --git a/third_party/rust/language-tags/tests/tests.rs b/third_party/rust/language-tags/tests/tests.rs deleted file mode 100644 index 5a45819a7381..000000000000 --- a/third_party/rust/language-tags/tests/tests.rs +++ /dev/null @@ -1,580 +0,0 @@ -#[macro_use] -extern crate language_tags; - -use std::ascii::AsciiExt; -use std::default::Default; -use std::collections::BTreeMap; - -use language_tags::{Error, LanguageTag, Result}; - -// All tests here may be 
completly nonsensical. - -#[test] -fn test_lang_from_str() { - let a: LanguageTag = "de".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("de".to_owned()); - assert_eq!(a, b); -} - -#[test] -fn test_extlang_from_str() { - let a: LanguageTag = "ar-afb".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("ar".to_owned()); - b.extlangs = vec!["afb".to_owned()]; - assert_eq!(a, b); -} - -#[test] -fn test_script_from_str() { - let a: LanguageTag = "ar-afb-Latn".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("ar".to_owned()); - b.extlangs = vec!["afb".to_owned()]; - b.script = Some("latn".to_owned()); - assert_eq!(a, b); -} - -#[test] -fn test_region_from_str() { - let mut a: LanguageTag = "ar-DE".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("ar".to_owned()); - b.region = Some("de".to_owned()); - assert_eq!(a, b); - - a = "ar-005".parse().unwrap(); - b = Default::default(); - b.language = Some("ar".to_owned()); - b.region = Some("005".to_owned()); - assert_eq!(a, b); - - a = "ar-005".parse().unwrap(); - b = Default::default(); - b.language = Some("ar".to_owned()); - b.region = Some("005".to_owned()); - assert_eq!(a, b); -} - -#[test] -fn test_variant_from_str() { - let a: LanguageTag = "sl-IT-nedis".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("sl".parse().unwrap()); - b.region = Some("it".parse().unwrap()); - b.variants = vec!["nedis".parse().unwrap()]; - assert_eq!(a, b); -} - -#[test] -fn test_invalid_from_str() { - assert_eq!("sl-07".parse::(), Err(Error::InvalidSubtag)); -} - -#[test] -fn test_strange_case_from_str() { - // This is a perfectly valid language code - let a: LanguageTag = "SL-AFB-lATN-005-nEdis".parse().unwrap(); - let b = LanguageTag { - language: Some("sl".to_owned()), - extlangs: vec!["afb".to_owned()], - script: Some("Latn".to_owned()), - region: 
Some("005".to_owned()), - variants: vec!["nedis".to_owned()], - extensions: BTreeMap::new(), - privateuse: Vec::new(), - }; - assert_eq!(a, b); -} - -#[test] -fn test_fmt() { - let a: LanguageTag = "ar-arb-Latn-DE-nedis-foobar".parse().unwrap(); - assert_eq!(format!("{}", a), "ar-arb-Latn-DE-nedis-foobar"); - let b: LanguageTag = "ar-arb-latn-de-nedis-foobar".parse().unwrap(); - assert_eq!(format!("{}", b), "ar-arb-Latn-DE-nedis-foobar"); - let b: LanguageTag = "AR-ARB-LATN-DE-NEDIS-FOOBAR".parse().unwrap(); - assert_eq!(format!("{}", b), "ar-arb-Latn-DE-nedis-foobar"); - let b: LanguageTag = "xx-z-foo-a-bar-F-spam-b-eggs".parse().unwrap(); - assert_eq!(format!("{}", b), "xx-a-bar-b-eggs-F-spam-z-foo"); -} - -#[test] -fn test_match() { - let de_de: LanguageTag = "de-DE".parse().unwrap(); - let de: LanguageTag = "de".parse().unwrap(); - assert!(de.matches(&de_de)); - assert!(!de_de.matches(&de)); -} - -#[test] -fn test_match_all() { - let de_de: LanguageTag = "de-DE".parse().unwrap(); - let crazy: LanguageTag = "GDJ-nHYa-bw-X-ke-rohH5GfS-LdJKsGVe".parse().unwrap(); - let wildcard: LanguageTag = Default::default(); - assert!(wildcard.matches(&de_de)); - assert!(wildcard.matches(&crazy)); -} - -#[test] -fn test_klingon() { - let a: LanguageTag = "i-klingon".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("i-klingon".to_owned()); - assert_eq!(a, b); -} - -#[test] -fn test_private_use() { - let a: LanguageTag = "es-x-foobar-AT-007".parse().unwrap(); - let mut b: LanguageTag = Default::default(); - b.language = Some("es".to_owned()); - b.privateuse = vec!["foobar".to_owned(), "AT".to_owned(), "007".to_owned()]; - assert_eq!(a, b); -} - -#[test] -fn test_unicode() { - let x: Result = "zh-x-Üńìcødê".parse(); - assert!(x.is_err()); -} - -#[test] -fn test_format() { - let x: LanguageTag = "HkgnmerM-x-e5-zf-VdDjcpz-1V6".parse().unwrap(); - assert!(format!("{}", x).eq_ignore_ascii_case("HkgnmerM-x-e5-zf-VdDjcpz-1V6")); - let y: LanguageTag 
= "MgxQa-ywEp-8lcW-7bvT-h-dP1Md-0h7-0Z3ir".parse().unwrap(); - assert!(format!("{}", y).eq_ignore_ascii_case("MgxQa-ywEp-8lcW-7bvT-h-dP1Md-0h7-0Z3ir")); -} - -#[test] -fn test_cmp() { - assert_eq!(langtag!(dE;;AraB;lY), langtag!(DE;;aRaB;LY)); - assert!(langtag!(dE;;AraB;lY).matches(&langtag!(DE;;aRaB;LY))); - let mut extensions = BTreeMap::new(); - extensions.insert(75 /* K */, vec!["foo".to_owned(), "bar".to_owned()]); - extensions.insert(112 /* p */, vec!["spam".to_owned(), "eggs".to_owned()]); - let langtag = LanguageTag { - language: Some("it".to_owned()), - extlangs: Vec::new(), - script: None, - region: None, - variants: Vec::new(), - extensions: extensions, - privateuse: Vec::new(), - }; - assert_eq!(langtag, langtag); -} - -#[test] -fn test_macro() { - let a1 = langtag!(it); - let a2 = LanguageTag { - language: Some("it".to_owned()), - extlangs: Vec::new(), - script: None, - region: None, - variants: Vec::new(), - extensions: BTreeMap::new(), - privateuse: Vec::new(), - }; - assert_eq!(a1, a2); - let b1 = langtag!(it;;;LY); - let b2 = LanguageTag { - language: Some("it".to_owned()), - extlangs: Vec::new(), - script: None, - region: Some("LY".to_owned()), - variants: Vec::new(), - extensions: BTreeMap::new(), - privateuse: Vec::new(), - }; - assert_eq!(b1, b2); - let c1 = langtag!(it;;Arab;LY); - let c2 = LanguageTag { - language: Some("it".to_owned()), - extlangs: Vec::new(), - script: Some("Arab".to_owned()), - region: Some("LY".to_owned()), - variants: Vec::new(), - extensions: BTreeMap::new(), - privateuse: Vec::new(), - }; - assert_eq!(c1, c2); -} - -#[test] -fn test_private_tag() { - let mut tag: LanguageTag = Default::default(); - tag.privateuse = vec!["foo".to_owned(), "bar".to_owned()]; - assert_eq!(format!("{}", tag), "x-foo-bar"); -} - -#[test] -fn test_grandfathered_tag() { - let tag_irregular: LanguageTag = "i-klingon".parse().unwrap(); - assert_eq!(tag_irregular.language.unwrap(), "i-klingon"); - let tag_regular: LanguageTag = 
"zh-hakka".parse().unwrap(); - assert_eq!(tag_regular.language.unwrap(), "zh-hakka"); -} - -#[test] -fn test_eq() { - let mut tag1: LanguageTag = Default::default(); - tag1.language = Some("zh".to_owned()); - let mut tag2: LanguageTag = Default::default(); - tag2.language = Some("zh".to_owned()); - tag2.script = Some("Latn".to_owned()); - assert!(tag1 != tag2); -} - -#[test] -fn test_wellformed_tags() { - // Source: http://www.langtag.net/test-suites/well-formed-tags.txt - let tags = vec![ - "fr ", - "fr-Latn", - "fr-fra", // Extended tag - "fr-Latn-FR", - "fr-Latn-419", - "fr-FR", - "ax-TZ", // Not in the registry, but well-formed - "fr-shadok", // Variant - "fr-y-myext-myext2", - "fra-Latn", // ISO 639 can be 3-letters - "fra", - "fra-FX", - "i-klingon", // grandfathered with singleton - "I-kLINgon", // tags are case-insensitive... - "no-bok", // grandfathered without singleton - "fr-Lat", // Extended", - "mn-Cyrl-MN", - "mN-cYrL-Mn", - "fr-Latn-CA", - "en-US", - "fr-Latn-CA", - "i-enochian", // Grand fathered - "x-fr-CH ", - "sr-Latn-CS", - "es-419", - "sl-nedis", - "de-CH-1996", - "de-Latg-1996", - "sl-IT-nedis", - "en-a-bbb-x-a-ccc", - "de-a-value", - "en-Latn-GB-boont-r-extended-sequence-x-private", - "en-x-US", - "az-Arab-x-AZE-derbend", - "es-Latn-CO-x-private", - "en-US-boont", - "ab-x-abc-x-abc", // anything goes after x - "ab-x-abc-a-a", // ditto", - "i-default", // grandfathered", - "i-klingon", // grandfathered", - "abcd-Latn", // Language of 4 chars reserved for future use - "AaBbCcDd-x-y-any-x", // Language of 5-8 chars, registered - "en", - "de-AT", - "es-419", - "de-CH-1901", - "sr-Cyrl", - "sr-Cyrl-CS", - "sl-Latn-IT-rozaj", - "en-US-x-twain", - "zh-cmn", - "zh-cmn-Hant", - "zh-cmn-Hant-HK", - "zh-gan", - "zh-yue-Hant-HK", - "xr-lxs-qut", // extlangS - "xr-lqt-qu", // extlang + region - "xr-p-lze", // Extension - ]; - let failed: Vec<&str> = tags.iter() - .filter(|x| x.parse::().is_err()) - .map(|&x| x) - .collect(); - println!("Number: { } 
Failed: {:?}", failed.len(), failed); - assert!(failed.is_empty()); -} - -#[test] -fn test_broken_tags() { - // http://www.langtag.net/test-suites/broken-tags.txt - let tags = vec![ - "f", - "f-Latn", - "fr-Latn-F", - "a-value", - "en-a-bbb-a-ccc", // 'a' appears twice - "tlh-a-b-foo", - "i-notexist", // grandfathered but not registered: always invalid - "abcdefghi-012345678", - "ab-abc-abc-abc-abc", - "ab-abcd-abc", - "ab-ab-abc", - "ab-123-abc", - "a-Hant-ZH", - "a1-Hant-ZH", - "ab-abcde-abc", - "ab-1abc-abc", - "ab-ab-abcd", - "ab-123-abcd", - "ab-abcde-abcd", - "ab-1abc-abcd", - "ab-a-b", - "ab-a-x", - "ab--ab", - "ab-abc-", - "-ab-abc", - "ab-c-abc-r-toto-c-abc # 'c' appears twice ", - "abcd-efg", - "aabbccddE", - ]; - let failed: Vec<(&str, Result)> = tags.iter() - .map(|x| (*x, x.parse::())) - .filter(|x| x.1.is_ok()) - .collect(); - println!("Number: { } Failed: {:?}", failed.len(), failed); - assert!(failed.is_empty()); -} - -#[test] -fn test_random_good_tags() { - // http://unicode.org/repos/cldr/trunk/tools/java/org/unicode/cldr/util/data/langtagTest.txt - let tags = vec!["zszLDm-sCVS-es-x-gn762vG-83-S-mlL", - "IIJdFI-cfZv", - "kbAxSgJ-685", - "tbutP", - "hDL-595", - "dUf-iUjq-0hJ4P-5YkF-WD8fk", - "FZAABA-FH", - "xZ-lh-4QfM5z9J-1eG4-x-K-R6VPr2z", - "Fyi", - "SeI-DbaG", - "ch-xwFn", - "OeC-GPVI", - "JLzvUSi", - "Fxh-hLAs", - "pKHzCP-sgaO-554", - "eytqeW-hfgH-uQ", - "ydn-zeOP-PR", - "uoWmBM-yHCf-JE", - "xwYem", - "zie", - "Re-wjSv-Ey-i-XE-E-JjWTEB8-f-DLSH-NVzLH-AtnFGWoH-SIDE", - "Ri-063-c-u6v-ZfhkToTB-C-IFfmv-XT-j-rdyYFMhK-h-pY-D5-Oh6FqBhL-hcXt-v-WdpNx71-\ - K-c74m4-eBTT7-JdH7Q1Z", - "ji", - "IM-487", - "EPZ-zwcB", - "GauwEcwo", - "kDEP", - "FwDYt-TNvo", - "ottqP-KLES-x-9-i9", - "fcflR-grQQ", - "TvFwdu-kYhs", - "WE-336", - "MgxQa-ywEp-8lcW-7bvT-h-dP1Md-0h7-0Z3ir-K-Srkm-kA-7LXM-Z-whb2MiO-2mNsvbLm-W3O\ - -4r-U-KceIxHdI-gvMVgUBV-2uRUni-J0-7C8yTK2", - "Hyr-B-evMtVoB1-mtsVZf-vQMV-gM-I-rr-kvLzg-f-lAUK-Qb36Ne-Z-7eFzOD-mv6kKf-l-miZ\ - 7U3-k-XDGtNQG", - "ybrlCpzy", 
- "PTow-w-cAQ51-8Xd6E-cumicgt-WpkZv3NY-q-ORYPRy-v-A4jL4A-iNEqQZZ-sjKn-W-N1F-pzy\ - c-xP5eWz-LmsCiCcZ", - "ih-DlPR-PE", - "Krf-362", - "WzaD", - "EPaOnB-gHHn", - "XYta", - "NZ-RgOO-tR", - "at-FE", - "Tpc-693", - "YFp", - "gRQrQULo", - "pVomZ-585", - "laSu-ZcAq-338", - "gCW", - "PydSwHRI-TYfF", - "zKmWDD", - "X-bCrL5RL", - "HK", - "YMKGcLY", - "GDJ-nHYa-bw-X-ke-rohH5GfS-LdJKsGVe", - "tfOxdau-yjge-489-a-oB-I8Csb-1ESaK1v-VFNz-N-FT-ZQyn-On2-I-hu-vaW3-jIQb-vg0U-h\ - Ul-h-dO6KuJqB-U-tde2L-P3gHUY-vnl5c-RyO-H-gK1-zDPu-VF1oeh8W-kGzzvBbW-yuAJZ", - "LwDux", - "Zl-072", - "Ri-Ar", - "vocMSwo-cJnr-288", - "kUWq-gWfQ-794", - "YyzqKL-273", - "Xrw-ZHwH-841-9foT-ESSZF-6OqO-0knk-991U-9p3m-b-JhiV-0Kq7Y-h-cxphLb-cDlXUBOQ-X\ - -4Ti-jty94yPp", - "en-GB-oed", - "LEuZl-so", - "HyvBvFi-cCAl-X-irMQA-Pzt-H", - "uDbsrAA-304", - "wTS", - "IWXS", - "XvDqNkSn-jRDR", - "gX-Ycbb-iLphEks-AQ1aJ5", - "FbSBz-VLcR-VL", - "JYoVQOP-Iytp", - "gDSoDGD-lq-v-7aFec-ag-k-Z4-0kgNxXC-7h", - "Bjvoayy-029", - "qSDJd", - "qpbQov", - "fYIll-516", - "GfgLyfWE-EHtB", - "Wc-ZMtk", - "cgh-VEYK", - "WRZs-AaFd-yQ", - "eSb-CpsZ-788", - "YVwFU", - "JSsHiQhr-MpjT-381", - "LuhtJIQi-JKYt", - "vVTvS-RHcP", - "SY", - "fSf-EgvQfI-ktWoG-8X5z-63PW", - "NOKcy", - "OjJb-550", - "KB", - "qzKBv-zDKk-589", - "Jr", - "Acw-GPXf-088", - "WAFSbos", - "HkgnmerM-x-e5-zf-VdDjcpz-1V6", - "UAfYflJU-uXDc-YV", - "x-CHsHx-VDcOUAur-FqagDTx-H-V0e74R", - "uZIAZ-Xmbh-pd"]; - let failed: Vec<(&str, Result)> = tags.iter() - .map(|x| (*x, x.parse::())) - .filter(|x| x.1.is_err()) - .collect(); - println!("Number: { } Failed: {:?}", failed.len(), failed); - assert!(failed.is_empty()); -} - -#[test] -fn test_random_bad_tags() { - // http://unicode.org/repos/cldr/trunk/tools/java/org/unicode/cldr/util/data/langtagTest.txt - let tags = vec!["EdY-z_H791Xx6_m_kj", - "qWt85_8S0-L_rbBDq0gl_m_O_zsAx_nRS", - "VzyL2", - "T_VFJq-L-0JWuH_u2_VW-hK-kbE", - "u-t", - "Q-f_ZVJXyc-doj_k-i", - "JWB7gNa_K-5GB-25t_W-s-ZbGVwDu1-H3E", - "b-2T-Qob_L-C9v_2CZxK86", - 
"fQTpX_0_4Vg_L3L_g7VtALh2", - "S-Z-E_J", - "f6wsq-02_i-F", - "9_GcUPq_G", - "QjsIy_9-0-7_Dv2yPV09_D-JXWXM", - "D_se-f-k", - "ON47Wv1_2_W", - "f-z-R_s-ha", - "N3APeiw_195_Bx2-mM-pf-Z-Ip5lXWa-5r", - "IRjxU-E_6kS_D_b1b_H", - "NB-3-5-AyW_FQ-9hB-TrRJg3JV_3C", - "yF-3a_V_FoJQAHeL_Z-Mc-u", - "n_w_bbunOG_1-s-tJMT5je", - "Q-AEWE_X", - "57b1O_k_R6MU_sb", - "hK_65J_i-o_SI-Y", - "wB4B7u_5I2_I_NZPI", - "J24Nb_q_d-zE", - "v6-dHjJmvPS_IEb-x_A-O-i", - "8_8_dl-ZgBr84u-P-E", - "nIn-xD7EVhe_C", - "5_N-6P_x7Of_Lo_6_YX_R", - "0_46Oo0sZ-YNwiU8Wr_d-M-pg1OriV", - "laiY-5", - "K-8Mdd-j_ila0sSpo_aO8_J", - "wNATtSL-Cp4_gPa_fD41_9z", - "H_FGz5V8_n6rrcoz0_1O6d-kH-7-N", - "wDOrnHU-odqJ_vWl", - "gP_qO-I-jH", - "h", - "dJ0hX-o_csBykEhU-F", - "L-Vf7_BV_eRJ5goSF_Kp", - "y-oF-chnavU-H", - "9FkG-8Q-8_v", - "W_l_NDQqI-O_SFSAOVq", - "kDG3fzXw", - "t-nsSp-7-t-mUK2", - "Yw-F", - "1-S_3_l", - "u-v_brn-Y", - "4_ft_3ZPZC5lA_D", - "n_dR-QodsqJnh_e", - "Hwvt-bSwZwj_KL-hxg0m-3_hUG", - "mQHzvcV-UL-o2O_1KhUJQo_G2_uryk3-a", - "b-UTn33HF", - "r-Ep-jY-aFM_N_H", - "K-k-krEZ0gwD_k_ua-9dm3Oy-s_v", - "XS_oS-p", - "EIx_h-zf5", - "p_z-0_i-omQCo3B", - "1_q0N_jo_9", - "0Ai-6-S", - "L-LZEp_HtW", - "Zj-A4JD_2A5Aj7_b-m3", - "x", - "p-qPuXQpp_d-jeKifB-c-7_G-X", - "X94cvJ_A", - "F2D25R_qk_W-w_Okf_kx", - "rc-f", - "D", - "gD_WrDfxmF-wu-E-U4t", - "Z_BN9O4_D9-D_0E_KnCwZF-84b-19", - "T-8_g-u-0_E", - "lXTtys9j_X_A_m-vtNiNMw_X_b-C6Nr", - "V_Ps-4Y-S", - "X5wGEA", - "mIbHFf_ALu4_Jo1Z1", - "ET-TacYx_c", - "Z-Lm5cAP_ri88-d_q_fi8-x", - "rTi2ah-4j_j_4AlxTs6m_8-g9zqncIf-N5", - "FBaLB85_u-0NxhAy-ZU_9c", - "x_j_l-5_aV95_s_tY_jp4", - "PL768_D-m7jNWjfD-Nl_7qvb_bs_8_Vg", - "9-yOc-gbh", - "6DYxZ_SL-S_Ye", - "ZCa-U-muib-6-d-f_oEh_O", - "Qt-S-o8340F_f_aGax-c-jbV0gfK_p", - "WE_SzOI_OGuoBDk-gDp", - "cs-Y_9", - "m1_uj", - "Y-ob_PT", - "li-B", - "f-2-7-9m_f8den_J_T_d", - "p-Os0dua-H_o-u", - "L", - "rby-w"]; - let failed: Vec<(&str, Result)> = tags.iter() - .map(|x| (*x, x.parse::())) - .filter(|x| x.1.is_ok()) - .collect(); - println!("Number: { } 
Failed: {:?}", failed.len(), failed); - assert!(failed.is_empty()); -} - -#[test] -fn test_canonicalize() { - let langtag = langtag!(en;;;BU); - assert_eq!(langtag.canonicalize().to_string(), "en-MM"); - let langtag = LanguageTag { - language: Some("sgn-BE-FR".to_owned()), - extlangs: vec![], - script: None, - region: None, - variants: vec![], - extensions: BTreeMap::new(), - privateuse: vec![], - }; - assert_eq!(langtag.canonicalize().to_string(), "sfb"); -} diff --git a/third_party/rust/lazycell-0.4.0/.cargo-checksum.json b/third_party/rust/lazycell-0.4.0/.cargo-checksum.json new file mode 100644 index 000000000000..5aa2517a8559 --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"a8defced70d220e04f77271ccced7e207d4e1417ed5e512b3dd4c8f9979e6a52","Cargo.toml":"46631e96c028ae56b797ec10524d6d9912fdd9857c3bea82957b1c394050b224","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6fc4fe1f402475d5c3f6e1b5c35407c5f489daa58bf8bb085d231909b5fac666","README.md":"fb8373bbd59d2885e119bdacf25898e0e3b98d4a97ea840c62cf967db28c61a2","src/lib.rs":"efcff18d06fdcc4bca2ead19e41b33dbc83f9c7d1591cd98206f657dad704580"},"package":"ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b"} \ No newline at end of file diff --git a/third_party/rust/lazycell-0.4.0/CHANGELOG.md b/third_party/rust/lazycell-0.4.0/CHANGELOG.md new file mode 100644 index 000000000000..3f1e820e1fc3 --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/CHANGELOG.md @@ -0,0 +1,80 @@ + +## (2016-08-17) + + +#### Breaking Changes + +* **LazyCell:** return Err(value) on full cell ([68f3415d](https://github.com/indiv0/lazycell/commit/68f3415dd5d6a66ba047a133b7028ebe4f1c5070), breaks [#](https://github.com/indiv0/lazycell/issues/)) + +#### Improvements + +* **LazyCell:** return Err(value) on full cell ([68f3415d](https://github.com/indiv0/lazycell/commit/68f3415dd5d6a66ba047a133b7028ebe4f1c5070), breaks 
[#](https://github.com/indiv0/lazycell/issues/)) + + + + +## (2016-08-16) + + +#### Features + +* add AtomicLazyCell which is thread-safe ([85afbd36](https://github.com/indiv0/lazycell/commit/85afbd36d8a148e14cc53654b39ddb523980124d)) + +#### Improvements + +* Use UnsafeCell instead of RefCell ([3347a8e9](https://github.com/indiv0/lazycell/commit/3347a8e97d2215a47e25c1e2fc953e8052ad8eb6)) + + + + +## (2016-04-18) + + +#### Documentation + +* put types in between backticks ([607cf939](https://github.com/indiv0/lazycell/commit/607cf939b05e35001ba3070ec7a0b17b064e7be1)) + + + + +## v0.2.0 (2016-03-28) + + +#### Features + +* **lazycell:** + * add tests for `LazyCell` struct ([38f1313d](https://github.com/indiv0/lazycell/commit/38f1313d98542ca8c98b424edfa9ba9c3975f99e), closes [#30](https://github.com/indiv0/lazycell/issues/30)) + * remove unnecessary `Default` impl ([68c16d2d](https://github.com/indiv0/lazycell/commit/68c16d2df4e9d13d5298162c06edf918246fd758)) + +#### Documentation + +* **CHANGELOG:** removed unnecessary sections ([1cc0555d](https://github.com/indiv0/lazycell/commit/1cc0555d875898a01b0832ff967aed6b40e720eb)) +* **README:** add link to documentation ([c8dc33f0](https://github.com/indiv0/lazycell/commit/c8dc33f01f2c0dc187f59ee53a2b73081053012b), closes [#13](https://github.com/indiv0/lazycell/issues/13)) + + + + +## v0.1.0 (2016-03-16) + + +#### Features + +* **lib.rs:** implement Default trait for LazyCell ([150a6304](https://github.com/indiv0/LazyCell/commit/150a6304a230ee1de8424e49c447ec1b2d6578ce)) + + + + +## v0.0.1 (2016-03-16) + + +#### Bug Fixes + +* **Cargo.toml:** loosen restrictions on Clippy version ([84dd8f96](https://github.com/indiv0/LazyCell/commit/84dd8f960000294f9dad47d776a41b98ed812981)) + +#### Features + +* add initial implementation ([4b39764a](https://github.com/indiv0/LazyCell/commit/4b39764a575bcb701dbd8047b966f72720fd18a4)) +* add initial commit 
([a80407a9](https://github.com/indiv0/LazyCell/commit/a80407a907ef7c9401f120104663172f6965521a)) + + + diff --git a/third_party/rust/lazycell-0.4.0/Cargo.toml b/third_party/rust/lazycell-0.4.0/Cargo.toml new file mode 100644 index 000000000000..4c150e2f48f9 --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "lazycell" +version = "0.4.0" +authors = ["Alex Crichton ", + "Nikita Pekin "] +description = "A library providing a lazily filled Cell struct" +repository = "https://github.com/indiv0/lazycell" +documentation = "http://indiv0.github.io/lazycell/lazycell/" +readme = "README.md" +keywords = ["lazycell", "lazy", "cell", "library"] +license = "MIT/Apache-2.0" +include = [ + "CHANGELOG.md", + "Cargo.toml", + "LICENSE-MIT", + "LICENSE-APACHE", + "README.md", + "src/**/*.rs", +] + +[dependencies] +clippy = { version = "0.0", optional = true } + +[features] +nightly = [] +nightly-testing = ["clippy", "nightly"] diff --git a/third_party/rust/lazycell-0.4.0/LICENSE-APACHE b/third_party/rust/lazycell-0.4.0/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/lazycell-0.4.0/LICENSE-MIT b/third_party/rust/lazycell-0.4.0/LICENSE-MIT new file mode 100644 index 000000000000..bcea5601691b --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/LICENSE-MIT @@ -0,0 +1,26 @@ +Original work Copyright (c) 2014 The Rust Project Developers +Modified work Copyright (c) 2016 Nikita Pekin and lazycell contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/lazycell-0.4.0/README.md b/third_party/rust/lazycell-0.4.0/README.md new file mode 100644 index 000000000000..d23f7e8f414d --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/README.md @@ -0,0 +1,76 @@ +# lazycell + + + + + + + + + + + + + +
Linux / OS Xtravis-badge
Windowsappveyor-badge
+ api-docs-badge + crates-io + license-badge + coveralls-badge +
+ +Rust library providing a lazily filled Cell. + +# Table of Contents + +* [Usage](#usage) +* [Contributing](#contributing) +* [Credits](#credits) +* [License](#license) + +## Usage + +Add the following to your `Cargo.toml`: + +```toml +[dependencies] +lazycell = "0.4" +``` + +And in your `lib.rs` or `main.rs`: + +```rust +extern crate lazycell; +``` + +See the [API docs][api-docs] for information on using the crate in your library. + +## Contributing + +Contributions are always welcome! +If you have an idea for something to add (code, documentation, tests, examples, +etc.) feel free to give it a shot. + +Please read [CONTRIBUTING.md][contributing] before you start contributing. + +## Credits + +The LazyCell library is based originally on work by The Rust Project Developers +for the project [crates.io][crates-io-repo]. + +The list of contributors to this project can be found at +[CONTRIBUTORS.md][contributors]. + +## License + +LazyCell is distributed under the terms of both the MIT license and the Apache +License (Version 2.0). + +See [LICENSE-APACHE][license-apache], and [LICENSE-MIT][license-mit] for details. 
+ +[api-docs]: https://indiv0.github.io/lazycell/lazycell +[contributing]: https://github.com/indiv0/lazycell/blob/master/CONTRIBUTING.md "Contribution Guide" +[contributors]: https://github.com/indiv0/lazycell/blob/master/CONTRIBUTORS.md "List of Contributors" +[crates-io-repo]: https://github.com/rust-lang/crates.io "rust-lang/crates.io: Source code for crates.io" +[license-apache]: https://github.com/indiv0/lazycell/blob/master/LICENSE-APACHE "Apache-2.0 License" +[license-mit]: https://github.com/indiv0/lazycell/blob/master/LICENSE-MIT "MIT License" diff --git a/third_party/rust/lazycell-0.4.0/src/lib.rs b/third_party/rust/lazycell-0.4.0/src/lib.rs new file mode 100644 index 000000000000..a61f9887a85b --- /dev/null +++ b/third_party/rust/lazycell-0.4.0/src/lib.rs @@ -0,0 +1,234 @@ +// Original work Copyright (c) 2014 The Rust Project Developers +// Modified work Copyright (c) 2016 Nikita Pekin and the lazycell contributors +// See the README.md file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![deny(missing_docs)] +#![cfg_attr(feature = "nightly", feature(plugin))] +#![cfg_attr(feature = "clippy", plugin(clippy))] + +//! This crate provides a `LazyCell` struct which acts as a lazily filled +//! `Cell`, but with frozen contents. +//! +//! With a `RefCell`, the inner contents cannot be borrowed for the lifetime of +//! the entire object, but only of the borrows returned. A `LazyCell` is a +//! variation on `RefCell` which allows borrows to be tied to the lifetime of +//! the outer object. +//! +//! The limitation of a `LazyCell` is that after it is initialized, it can never +//! be modified. +//! +//! # Example +//! +//! The following example shows a quick example of the basic functionality of +//! `LazyCell`. +//! +//! ``` +//! use lazycell::LazyCell; +//! +//! 
let lazycell = LazyCell::new(); +//! +//! assert_eq!(lazycell.borrow(), None); +//! assert!(!lazycell.filled()); +//! lazycell.fill(1).ok(); +//! assert!(lazycell.filled()); +//! assert_eq!(lazycell.borrow(), Some(&1)); +//! assert_eq!(lazycell.into_inner(), Some(1)); +//! ``` +//! +//! `AtomicLazyCell` is a variant that uses an atomic variable to manage +//! coordination in a thread-safe fashion. + +use std::cell::UnsafeCell; +use std::sync::atomic::{AtomicUsize, Ordering}; + +/// A lazily filled `Cell`, with frozen contents. +pub struct LazyCell { + inner: UnsafeCell>, +} + +impl LazyCell { + /// Creates a new, empty, `LazyCell`. + pub fn new() -> LazyCell { + LazyCell { inner: UnsafeCell::new(None) } + } + + /// Put a value into this cell. + /// + /// This function will return Err(value) is the cell is already full. + pub fn fill(&self, t: T) -> Result<(), T> { + let mut slot = unsafe { &mut *self.inner.get() }; + if slot.is_some() { + return Err(t); + } + *slot = Some(t); + + Ok(()) + } + + /// Test whether this cell has been previously filled. + pub fn filled(&self) -> bool { + self.borrow().is_some() + } + + /// Borrows the contents of this lazy cell for the duration of the cell + /// itself. + /// + /// This function will return `Some` if the cell has been previously + /// initialized, and `None` if it has not yet been initialized. + pub fn borrow(&self) -> Option<&T> { + unsafe { &*self.inner.get() }.as_ref() + } + + /// Consumes this `LazyCell`, returning the underlying value. + pub fn into_inner(self) -> Option { + unsafe { self.inner.into_inner() } + } +} + +// Tracks the AtomicLazyCell inner state +const NONE: usize = 0; +const LOCK: usize = 1; +const SOME: usize = 2; + +/// A lazily filled `Cell`, with frozen contents. +pub struct AtomicLazyCell { + inner: UnsafeCell>, + state: AtomicUsize, +} + +impl AtomicLazyCell { + /// Creates a new, empty, `AtomicLazyCell`. 
+ pub fn new() -> AtomicLazyCell { + AtomicLazyCell { + inner: UnsafeCell::new(None), + state: AtomicUsize::new(NONE), + } + } + + /// Put a value into this cell. + /// + /// This function will return Err(value) is the cell is already full. + pub fn fill(&self, t: T) -> Result<(), T> { + if NONE != self.state.compare_and_swap(NONE, LOCK, Ordering::Acquire) { + return Err(t); + } + + unsafe { *self.inner.get() = Some(t) }; + + if LOCK != self.state.compare_and_swap(LOCK, SOME, Ordering::Release) { + panic!("unable to release lock"); + } + + Ok(()) + } + + /// Test whether this cell has been previously filled. + pub fn filled(&self) -> bool { + self.state.load(Ordering::Acquire) == SOME + } + + /// Borrows the contents of this lazy cell for the duration of the cell + /// itself. + /// + /// This function will return `Some` if the cell has been previously + /// initialized, and `None` if it has not yet been initialized. + pub fn borrow(&self) -> Option<&T> { + match self.state.load(Ordering::Acquire) { + SOME => unsafe { &*self.inner.get() }.as_ref(), + _ => None, + } + } + + /// Consumes this `LazyCell`, returning the underlying value. 
+ pub fn into_inner(self) -> Option { + unsafe { self.inner.into_inner() } + } +} + +unsafe impl Sync for AtomicLazyCell { } +unsafe impl Send for AtomicLazyCell { } + +#[cfg(test)] +mod tests { + use super::{LazyCell, AtomicLazyCell}; + + #[test] + fn test_borrow_from_empty() { + let lazycell: LazyCell = LazyCell::new(); + + let value = lazycell.borrow(); + assert_eq!(value, None); + } + + #[test] + fn test_fill_and_borrow() { + let lazycell = LazyCell::new(); + + assert!(!lazycell.filled()); + lazycell.fill(1).unwrap(); + assert!(lazycell.filled()); + + let value = lazycell.borrow(); + assert_eq!(value, Some(&1)); + } + + #[test] + fn test_already_filled_error() { + let lazycell = LazyCell::new(); + + lazycell.fill(1).unwrap(); + assert_eq!(lazycell.fill(1), Err(1)); + } + + #[test] + fn test_into_inner() { + let lazycell = LazyCell::new(); + + lazycell.fill(1).unwrap(); + let value = lazycell.into_inner(); + assert_eq!(value, Some(1)); + } + + #[test] + fn test_atomic_borrow_from_empty() { + let lazycell: AtomicLazyCell = AtomicLazyCell::new(); + + let value = lazycell.borrow(); + assert_eq!(value, None); + } + + #[test] + fn test_atomic_fill_and_borrow() { + let lazycell = AtomicLazyCell::new(); + + assert!(!lazycell.filled()); + lazycell.fill(1).unwrap(); + assert!(lazycell.filled()); + + let value = lazycell.borrow(); + assert_eq!(value, Some(&1)); + } + + #[test] + fn test_atomic_already_filled_panic() { + let lazycell = AtomicLazyCell::new(); + + lazycell.fill(1).unwrap(); + assert_eq!(1, lazycell.fill(1).unwrap_err()); + } + + #[test] + fn test_atomic_into_inner() { + let lazycell = AtomicLazyCell::new(); + + lazycell.fill(1).unwrap(); + let value = lazycell.into_inner(); + assert_eq!(value, Some(1)); + } +} diff --git a/third_party/rust/lazycell/.cargo-checksum.json b/third_party/rust/lazycell/.cargo-checksum.json index 5aa2517a8559..e325e80ac076 100644 --- a/third_party/rust/lazycell/.cargo-checksum.json +++ 
b/third_party/rust/lazycell/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"a8defced70d220e04f77271ccced7e207d4e1417ed5e512b3dd4c8f9979e6a52","Cargo.toml":"46631e96c028ae56b797ec10524d6d9912fdd9857c3bea82957b1c394050b224","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6fc4fe1f402475d5c3f6e1b5c35407c5f489daa58bf8bb085d231909b5fac666","README.md":"fb8373bbd59d2885e119bdacf25898e0e3b98d4a97ea840c62cf967db28c61a2","src/lib.rs":"efcff18d06fdcc4bca2ead19e41b33dbc83f9c7d1591cd98206f657dad704580"},"package":"ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b"} \ No newline at end of file +{"files":{"CHANGELOG.md":"0ac479629dc1cde30432141baf12e6ee347b14bd3cd07a44ea3e645fdb1a0d7c","Cargo.toml":"152d2f4435ce45bd83a0f3842a04c0aed7223f458c843c9ad1cc10ad6f0acca6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"4d3959aeee87b7924faecca7e7369370a2ac603422e7bb3fea86191b2574899d","README.md":"ea7850a3e9cc388dd40c7e5ea26e08cec788219fc4cd01280ea52cb7382d184f","src/lib.rs":"226ef2985de8085b4edd780578f4cf5e77f58512c566b2fa8462086db8f96e15"},"package":"a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef"} \ No newline at end of file diff --git a/third_party/rust/lazycell/CHANGELOG.md b/third_party/rust/lazycell/CHANGELOG.md index 3f1e820e1fc3..d3b6d78268a2 100644 --- a/third_party/rust/lazycell/CHANGELOG.md +++ b/third_party/rust/lazycell/CHANGELOG.md @@ -1,3 +1,55 @@ + +## 0.6.0 (2017-11-25) + + +#### Bug Fixes + +* fix soundness hole in borrow_with ([d1f46bef](https://github.com/indiv0/lazycell/commit/d1f46bef9d1397570aa9c3e87e18e0d16e6d1585)) + +#### Features + +* add Default derives ([71bc5088](https://github.com/indiv0/lazycell/commit/71bc50880cd8e20002038197c9b890f5b76ad096)) +* add LazyCell::try_borrow_with ([bffa4028](https://github.com/indiv0/lazycell/commit/bffa402896670b5c78a9ec050d82a58ee98de6fb)) +* add LazyCell::borrow_mut method 
([fd419dea](https://github.com/indiv0/lazycell/commit/fd419dea965ff1ad3853f26f37e8d107c6ca096c)) + +#### Breaking Changes + +* add `T: Send` for `AtomicLazyCell` `Sync` impl ([668bb2fa](https://github.com/indiv0/lazycell/commit/668bb2fa974fd6707c4c7edad292c76a9017d74d), closes [#67](https://github.com/indiv0/lazycell/issues/67)) + +#### Improvements + +* add `T: Send` for `AtomicLazyCell` `Sync` impl ([668bb2fa](https://github.com/indiv0/lazycell/commit/668bb2fa974fd6707c4c7edad292c76a9017d74d), closes [#67](https://github.com/indiv0/lazycell/issues/67)) + + + + +## v0.5.1 (2017-03-24) + + +#### Documentation + +* fix missing backticks ([44bafaaf](https://github.com/indiv0/lazycell/commit/44bafaaf93a91641261f58ee38adadcd4af6458e)) + +#### Improvements + +* derive `Debug` impls ([9da0a5a2](https://github.com/indiv0/lazycell/commit/9da0a5a2ffac1fef03ef02851c2c89d26d67d225)) + +#### Features + +* Add get method for Copy types ([dc8f8209](https://github.com/indiv0/lazycell/commit/dc8f8209888b6eba6d18717eba6a22614629b997)) + + + + +## v0.5.0 (2016-12-08) + + +#### Features + +* add borrow_with to LazyCell ([a15efa35](https://github.com/indiv0/lazycell/commit/a15efa359ea5a31a66ba57fc5b25f90c87b4b0dd)) + + + ## (2016-08-17) diff --git a/third_party/rust/lazycell/Cargo.toml b/third_party/rust/lazycell/Cargo.toml index 4c150e2f48f9..a3b952a8b660 100644 --- a/third_party/rust/lazycell/Cargo.toml +++ b/third_party/rust/lazycell/Cargo.toml @@ -1,25 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "lazycell" -version = "0.4.0" -authors = ["Alex Crichton ", - "Nikita Pekin "] +version = "0.6.0" +authors = ["Alex Crichton ", "Nikita Pekin "] +include = ["CHANGELOG.md", "Cargo.toml", "LICENSE-MIT", "LICENSE-APACHE", "README.md", "src/**/*.rs"] description = "A library providing a lazily filled Cell struct" -repository = "https://github.com/indiv0/lazycell" documentation = "http://indiv0.github.io/lazycell/lazycell/" readme = "README.md" keywords = ["lazycell", "lazy", "cell", "library"] license = "MIT/Apache-2.0" -include = [ - "CHANGELOG.md", - "Cargo.toml", - "LICENSE-MIT", - "LICENSE-APACHE", - "README.md", - "src/**/*.rs", -] - -[dependencies] -clippy = { version = "0.0", optional = true } +repository = "https://github.com/indiv0/lazycell" +[dependencies.clippy] +version = "0.0" +optional = true [features] nightly = [] diff --git a/third_party/rust/lazycell/LICENSE-MIT b/third_party/rust/lazycell/LICENSE-MIT index bcea5601691b..9a871b84794a 100644 --- a/third_party/rust/lazycell/LICENSE-MIT +++ b/third_party/rust/lazycell/LICENSE-MIT @@ -1,5 +1,5 @@ Original work Copyright (c) 2014 The Rust Project Developers -Modified work Copyright (c) 2016 Nikita Pekin and lazycell contributors +Modified work Copyright (c) 2016-2017 Nikita Pekin and lazycell contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated diff --git a/third_party/rust/lazycell/README.md b/third_party/rust/lazycell/README.md index d23f7e8f414d..a1669b7a82fa 100644 --- a/third_party/rust/lazycell/README.md +++ b/third_party/rust/lazycell/README.md @@ -2,13 +2,9 @@ - + - - - -
Linux / OS XLinux travis-badge
Windowsappveyor-badge
api-docs-badge @@ -34,7 +30,7 @@ Add the following to your `Cargo.toml`: ```toml [dependencies] -lazycell = "0.4" +lazycell = "0.6" ``` And in your `lib.rs` or `main.rs`: diff --git a/third_party/rust/lazycell/src/lib.rs b/third_party/rust/lazycell/src/lib.rs index a61f9887a85b..0137a463ca98 100644 --- a/third_party/rust/lazycell/src/lib.rs +++ b/third_party/rust/lazycell/src/lib.rs @@ -1,5 +1,5 @@ // Original work Copyright (c) 2014 The Rust Project Developers -// Modified work Copyright (c) 2016 Nikita Pekin and the lazycell contributors +// Modified work Copyright (c) 2016-2017 Nikita Pekin and the lazycell contributors // See the README.md file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 { inner: UnsafeCell>, } @@ -60,15 +61,15 @@ impl LazyCell { /// Put a value into this cell. /// - /// This function will return Err(value) is the cell is already full. - pub fn fill(&self, t: T) -> Result<(), T> { + /// This function will return `Err(value)` is the cell is already full. + pub fn fill(&self, value: T) -> Result<(), T> { let mut slot = unsafe { &mut *self.inner.get() }; if slot.is_some() { - return Err(t); + return Err(value); } - *slot = Some(t); + *slot = Some(value); - Ok(()) + Ok(()) } /// Test whether this cell has been previously filled. @@ -85,18 +86,76 @@ impl LazyCell { unsafe { &*self.inner.get() }.as_ref() } + /// Borrows the contents of this lazy cell mutably for the duration of the cell + /// itself. + /// + /// This function will return `Some` if the cell has been previously + /// initialized, and `None` if it has not yet been initialized. + pub fn borrow_mut(&mut self) -> Option<&mut T> { + unsafe { &mut *self.inner.get() }.as_mut() + } + + /// Borrows the contents of this lazy cell for the duration of the cell + /// itself. + /// + /// If the cell has not yet been filled, the cell is first filled using the + /// function provided. 
+ /// + /// # Panics + /// + /// Panics if the cell becomes filled as a side effect of `f`. + pub fn borrow_with T>(&self, f: F) -> &T { + if let Some(value) = self.borrow() { + return value; + } + let value = f(); + if self.fill(value).is_err() { + panic!("borrow_with: cell was filled by closure") + } + self.borrow().unwrap() + } + + /// Same as `borrow_with`, but allows the initializing function to fail. + /// + /// # Panics + /// + /// Panics if the cell becomes filled as a side effect of `f`. + pub fn try_borrow_with(&self, f: F) -> Result<&T, E> + where F: FnOnce() -> Result + { + if let Some(value) = self.borrow() { + return Ok(value); + } + let value = f()?; + if self.fill(value).is_err() { + panic!("try_borrow_with: cell was filled by closure") + } + Ok(self.borrow().unwrap()) + } + /// Consumes this `LazyCell`, returning the underlying value. pub fn into_inner(self) -> Option { unsafe { self.inner.into_inner() } } } +impl LazyCell { + /// Returns a copy of the contents of the lazy cell. + /// + /// This function will return `Some` if the cell has been previously initialized, + /// and `None` if it has not yet been initialized. + pub fn get(&self) -> Option { + unsafe { *self.inner.get() } + } +} + // Tracks the AtomicLazyCell inner state const NONE: usize = 0; const LOCK: usize = 1; const SOME: usize = 2; /// A lazily filled `Cell`, with frozen contents. +#[derive(Debug, Default)] pub struct AtomicLazyCell { inner: UnsafeCell>, state: AtomicUsize, @@ -113,7 +172,7 @@ impl AtomicLazyCell { /// Put a value into this cell. /// - /// This function will return Err(value) is the cell is already full. + /// This function will return `Err(value)` is the cell is already full. 
pub fn fill(&self, t: T) -> Result<(), T> { if NONE != self.state.compare_and_swap(NONE, LOCK, Ordering::Acquire) { return Err(t); @@ -151,12 +210,26 @@ impl AtomicLazyCell { } } -unsafe impl Sync for AtomicLazyCell { } -unsafe impl Send for AtomicLazyCell { } +impl AtomicLazyCell { + /// Returns a copy of the contents of the lazy cell. + /// + /// This function will return `Some` if the cell has been previously initialized, + /// and `None` if it has not yet been initialized. + pub fn get(&self) -> Option { + match self.state.load(Ordering::Acquire) { + SOME => unsafe { *self.inner.get() }, + _ => None, + } + } +} + +unsafe impl Sync for AtomicLazyCell {} + +unsafe impl Send for AtomicLazyCell {} #[cfg(test)] mod tests { - use super::{LazyCell, AtomicLazyCell}; + use super::{AtomicLazyCell, LazyCell}; #[test] fn test_borrow_from_empty() { @@ -164,6 +237,9 @@ mod tests { let value = lazycell.borrow(); assert_eq!(value, None); + + let value = lazycell.get(); + assert_eq!(value, None); } #[test] @@ -176,6 +252,25 @@ mod tests { let value = lazycell.borrow(); assert_eq!(value, Some(&1)); + + let value = lazycell.get(); + assert_eq!(value, Some(1)); + } + + #[test] + fn test_borrow_mut() { + let mut lazycell = LazyCell::new(); + assert!(lazycell.borrow_mut().is_none()); + + lazycell.fill(1).unwrap(); + assert_eq!(lazycell.borrow_mut(), Some(&mut 1)); + + *lazycell.borrow_mut().unwrap() = 2; + assert_eq!(lazycell.borrow_mut(), Some(&mut 2)); + + // official way to reset the cell + lazycell = LazyCell::new(); + assert!(lazycell.borrow_mut().is_none()); } #[test] @@ -186,6 +281,85 @@ mod tests { assert_eq!(lazycell.fill(1), Err(1)); } + #[test] + fn test_borrow_with() { + let lazycell = LazyCell::new(); + + let value = lazycell.borrow_with(|| 1); + assert_eq!(&1, value); + } + + #[test] + fn test_borrow_with_already_filled() { + let lazycell = LazyCell::new(); + lazycell.fill(1).unwrap(); + + let value = lazycell.borrow_with(|| 1); + assert_eq!(&1, value); + } + + #[test] 
+ fn test_borrow_with_not_called_when_filled() { + let lazycell = LazyCell::new(); + + lazycell.fill(1).unwrap(); + + let value = lazycell.borrow_with(|| 2); + assert_eq!(&1, value); + } + + #[test] + #[should_panic] + fn test_borrow_with_sound_with_reentrancy() { + // Kudos to dbaupp for discovering this issue + // https://www.reddit.com/r/rust/comments/5vs9rt/lazycell_a_rust_library_providing_a_lazilyfilled/de527xm/ + let lazycell: LazyCell> = LazyCell::new(); + + let mut reference: Option<&i32> = None; + + lazycell.borrow_with(|| { + let _ = lazycell.fill(Box::new(1)); + reference = lazycell.borrow().map(|r| &**r); + Box::new(2) + }); + } + + #[test] + fn test_try_borrow_with_ok() { + let lazycell = LazyCell::new(); + let result = lazycell.try_borrow_with::<(), _>(|| Ok(1)); + assert_eq!(result, Ok(&1)); + } + + #[test] + fn test_try_borrow_with_err() { + let lazycell = LazyCell::<()>::new(); + let result = lazycell.try_borrow_with(|| Err(1)); + assert_eq!(result, Err(1)); + } + + #[test] + fn test_try_borrow_with_already_filled() { + let lazycell = LazyCell::new(); + lazycell.fill(1).unwrap(); + let result = lazycell.try_borrow_with::<(), _>(|| unreachable!()); + assert_eq!(result, Ok(&1)); + } + + #[test] + #[should_panic] + fn test_try_borrow_with_sound_with_reentrancy() { + let lazycell: LazyCell> = LazyCell::new(); + + let mut reference: Option<&i32> = None; + + let _ = lazycell.try_borrow_with::<(), _>(|| { + let _ = lazycell.fill(Box::new(1)); + reference = lazycell.borrow().map(|r| &**r); + Ok(Box::new(2)) + }); + } + #[test] fn test_into_inner() { let lazycell = LazyCell::new(); @@ -201,6 +375,9 @@ mod tests { let value = lazycell.borrow(); assert_eq!(value, None); + + let value = lazycell.get(); + assert_eq!(value, None); } #[test] @@ -213,6 +390,9 @@ mod tests { let value = lazycell.borrow(); assert_eq!(value, Some(&1)); + + let value = lazycell.get(); + assert_eq!(value, Some(1)); } #[test] diff --git a/third_party/rust/libc/.cargo-checksum.json 
b/third_party/rust/libc/.cargo-checksum.json index 7cbfcc0b607a..fbebcfd35635 100644 --- a/third_party/rust/libc/.cargo-checksum.json +++ b/third_party/rust/libc/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"88c852e2bb7718702c73107cef9168fdb68722d45e4b2ba28c2f4a9221caa41c","Cargo.toml":"14990e43355b59b8f32126990446662da60e4268f1a71357c4a4e4c8769cb18d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"00fbfd7e989de6ecc270aed1f1c2c20560ac67e6c1564833622b8e34385c6170","appveyor.yml":"216f7ac4561aa5810dc84ce5a9950897a8c0496e0615d0211d62348b1c8dc720","ci/README.md":"b8debdd9044a773dbccc85e574eed21008784117d77c24a15f207d4b2a173a14","ci/android-install-ndk.sh":"725db9025c5905849916bf7c910f98ff0e753484397c2a1f836d48a576d10890","ci/android-install-sdk.sh":"5c3fbe402ac611239ac7715a61f247d1c55fa012f33a5be0b0127dfc196965cf","ci/android-sysimage.sh":"901415631752827454c827e8c51906ba4260612e4021eda98eb7fff771c7d0e8","ci/docker/aarch64-linux-android/Dockerfile":"e17945fba1786dfe766006f50e79baf3f4151ca0c0c14ae96f91483bf345afd7","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"5f430271941e1eecdf9d1a5fb701dd5622e2c4b9da03140fd829bf216e55529d","ci/docker/aarch64-unknown-linux-musl/Dockerfile":"3e1cbf0fa728571b9be9769e5a6281c964fa5b26d586265117ccee017ca4022c","ci/docker/arm-linux-androideabi/Dockerfile":"4e0bdc13254f99bd0db195f91331c634050426e3e4a0fcc63ef25ab795fe2d46","ci/docker/arm-unknown-linux-gnueabihf/Dockerfile":"dbb025b53b27e406893184290836a50133ecae8295711d5e05b4e41fac9bd262","ci/docker/arm-unknown-linux-musleabihf/Dockerfile":"7cb6e0f8fb91c97f142a9c827687bbbc1a5e7643a3081160025d0365593a596c","ci/docker/asmjs-unknown-emscripten/Dockerfile":"0d9aea5119c2cd136cc2c0a578105d91210e45901ac49b17c5e45f458b1c7551","ci/docker/i686-linux-android/Dockerfile":"4e8377ec0bd9ad2df23bf2c5373200a12750dc9f28c4f10bc83a0150fe1623ee","ci/docker/i686-unknown-l
inux-gnu/Dockerfile":"f22ac412525ef15b33ab8ccd8193d97346faf421c17f6ddeffc25b651aba83b7","ci/docker/i686-unknown-linux-musl/Dockerfile":"4ac86fe9e159d454616396a9f3f07ce0f5d99cc4b49898b8d2486e6bdbfed9e9","ci/docker/mips-unknown-linux-gnu/Dockerfile":"6d2a9daa299003497c1d441d07b69f730ad75ee49f34520f959b5158e60072e0","ci/docker/mips-unknown-linux-musl/Dockerfile":"4773b2656a7dd6a3b106fcb737428436652edf3d1f48181de3f62c16bf5bd49d","ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile":"7c4d26232f1c1553a6612d9b0b3faac9887e139eaffa025f70d34113dcee812f","ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile":"edb4144c07ade1a8bd65272ec1d3672ad794e9e6b7d01197896e159a70175b58","ci/docker/mipsel-unknown-linux-musl/Dockerfile":"0ca9c12b5618c6d2df04ff820d56fb28e05b43e45eaa506480126b03c5072d48","ci/docker/powerpc-unknown-linux-gnu/Dockerfile":"4b247dcc399395815ec9153c1247cc03d764896c484eddcb196d0bf8650d6311","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"e949717a8ba5e123940729ff47ce1c45989c8b8247c576f1488f698b534e0283","ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile":"018591017f499414a9f79477e1c39baa6a47f71fce6812fb1868fb0fcdfb8cea","ci/docker/s390x-unknown-linux-gnu/Dockerfile":"9860f478c5b2dc3bcf76d2cda7f71922a2a2ef54898cc2ed6ea9b9eff094a5c0","ci/docker/sparc64-unknown-linux-gnu/Dockerfile":"2312491343665d2ab36fd669f14766facbf3c7e9820ffb8f0d623830b676f8f0","ci/docker/wasm32-unknown-emscripten/Dockerfile":"bd072d6ae91a9160693e402dd77462d3c9dd0716711e719a62af330ae479eb4e","ci/docker/wasm32-unknown-emscripten/node-wrapper.sh":"0eef37c3c4fb16dbc083148b7e7af45f2ae60bd9a1b3a77e1d43da79efbd30c6","ci/docker/x86_64-linux-android/Dockerfile":"aeeaa540189ca712369c564c9a14cbace63217dadcfaf879a2cb40fbdeb08199","ci/docker/x86_64-rumprun-netbsd/Dockerfile":"e8f9287b267c6058eec42d1bca0007cb9a78a1d244dd8e11088368cb61fb17d6","ci/docker/x86_64-rumprun-netbsd/runtest.rs":"53302e9ed39293c1ec68ab56287593907d4aaf5bac9c1c2857b29f754a71d62b","ci/docker/x86_64-unknown-freebsd/Dockerfile":"dfc00fa7d5
dbb24e6864fb62cdbd48536c14c0be2d40fd02de53d1374e4b760a","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"ab3fa45765802b8155996796fcad9fb82096360ac587e38e6faa3ec345268796","ci/docker/x86_64-unknown-linux-gnux32/Dockerfile":"f22ac412525ef15b33ab8ccd8193d97346faf421c17f6ddeffc25b651aba83b7","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"e145784741473150473b1bef7cc3c2cf0c6339d4fc480113ac41b4247a9b38ec","ci/dox.sh":"9ea240a4a607036235fd68c01b5d2a59f365768d103d3be774dcf34aa3ff563e","ci/emscripten-entry.sh":"c97bbec520b57af9b1ae264ca991560e99c3852c99b00a2f673c614d1ba17498","ci/emscripten.sh":"6f66c7b5c3d34a41afc59ceb0a8c3b0880cd6fd9a6344b874ae80bac0639ccb2","ci/ios/deploy_and_run_on_ios_simulator.rs":"3175066fd7f82390f6226d881e1a1dda9767ea2705656870e0d7774e2731800e","ci/landing-page-footer.html":"b70b3112c2147f5c967e7481061ef38bc2d79a28dd55a16fb916d9c9426da2c4","ci/landing-page-head.html":"ad69663fac7924f27d0209bc519d55838e86edfc4133713a6fd08caadac1b142","ci/linux-s390x.sh":"d6b732d7795b4ba131326aff893bca6228a7d2eb0e9402f135705413dbbe0dce","ci/linux-sparc64.sh":"afe325f853481d9e18b0e5a376e4f987de7673066ec336ad56e26d929858b427","ci/run-docker.sh":"be83bc5a8b5ef913a7c9941ffca24734716028650c9a876123c4c160672c18de","ci/run-qemu.sh":"bb859421170871ef23a8940c5e150efec0c01b95e32d2ce2d37b79a45d9d346c","ci/run.sh":"546ce3904f96fe5316fb1342687f5732c26adfc40c6db8f3becb1a67a9892555","ci/runtest-android.rs":"a07ddbdd276aedda7876c7e676774178a60d9aeab95df01275a4ee95f59e3044","ci/style.rs":"60564abc1d5197ed1598426dd0d6ee9939a16d2875b03373538f58843bb616c4","ci/test-runner-linux":"cb3713d9e4fa1d9a7c039dfd077af0939921c1f2bf969c9e680ee66e87dc30a4","src/cloudabi/aarch64.rs":"b8550bf1fd7344972aa4db29441486f39f31482d0327534981dbb75959c29114","src/cloudabi/arm.rs":"c197e2781c2839808bd6fcef219a29705b27b992d3ef920e9cf6ac96e2022bbf","src/cloudabi/mod.rs":"1c2dc787a1b3438970ccd04153c93538719b1a27445707913b90b6f0cdcdde77","src/cloudabi/x86.rs":"33eb97f272d2201f3838ae74d444583c7de8f67856852ca375293b
20bbd05636","src/cloudabi/x86_64.rs":"400d85d4fe39e26cf2e6ece9ee31c75fe9e88c4bcf4d836ca9f765c05c9c5be3","src/dox.rs":"c670f160d18c24f34ccea660676723dec5019d64479aac4d99c2bd544f34f4b9","src/fuchsia/aarch64.rs":"8366ac6f51e494aad9266ccab2b3a95c5ed7aa3a9f77ea672413283440919743","src/fuchsia/mod.rs":"78dde4fb654996e842c30f8f7bc4156e11440092292e21fc1e26ee686e2a4095","src/fuchsia/powerpc64.rs":"390e8db54271a1d5f512d54a21b328091a792378bf9b42b49b6c1a72388da4ec","src/fuchsia/x86_64.rs":"b4a3eff94dcf1ffe41e6500468ca2cff0e97ddbcc75fe079b6ac7adb1e493f56","src/lib.rs":"5e2b2884da3b6fd9b68133c558cca128b74cff09a501808c5d811fbc30b2cb16","src/macros.rs":"e1b0bf5db89faa8fcb39a3fd46cdb5fdcfabb7f3524eb2192157f0188b8a764b","src/redox/mod.rs":"07ae6652bc6f7fe276255bda210eae7a197d3d64d9dede6dadce0478e8890e2b","src/redox/net.rs":"a1e9d2291e2c12833333ac8706b23f388ce5dbe1718bdd7b38fd68c74559f0b4","src/unix/bsd/apple/b32.rs":"41699d2802327b0a4d4aa50cd20b1e366b442176cbedab27ca888ac0446c9156","src/unix/bsd/apple/b64.rs":"4fe7bf5de252dcd712dee0a7a8acfaa7c737c862eaa3ff669255d3f2076c5fa6","src/unix/bsd/apple/mod.rs":"c305b6c1ac1d66634e971df52640faf5aac1f0f4a9c253665969ea97e8b73be5","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"1950bf56a7ecbfc8994e4b54dc94e752d91dccd8f0e2d1e4c2d3ad2fa8cb5e23","src/unix/bsd/freebsdlike/freebsd/aarch64.rs":"97132e2097411034271b8c927ecc94a208a361564680972a6c82998bd30a9826","src/unix/bsd/freebsdlike/freebsd/mod.rs":"5a3156a7a25fdce61abdade87d3b054d885c268db0cd434eb94a7a9005af4431","src/unix/bsd/freebsdlike/freebsd/x86.rs":"54311d3ebf2bb091ab22361e377e6ef9224aec2ecfe459fbfcedde4932db9c58","src/unix/bsd/freebsdlike/freebsd/x86_64.rs":"97132e2097411034271b8c927ecc94a208a361564680972a6c82998bd30a9826","src/unix/bsd/freebsdlike/mod.rs":"c983d33bb6f0ac1e38d00a41698485098e8a1fe1cc78e685e057c6a4d7129a56","src/unix/bsd/mod.rs":"a1030452eed4ec52a39e9f39040e8fae4309143c7af145efadd9b425caa39672","src/unix/bsd/netbsdlike/mod.rs":"1a5adcab85fb0051c5458b6ca40aeca4573a802152c7285b637d4
e514d78f22a","src/unix/bsd/netbsdlike/netbsd/mod.rs":"92ca8ff8410d64035351b70d9118fe1d85edeec61b92ff6730a77dfba9832d56","src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/netbsd/other/mod.rs":"4d9f7091af8e166943ac6f42ce85558909e5b6e61325039bff7adfbcf4b90212","src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs":"6b460d78618fb977c129ed432f19ece03bd8fd47fdd2528b02794c316f59869b","src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86_64.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/openbsdlike/mod.rs":"3d22ed9ac1ca930f3ab606934d978be240468d7eadaf13772ca91c989901a93d","src/unix/bsd/netbsdlike/openbsdlike/openbsd/aarch64.rs":"820092e397c7ec259cd2de8f2444083a8e57071c02d73d678701dfa7807726e9","src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs":"c57f0cfe1be1229c38e8f08ed886fa027fdd150f9559ef40f7bbd9ba231634d9","src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86.rs":"44b7ea81cf363777b29935da175e702cbf45ed78f7498ae57faf44aa32335085","src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86_64.rs":"7c959cdb3415f68a0f948117b9aa87a17463e57ab97cc3235f2567454b706653","src/unix/haiku/b32.rs":"69ae47fc52c6880e85416b4744500d5655c9ec6131cb737f3b649fceaadce15a","src/unix/haiku/b64.rs":"73e64db09275a8da8d50a13cce2cfa2b136036ddf3a930d2939f337fc995900b","src/unix/haiku/mod.rs":"1574d3ac239befc2dc7b9b5245d5175e3b7498d6f7b278dab14791f04ba08388","src/unix/mod.rs":"ef4faebfff6f76db542f7818d8577c93c3886d08a04c7e1a9437ea4b6f912976","src/unix/newlib/aarch64/mod.rs":"c408a990f22fb4292a824f38367e9b517e6e6f8623328397ee631cc88b3d1f7d","src/unix/newlib/arm/mod.rs":"2b6dba2e697ab9b4f4bc4dd5f28057249e9b596d1cb395a9322ec87605c4a5c4","src/unix/newlib/mod
.rs":"7422845a44de13a2faf15d105670525ed090c6e200c9723178ed735810bbd689","src/unix/notbsd/android/b32/arm.rs":"3625a32c7e58cfe683a53486fbe3d42d4e28f00bea31e19cb46ed2bb0b6a140b","src/unix/notbsd/android/b32/mod.rs":"e9ec6f08e20ea235cd7095604ea6e37c739c1276e9f40fa31a4db4951fa005af","src/unix/notbsd/android/b32/x86.rs":"ae2b7f1d6278caddc007749bb1d09ca33f7593478a0fd7fe98b457dae86c7814","src/unix/notbsd/android/b64/aarch64.rs":"63d65629d79371814910f691672ef593d20244ee09be26f1ebe07ee6212d0163","src/unix/notbsd/android/b64/mod.rs":"63290adc8122c040f9be369ef4180975bcf1a967a717aa75d30371162d5d5fa9","src/unix/notbsd/android/b64/x86_64.rs":"5547aef8dcbaa5a932559f34606fd8d89f6c9c15173d2b1412c12d39b3c1045f","src/unix/notbsd/android/mod.rs":"9f233087b989db4a925b79b1c0bc67ff8d237e8069d415c81d3142e8b44a99ed","src/unix/notbsd/emscripten.rs":"f8d8acf5cef1acd1a6bdd41299054120820865d072c4c2e3c740fbd0c6e9b3e7","src/unix/notbsd/linux/mips/mips32.rs":"f769bbe5c48f77e6bc94e37deccd2866e7e52e7eb1b3cd7611dbad95574e82f8","src/unix/notbsd/linux/mips/mips64.rs":"ef678b48b571428d46fef5b2ca9bef7251b5f27fcd2c38d987d1b80f2ad3ece0","src/unix/notbsd/linux/mips/mod.rs":"b4577e9605f5638367be580e9f6ba75ef277a77ba8a875a7d6dd9b16c79baeaa","src/unix/notbsd/linux/mod.rs":"090630438ac5e68fb679287330244c7fd921b212a25b0cee2d9b29607f54157a","src/unix/notbsd/linux/musl/b32/arm.rs":"d2998b13648696304bb34f0793715d821178baf8e88a45b532764a20b5294232","src/unix/notbsd/linux/musl/b32/mips.rs":"340be794362a4532d709ef23542b10762f710f7bfd0c4fafa5166a3fb9a15b4f","src/unix/notbsd/linux/musl/b32/mod.rs":"3cc7979546258a47df6b0fcd7ad64571826623671857633a7acafe87e05e56a1","src/unix/notbsd/linux/musl/b32/x86.rs":"df114102dcf35bc32f891d4a9e09ce02fbe4c096a196c6b98b10ff87b29dbe4d","src/unix/notbsd/linux/musl/b64/aarch64.rs":"12c590fde2a1450c08934234c4f5bcd94ee7b58ca21f8e93bc930148c15fb0b0","src/unix/notbsd/linux/musl/b64/mod.rs":"b1991ef46a00d2db7ce3e36f6596685c1d508786c4dd4e62cbbf65ac3c256cc0","src/unix/notbsd/linux/musl/b64/powerp
c64.rs":"790dca3cc6c0a4166992d2c1665e1b1e320fbad5c0b1ba3c454321a6b2d7a103","src/unix/notbsd/linux/musl/b64/x86_64.rs":"bf8fc10a09bf700084db0381c484ddec3add79aa1726954cb14d21802ff7d199","src/unix/notbsd/linux/musl/mod.rs":"04fc2a5385fc36a04d537adac25d47ea0978c2ef9e8b4f875e146d93cebc9566","src/unix/notbsd/linux/other/b32/arm.rs":"d9892f7350b2978335f734f1cd2d7fed60f0f2e66aa05bee3f69549c031f8b14","src/unix/notbsd/linux/other/b32/mod.rs":"f2ce29ed104daf758cd4f61c190d1b5c12f516a428375891d7839d77ade8cbe6","src/unix/notbsd/linux/other/b32/powerpc.rs":"253fcd2f9978525285be1903cc08f3fec2dc3b12d1660a33e2995b4f6b810d1c","src/unix/notbsd/linux/other/b32/x86.rs":"49376e3ed0f3ff95c230ac20751911fe3c608dfe15c7c118b069fd7a954d8db9","src/unix/notbsd/linux/other/b64/aarch64.rs":"3aa6ab0412857fd788c8831649772bb7fd903d0b2f3ee04914a1b99e00a6e7cb","src/unix/notbsd/linux/other/b64/mod.rs":"63e1a3fdf5f4d1b9820934ab344c91aed5e458e7e05908535d2e942d51a08bf8","src/unix/notbsd/linux/other/b64/not_x32.rs":"e1cf87b9dbe89e9fff667c6fc8eff0166a02565ef65aae3dc9304dc70f7a5624","src/unix/notbsd/linux/other/b64/powerpc64.rs":"5c20bf2bf528d329ed279f7730469b1131565029fae495803cee8809cc8d27e9","src/unix/notbsd/linux/other/b64/sparc64.rs":"884bb89cd5bc49a410ce8eb5c5d75e8db303f90dca174bf702ca541bb4bf0db6","src/unix/notbsd/linux/other/b64/x32.rs":"e521bd43a2d66f09f03a045b2e1f69f2ca91cff37922ac5b7c437d888799cee6","src/unix/notbsd/linux/other/b64/x86_64.rs":"c07dfaca8234bc1291e12a30cb91230f228199024a5aa64e41e350d33b030abb","src/unix/notbsd/linux/other/mod.rs":"50e16e911ec49887fd3e0b5d08e843d680ea50d3aa3726d26cedda8f4498b1de","src/unix/notbsd/linux/s390x.rs":"ddbb07a2f48f6cd45e1bff38e05249940202c43d3abd8cf97a754f49668119af","src/unix/notbsd/mod.rs":"4f0893bf43bda88d84530a0790e3a3f6396c6e26a32aab11851a507f328a0023","src/unix/solaris/mod.rs":"57bc5dedf4d6e41c794aee28a264a601c636508090f5eff8e77b72748e110735","src/unix/uclibc/mips/mips32.rs":"43a91d7ae53413133d1355d3791846f5c55e982ef6a10b9bdccad5b37bee8c4f","src/unix/
uclibc/mips/mips64.rs":"e67eec1636a998b047d89a4cda1c99cb6bc3071db017762675179a68201c4438","src/unix/uclibc/mips/mod.rs":"74817a9b1ee3998d8e0b751a555d57225f70fd979c283c94ada344a162a8b856","src/unix/uclibc/mod.rs":"0e2f59647bc0e48c37914a3f788c2c63d67287f051d0176bdbf1a1e6388a9046","src/unix/uclibc/x86_64/l4re.rs":"54b4e58ce5a671348c32bc41c1607dbc5c13fa6818cc3e0e0e2b409c300a305e","src/unix/uclibc/x86_64/mod.rs":"bd569360c45a6f2b585cfb47544d223e92243a2ff4f8429b78fecac6b889f9fd","src/windows.rs":"e41357d610608bad81abf285306ad8b127b6f02d5132f63c4942861980b47d59"},"package":"f54263ad99207254cf58b5f701ecb432c717445ea2ee8af387334bdd1a03fdff"} \ No newline at end of file +{"files":{".travis.yml":"8088167016d06169f8b1eb86ba5f413cdfcb776bd95af46f968d146665ff5afe","Cargo.toml":"74e837a30336b387d94fc92db3d1ece407b47318ca1362a2b8f37dfb28064e54","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"a550fd6c92b62c70925cc6a75dd1d40ae93f9a77e3c4e9baacdf014fa0cae550","appveyor.yml":"216f7ac4561aa5810dc84ce5a9950897a8c0496e0615d0211d62348b1c8dc720","ci/README.md":"2e3d7ad13f8c3202e57d2af73aeeebde306221dce7c0907e462e25767a692d6b","ci/android-install-ndk.sh":"725db9025c5905849916bf7c910f98ff0e753484397c2a1f836d48a576d10890","ci/android-install-sdk.sh":"5c3fbe402ac611239ac7715a61f247d1c55fa012f33a5be0b0127dfc196965cf","ci/android-sysimage.sh":"901415631752827454c827e8c51906ba4260612e4021eda98eb7fff771c7d0e8","ci/docker/aarch64-linux-android/Dockerfile":"e17945fba1786dfe766006f50e79baf3f4151ca0c0c14ae96f91483bf345afd7","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"5f430271941e1eecdf9d1a5fb701dd5622e2c4b9da03140fd829bf216e55529d","ci/docker/aarch64-unknown-linux-musl/Dockerfile":"1e8c66067bcbd718119db5eb6e69390c4f0ea72c1543e09b6846a36ef66cd21b","ci/docker/arm-linux-androideabi/Dockerfile":"4e0bdc13254f99bd0db195f91331c634050426e3e4a0fcc63ef25ab795fe2d46","ci/docker/a
rm-unknown-linux-gnueabihf/Dockerfile":"dbb025b53b27e406893184290836a50133ecae8295711d5e05b4e41fac9bd262","ci/docker/arm-unknown-linux-musleabihf/Dockerfile":"12b50abdc5605e3a39eff6bb0d0fccb0885896933c5bfbb3d0cbde9068492a0f","ci/docker/asmjs-unknown-emscripten/Dockerfile":"0d9aea5119c2cd136cc2c0a578105d91210e45901ac49b17c5e45f458b1c7551","ci/docker/i686-linux-android/Dockerfile":"4e8377ec0bd9ad2df23bf2c5373200a12750dc9f28c4f10bc83a0150fe1623ee","ci/docker/i686-unknown-linux-gnu/Dockerfile":"f22ac412525ef15b33ab8ccd8193d97346faf421c17f6ddeffc25b651aba83b7","ci/docker/i686-unknown-linux-musl/Dockerfile":"f95cd8b514f48686d774b85e4dffccce1a5acd68749d8ff59b204419d448d575","ci/docker/mips-unknown-linux-gnu/Dockerfile":"6d2a9daa299003497c1d441d07b69f730ad75ee49f34520f959b5158e60072e0","ci/docker/mips-unknown-linux-musl/Dockerfile":"4773b2656a7dd6a3b106fcb737428436652edf3d1f48181de3f62c16bf5bd49d","ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile":"7c4d26232f1c1553a6612d9b0b3faac9887e139eaffa025f70d34113dcee812f","ci/docker/mips64el-unknown-linux-gnuabi64/Dockerfile":"edb4144c07ade1a8bd65272ec1d3672ad794e9e6b7d01197896e159a70175b58","ci/docker/mipsel-unknown-linux-musl/Dockerfile":"0ca9c12b5618c6d2df04ff820d56fb28e05b43e45eaa506480126b03c5072d48","ci/docker/powerpc-unknown-linux-gnu/Dockerfile":"4b247dcc399395815ec9153c1247cc03d764896c484eddcb196d0bf8650d6311","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"e949717a8ba5e123940729ff47ce1c45989c8b8247c576f1488f698b534e0283","ci/docker/powerpc64le-unknown-linux-gnu/Dockerfile":"018591017f499414a9f79477e1c39baa6a47f71fce6812fb1868fb0fcdfb8cea","ci/docker/s390x-unknown-linux-gnu/Dockerfile":"9860f478c5b2dc3bcf76d2cda7f71922a2a2ef54898cc2ed6ea9b9eff094a5c0","ci/docker/sparc64-unknown-linux-gnu/Dockerfile":"1471a694817758331ecdbb23857537563ad7ae56aa3a88e49bf9cd421addcafe","ci/docker/wasm32-unknown-emscripten/Dockerfile":"bd072d6ae91a9160693e402dd77462d3c9dd0716711e719a62af330ae479eb4e","ci/docker/wasm32-unknown-emscripten/no
de-wrapper.sh":"0eef37c3c4fb16dbc083148b7e7af45f2ae60bd9a1b3a77e1d43da79efbd30c6","ci/docker/x86_64-linux-android/Dockerfile":"aeeaa540189ca712369c564c9a14cbace63217dadcfaf879a2cb40fbdeb08199","ci/docker/x86_64-rumprun-netbsd/Dockerfile":"e8f9287b267c6058eec42d1bca0007cb9a78a1d244dd8e11088368cb61fb17d6","ci/docker/x86_64-rumprun-netbsd/runtest.rs":"53302e9ed39293c1ec68ab56287593907d4aaf5bac9c1c2857b29f754a71d62b","ci/docker/x86_64-unknown-freebsd/Dockerfile":"ab1f14c65c29f3721c7c091bdec2e865fb1abf45fdcdc867201d087966e396c4","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"ab3fa45765802b8155996796fcad9fb82096360ac587e38e6faa3ec345268796","ci/docker/x86_64-unknown-linux-gnux32/Dockerfile":"f22ac412525ef15b33ab8ccd8193d97346faf421c17f6ddeffc25b651aba83b7","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"0c31058e39d9f25c6f4f9b7fe78c7c8d135f32bfe52199e9b2e7fa10d5dc3940","ci/dox.sh":"d77171a9da467bb01fc702a28fc3b5099f82a485a627f3d5593a9830c3e1a77c","ci/emscripten-entry.sh":"c97bbec520b57af9b1ae264ca991560e99c3852c99b00a2f673c614d1ba17498","ci/emscripten.sh":"6f66c7b5c3d34a41afc59ceb0a8c3b0880cd6fd9a6344b874ae80bac0639ccb2","ci/ios/deploy_and_run_on_ios_simulator.rs":"be6d2ccfe78df5d77a2c4ee40ffbd22b1bb2ac0a0cf6b2a108f21406f22ce1a8","ci/landing-page-footer.html":"b70b3112c2147f5c967e7481061ef38bc2d79a28dd55a16fb916d9c9426da2c4","ci/landing-page-head.html":"ad69663fac7924f27d0209bc519d55838e86edfc4133713a6fd08caadac1b142","ci/linux-s390x.sh":"d6b732d7795b4ba131326aff893bca6228a7d2eb0e9402f135705413dbbe0dce","ci/linux-sparc64.sh":"c92966838b1ab7ad3b7a344833ee726aba6b647cf5952e56f0ad1ba420b13325","ci/run-docker.sh":"be83bc5a8b5ef913a7c9941ffca24734716028650c9a876123c4c160672c18de","ci/run-qemu.sh":"bb859421170871ef23a8940c5e150efec0c01b95e32d2ce2d37b79a45d9d346c","ci/run.sh":"86b7d1ce555ed5eeeac6b44fd0e9563166ff38ba38e56a82d70800ace3b65946","ci/runtest-android.rs":"a07ddbdd276aedda7876c7e676774178a60d9aeab95df01275a4ee95f59e3044","ci/style.rs":"940c06a676cff1dfc1555b887e4686
7c6aacc473956cd6aaafaed71824facdb2","ci/test-runner-linux":"cb3713d9e4fa1d9a7c039dfd077af0939921c1f2bf969c9e680ee66e87dc30a4","src/cloudabi/aarch64.rs":"b8550bf1fd7344972aa4db29441486f39f31482d0327534981dbb75959c29114","src/cloudabi/arm.rs":"c197e2781c2839808bd6fcef219a29705b27b992d3ef920e9cf6ac96e2022bbf","src/cloudabi/mod.rs":"1c2dc787a1b3438970ccd04153c93538719b1a27445707913b90b6f0cdcdde77","src/cloudabi/x86.rs":"33eb97f272d2201f3838ae74d444583c7de8f67856852ca375293b20bbd05636","src/cloudabi/x86_64.rs":"400d85d4fe39e26cf2e6ece9ee31c75fe9e88c4bcf4d836ca9f765c05c9c5be3","src/dox.rs":"8f6037887281b828d8541ce8a549dacaed5d29c05fd9cf78b169838476b82741","src/fuchsia/aarch64.rs":"8366ac6f51e494aad9266ccab2b3a95c5ed7aa3a9f77ea672413283440919743","src/fuchsia/mod.rs":"e5b7e6ff40e670200c52919b53474627931b4def18d452323999de201953cf21","src/fuchsia/powerpc64.rs":"390e8db54271a1d5f512d54a21b328091a792378bf9b42b49b6c1a72388da4ec","src/fuchsia/x86_64.rs":"b4a3eff94dcf1ffe41e6500468ca2cff0e97ddbcc75fe079b6ac7adb1e493f56","src/lib.rs":"bbad6abf79078649e76c2379c3c3fcbc4198430736e726a01d96a8f1439e8cb1","src/macros.rs":"8ad55edea39fad158e80c5df0d7b520b6863f5088db4db286ba31c12cbc4d67d","src/redox/mod.rs":"685d4d39911e855bf0fd7879b9a02bc15cefebfb0f520382c1a1353364f0d523","src/redox/net.rs":"f2e1922883f208cb46c00744da4a68feccfbec576c6981978ad404e46f818c8b","src/unix/bsd/apple/b32.rs":"41699d2802327b0a4d4aa50cd20b1e366b442176cbedab27ca888ac0446c9156","src/unix/bsd/apple/b64.rs":"0cda592881a1db30f2d96ff0f67cf4214aa99881dfe4f2fb474ef7ec78bd204a","src/unix/bsd/apple/mod.rs":"ac02092ad74cb81fa789e89d541d79525d2298ac77842847b48e5fd1a83fff0d","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"7a8df4e8079ed60ad4ac35362901eb2fea4d53384709e4ac45930899b84f8eaf","src/unix/bsd/freebsdlike/freebsd/aarch64.rs":"97132e2097411034271b8c927ecc94a208a361564680972a6c82998bd30a9826","src/unix/bsd/freebsdlike/freebsd/mod.rs":"1b158199be42b37708f10d0e73164cf0b63a2b5ae0e6fcc1feab5944f5377f24","src/unix/bsd/freebsdlik
e/freebsd/x86.rs":"54311d3ebf2bb091ab22361e377e6ef9224aec2ecfe459fbfcedde4932db9c58","src/unix/bsd/freebsdlike/freebsd/x86_64.rs":"97132e2097411034271b8c927ecc94a208a361564680972a6c82998bd30a9826","src/unix/bsd/freebsdlike/mod.rs":"9ddcc86d3bb76f426e26c4df8d853b3715a7d6d9006acaa4fa26b2b0f5bb3314","src/unix/bsd/mod.rs":"a1030452eed4ec52a39e9f39040e8fae4309143c7af145efadd9b425caa39672","src/unix/bsd/netbsdlike/mod.rs":"6fb522d55eced39ef1bc28873f9ae8d5ab141acde09c5cb6a22aeca577d916ad","src/unix/bsd/netbsdlike/netbsd/mod.rs":"1cd66ed5967c788562d4ad626cfbeb1544f4b9267111de5f6790379b8327f28e","src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/netbsd/other/mod.rs":"4d9f7091af8e166943ac6f42ce85558909e5b6e61325039bff7adfbcf4b90212","src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs":"92459d80b8e5b570c0efe35a1d244d38f23072cd28b6581dfcb313dc9bfc3d51","src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86_64.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/openbsdlike/mod.rs":"37fea61239bd53cd951fd7bc800229f663be2e9ad2e1539ebdf73767ca28b469","src/unix/bsd/netbsdlike/openbsdlike/openbsd/aarch64.rs":"820092e397c7ec259cd2de8f2444083a8e57071c02d73d678701dfa7807726e9","src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs":"973331fd80876db55467913556d81f45ea1ede03322fef9c5d552aba833d2207","src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86.rs":"44b7ea81cf363777b29935da175e702cbf45ed78f7498ae57faf44aa32335085","src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86_64.rs":"7c959cdb3415f68a0f948117b9aa87a17463e57ab97cc3235f2567454b706653","src/unix/haiku/b32.rs":"69ae47fc52c6880e85416b4744500d5655c9ec6131cb737f3b649fceaadce15a","src/unix
/haiku/b64.rs":"73e64db09275a8da8d50a13cce2cfa2b136036ddf3a930d2939f337fc995900b","src/unix/haiku/mod.rs":"8b8a7a51e1bc20407e42b0ab6c1a0cd1c8fc4f3ef61a04ccb7c8e312495ce30b","src/unix/hermit/aarch64.rs":"86048676e335944c37a63d0083d0f368ae10ceccefeed9debb3bbe08777fc682","src/unix/hermit/mod.rs":"a8bb096695eea74fb1c5c6766c1e680d28378c452dbc622fa5f91c6ce204306f","src/unix/hermit/x86_64.rs":"ab832b7524e5fb15c49ff7431165ab1a37dc4667ae0b58e8306f4c539bfa110c","src/unix/mod.rs":"af909129f76f9bbe571dee2e78f43afd63ff6e46c27a429da43c239537330283","src/unix/newlib/aarch64/mod.rs":"c408a990f22fb4292a824f38367e9b517e6e6f8623328397ee631cc88b3d1f7d","src/unix/newlib/arm/mod.rs":"2b6dba2e697ab9b4f4bc4dd5f28057249e9b596d1cb395a9322ec87605c4a5c4","src/unix/newlib/mod.rs":"d9f59ee9a994490122986b7ae5e3064bf9ce0b888349a388a50341a4c7069842","src/unix/notbsd/android/b32/arm.rs":"3625a32c7e58cfe683a53486fbe3d42d4e28f00bea31e19cb46ed2bb0b6a140b","src/unix/notbsd/android/b32/mod.rs":"2fbe398c1fb5251dda6213741a193e50aae4622807cb255d1dd2e82b536f0c65","src/unix/notbsd/android/b32/x86.rs":"ae2b7f1d6278caddc007749bb1d09ca33f7593478a0fd7fe98b457dae86c7814","src/unix/notbsd/android/b64/aarch64.rs":"63d65629d79371814910f691672ef593d20244ee09be26f1ebe07ee6212d0163","src/unix/notbsd/android/b64/mod.rs":"90d4f6b063fd4de42fd302cbc9d9902fd99ac1d71dc48cb8bc6ad7b4c902e481","src/unix/notbsd/android/b64/x86_64.rs":"5547aef8dcbaa5a932559f34606fd8d89f6c9c15173d2b1412c12d39b3c1045f","src/unix/notbsd/android/mod.rs":"eadc87bfea5f5e8ea50a3776b526cea2b0bfaf6d55240ba88134e19670f8a3a6","src/unix/notbsd/emscripten.rs":"d2d817af2b1496c1ee173f216c478a3a1c26223ef938797103bc50a265284662","src/unix/notbsd/linux/mips/mips32.rs":"a483ddfd10765b7d5090dc21686eee8842649cd21236828a42d634114885f5f9","src/unix/notbsd/linux/mips/mips64.rs":"9fff696e3943cf206b549d1ae13fa361828e9a8454e2d5730eeaa1c172ff370d","src/unix/notbsd/linux/mips/mod.rs":"af1b7bffff09aa5d8662e6452f72bc1e55b6639899b8c2a4be9cd1cfb245de01","src/unix/notbsd/linux/mod
.rs":"081cb74be8b465c9cf86289f675cb7c6f7d7eee39713a6764e1871eabcbaee44","src/unix/notbsd/linux/musl/b32/arm.rs":"9d9bff31ab0925a1f62a20945d36a83b94ce3ab78dd202cd468bb31556b21725","src/unix/notbsd/linux/musl/b32/mips.rs":"cb38c463aebfc235f31880db158dd47c6e21f182a092d3f3087d92994b7711da","src/unix/notbsd/linux/musl/b32/mod.rs":"540928f168f145c136f9dd729ffa12b9d1838d9fe664fc642365d17d7fae648f","src/unix/notbsd/linux/musl/b32/powerpc.rs":"3930a2825657ac9208935341e29cfa62f6e37fc5c6b2c0d0dc9ac8c3b5569d59","src/unix/notbsd/linux/musl/b32/x86.rs":"c02dd333012cf65cb8873fa211eff5e63d466be55451a347510e3d4f50ed515e","src/unix/notbsd/linux/musl/b64/aarch64.rs":"4d79d86d11fbb8cb7a74084e410a1140e3c89dfc1842cdfb213f3a0ca93046df","src/unix/notbsd/linux/musl/b64/mod.rs":"caac00326693b372d6805e4dda239475e7fef36368881f372c006264844fda0d","src/unix/notbsd/linux/musl/b64/powerpc64.rs":"24514e41be4b5f5e0ffbe8a25a99dae8989489b607db59e8bfa345f8e65c9963","src/unix/notbsd/linux/musl/b64/x86_64.rs":"25340999290a63d564ec149532c905f59c312ec369f8806d6b15df66fa1b8857","src/unix/notbsd/linux/musl/mod.rs":"17c70acf9c6eabeb73916c2abb7d7e8b0310214090faae4a8dc2fd183a9b45ba","src/unix/notbsd/linux/other/b32/arm.rs":"d9892f7350b2978335f734f1cd2d7fed60f0f2e66aa05bee3f69549c031f8b14","src/unix/notbsd/linux/other/b32/mod.rs":"dac0fd1054a0fa163bce85df58c4ad4d222d8b7353bdb4364482b46c19991d03","src/unix/notbsd/linux/other/b32/powerpc.rs":"253fcd2f9978525285be1903cc08f3fec2dc3b12d1660a33e2995b4f6b810d1c","src/unix/notbsd/linux/other/b32/x86.rs":"49376e3ed0f3ff95c230ac20751911fe3c608dfe15c7c118b069fd7a954d8db9","src/unix/notbsd/linux/other/b64/aarch64.rs":"d57f3e06a0ac8affc5bf9d17e1f217ef1d1d714c947f47e647e0e038deaf48b2","src/unix/notbsd/linux/other/b64/mod.rs":"63e1a3fdf5f4d1b9820934ab344c91aed5e458e7e05908535d2e942d51a08bf8","src/unix/notbsd/linux/other/b64/not_x32.rs":"fa8636fb93eab230ed53bdec0a06f5b81d6d982cc0800103563c8c1eefcdb2d9","src/unix/notbsd/linux/other/b64/powerpc64.rs":"024057a910d0b885c63443165d34
ce33f972973a9a8f5979906198180b19ad8b","src/unix/notbsd/linux/other/b64/sparc64.rs":"bb28f201e29c7f490a42dd2673eb8180fd82c1824a5d21aeb5aed674ffcc6e07","src/unix/notbsd/linux/other/b64/x32.rs":"06a26c5120ced30fc015c220799b67c4401be2f13fc6c7361bebd3d37ff4982d","src/unix/notbsd/linux/other/b64/x86_64.rs":"afba464e903d350325a1ca3d9d5af1659efc0ede83a43dbac4dbd60c522e2ad1","src/unix/notbsd/linux/other/mod.rs":"add154a8cfe9392d5a73b6055eb1419902e2b8b5458c22586a6ef8f90f89501b","src/unix/notbsd/linux/s390x.rs":"033cb7c4ee00af352698615de0248a5c6c9bcff57ba671ee22d31b70ef86fb4a","src/unix/notbsd/mod.rs":"134502158bab09d5189249ef400c9ddf7fdf5d1a1bd3134484ca82b80a0833e7","src/unix/solaris/mod.rs":"9c52a7479b56d3bc1f2c9ba5bb44f71ab1470989a54d3d0d85571e19489e4b7e","src/unix/uclibc/mips/mips32.rs":"9739c5fb47f389a0394ef08ee30da97a3de0a1300020731a8cc0a033616011b2","src/unix/uclibc/mips/mips64.rs":"230583280bbc7b3c7fcdb61244f51fa1af5944ca127c7cf83c598fe2313713d0","src/unix/uclibc/mips/mod.rs":"3f86061d05a8da7d923310550b7d40c6223f0c907d77edc86b7a78da1d647f76","src/unix/uclibc/mod.rs":"8cf2db30468476b917a60fdffe475917302875a3a4150f29a042ebee182545d1","src/unix/uclibc/x86_64/l4re.rs":"68fd3a833fd1f7caf784a084224f384bdbdfb8b5a14ef94c4f5155409afb3439","src/unix/uclibc/x86_64/mod.rs":"419182836aedd426a5c9e6b8667058adf86ac8f43af73ce8d00c503f8ff8f414","src/unix/uclibc/x86_64/other.rs":"f03b47842896f2f3ae6f8ebdcbcf0276454f880349d9cf00e3d304f8136893c5","src/windows.rs":"e41357d610608bad81abf285306ad8b127b6f02d5132f63c4942861980b47d59"},"package":"76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"} \ No newline at end of file diff --git a/third_party/rust/libc/.travis.yml b/third_party/rust/libc/.travis.yml index 6acf0a80641b..b6a73f00c278 100644 --- a/third_party/rust/libc/.travis.yml +++ b/third_party/rust/libc/.travis.yml @@ -21,10 +21,13 @@ env: global: secure: 
"e2/3QjgRN9atOuSHp22TrYG7QVKcYUWY48Hi9b60w+r1+BhPkTseIJLte7WefRhdXtqpjjUJTooKDhnurFOeHaCT+nmBgiv+FPU893sBl4bhesY4m0vgUJVbNZcs6lTImYekWVb+aqjGdgV/XAgCw7c3kPmrZV0MzGDWL64Xaps=" matrix: + allow_failures: + # FIXME(#987) move back to include once 404 is fixed + - env: TARGET=s390x-unknown-linux-gnu include: - # 1.0.0 compat + # 1.13.0 compat - env: TARGET=x86_64-unknown-linux-gnu NO_ADD=1 - rust: 1.0.0 + rust: 1.13.0 script: rm -f Cargo.lock && cargo build install: @@ -38,10 +41,10 @@ matrix: - env: TARGET=i686-unknown-linux-gnu - os: osx env: TARGET=x86_64-apple-darwin NO_ADD=1 - osx_image: xcode8.3 + osx_image: xcode9.4 - os: osx env: TARGET=i686-apple-darwin - osx_image: xcode8.3 + osx_image: xcode9.4 - env: TARGET=arm-linux-androideabi - env: TARGET=aarch64-linux-android # FIXME(#826) should reenable @@ -56,14 +59,14 @@ matrix: # FIXME(#856) rust: 1.22.1 - os: osx - osx_image: xcode8.2 + osx_image: xcode9.4 env: TARGET=i386-apple-ios CARGO_TARGET_I386_APPLE_IOS_RUNNER=$HOME/runtest RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 before_install: rustc ./ci/ios/deploy_and_run_on_ios_simulator.rs -o $HOME/runtest - os: osx - osx_image: xcode8.2 + osx_image: xcode9.4 env: TARGET=x86_64-apple-ios CARGO_TARGET_X86_64_APPLE_IOS_RUNNER=$HOME/runtest RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 @@ -88,7 +91,7 @@ matrix: rust: beta - os: osx env: TARGET=x86_64-apple-darwin NO_ADD=1 - osx_image: xcode8.3 + osx_image: xcode9.4 rust: beta # nightly @@ -96,7 +99,7 @@ matrix: rust: nightly - os: osx env: TARGET=x86_64-apple-darwin NO_ADD=1 - osx_image: xcode8.3 + osx_image: xcode9.4 rust: nightly # not available on stable # without --release the build fails @@ -106,6 +109,13 @@ matrix: # QEMU based targets that compile in an emulator - env: TARGET=x86_64-unknown-freebsd + allow_failures: + - env: TARGET=i386-apple-ios + CARGO_TARGET_I386_APPLE_IOS_RUNNER=$HOME/runtest + RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 + - env: TARGET=x86_64-apple-ios + 
CARGO_TARGET_X86_64_APPLE_IOS_RUNNER=$HOME/runtest + RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 notifications: email: diff --git a/third_party/rust/libc/Cargo.toml b/third_party/rust/libc/Cargo.toml index 019f68e52efd..876b127c08f6 100644 --- a/third_party/rust/libc/Cargo.toml +++ b/third_party/rust/libc/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "libc" -version = "0.2.39" +version = "0.2.43" authors = ["The Rust Project Developers"] description = "A library for types and bindings to native C functions often found in libc or\nother common platform libraries.\n" homepage = "https://github.com/rust-lang/libc" @@ -22,6 +22,7 @@ license = "MIT/Apache-2.0" repository = "https://github.com/rust-lang/libc" [features] +align = [] default = ["use_std"] use_std = [] [badges.appveyor] diff --git a/third_party/rust/libc/README.md b/third_party/rust/libc/README.md index c3333da14707..a19a56ee0f4f 100644 --- a/third_party/rust/libc/README.md +++ b/third_party/rust/libc/README.md @@ -6,8 +6,10 @@ various systems, including libc. [![Build Status](https://travis-ci.org/rust-lang/libc.svg?branch=master)](https://travis-ci.org/rust-lang/libc) [![Build status](https://ci.appveyor.com/api/projects/status/github/rust-lang/libc?svg=true)](https://ci.appveyor.com/project/rust-lang-libs/libc) +[![Latest version](https://img.shields.io/crates/v/libc.svg)](https://crates.io/crates/libc) +[![Documentation](https://docs.rs/libc/badge.svg)](https://docs.rs/libc) +![License](https://img.shields.io/crates/l/libc.svg) -[Documentation](#platforms-and-documentation) ## Usage @@ -33,6 +35,16 @@ this via: libc = { version = "0.2", default-features = false } ``` +By default libc uses private fields in structs in order to enforce a certain +memory alignment on them. These structs can be hard to instantiate outside of +libc. To make libc use `#[repr(align(x))]`, instead of the private fields, +activate the *align* feature. 
This requires Rust 1.25 or newer: + +```toml +[dependencies] +libc = { version = "0.2", features = ["align"] } +``` + ## What is libc? The primary purpose of this crate is to provide all of the definitions necessary @@ -122,38 +134,41 @@ it. If you'd like to get a release out ASAP you can follow these steps: The following platforms are currently tested and have documentation available: Tested: - * [`i686-pc-windows-msvc`](https://doc.rust-lang.org/libc/i686-pc-windows-msvc/libc/) - * [`x86_64-pc-windows-msvc`](https://doc.rust-lang.org/libc/x86_64-pc-windows-msvc/libc/) + * [`i686-pc-windows-msvc`](https://rust-lang.github.io/libc/i686-pc-windows-msvc/libc/) + * [`x86_64-pc-windows-msvc`](https://rust-lang.github.io/libc/x86_64-pc-windows-msvc/libc/) (Windows) - * [`i686-pc-windows-gnu`](https://doc.rust-lang.org/libc/i686-pc-windows-gnu/libc/) - * [`x86_64-pc-windows-gnu`](https://doc.rust-lang.org/libc/x86_64-pc-windows-gnu/libc/) - * [`i686-apple-darwin`](https://doc.rust-lang.org/libc/i686-apple-darwin/libc/) - * [`x86_64-apple-darwin`](https://doc.rust-lang.org/libc/x86_64-apple-darwin/libc/) + * [`i686-pc-windows-gnu`](https://rust-lang.github.io/libc/i686-pc-windows-gnu/libc/) + * [`x86_64-pc-windows-gnu`](https://rust-lang.github.io/libc/x86_64-pc-windows-gnu/libc/) + * [`i686-apple-darwin`](https://rust-lang.github.io/libc/i686-apple-darwin/libc/) + * [`x86_64-apple-darwin`](https://rust-lang.github.io/libc/x86_64-apple-darwin/libc/) (OSX) * `i386-apple-ios` * `x86_64-apple-ios` - * [`i686-unknown-linux-gnu`](https://doc.rust-lang.org/libc/i686-unknown-linux-gnu/libc/) - * [`x86_64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu/libc/) + * [`i686-unknown-linux-gnu`](https://rust-lang.github.io/libc/i686-unknown-linux-gnu/libc/) + * [`x86_64-unknown-linux-gnu`](https://rust-lang.github.io/libc/x86_64-unknown-linux-gnu/libc/) (Linux) - * [`x86_64-unknown-linux-musl`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl/libc/) 
+ * [`x86_64-unknown-linux-musl`](https://rust-lang.github.io/libc/x86_64-unknown-linux-musl/libc/) (Linux MUSL) - * [`aarch64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu/libc/) + * [`aarch64-unknown-linux-gnu`](https://rust-lang.github.io/libc/aarch64-unknown-linux-gnu/libc/) (Linux) - * [`aarch64-unknown-linux-musl`](https://doc.rust-lang.org/libc/aarch64-unknown-linux-musl/libc/) + * `aarch64-unknown-linux-musl` (Linux MUSL) - * [`mips-unknown-linux-gnu`](https://doc.rust-lang.org/libc/mips-unknown-linux-gnu/libc/) - * [`arm-unknown-linux-gnueabihf`](https://doc.rust-lang.org/libc/arm-unknown-linux-gnueabihf/libc/) - * [`arm-linux-androideabi`](https://doc.rust-lang.org/libc/arm-linux-androideabi/libc/) + * [`sparc64-unknown-linux-gnu`](https://rust-lang.github.io/libc/sparc64-unknown-linux-gnu/libc/) + (Linux) + * [`mips-unknown-linux-gnu`](https://rust-lang.github.io/libc/mips-unknown-linux-gnu/libc/) + * [`arm-unknown-linux-gnueabihf`](https://rust-lang.github.io/libc/arm-unknown-linux-gnueabihf/libc/) + * [`arm-linux-androideabi`](https://rust-lang.github.io/libc/arm-linux-androideabi/libc/) (Android) - * [`x86_64-unknown-freebsd`](https://doc.rust-lang.org/libc/x86_64-unknown-freebsd/libc/) - * [`x86_64-unknown-openbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-openbsd/libc/) - * [`x86_64-rumprun-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) + * [`x86_64-unknown-freebsd`](https://rust-lang.github.io/libc/x86_64-unknown-freebsd/libc/) + * [`x86_64-unknown-openbsd`](https://rust-lang.github.io/libc/x86_64-unknown-openbsd/libc/) + * [`x86_64-rumprun-netbsd`](https://rust-lang.github.io/libc/x86_64-unknown-netbsd/libc/) The following may be supported, but are not guaranteed to always work: * `i686-unknown-freebsd` - * [`x86_64-unknown-bitrig`](https://doc.rust-lang.org/libc/x86_64-unknown-bitrig/libc/) - * [`x86_64-unknown-dragonfly`](https://doc.rust-lang.org/libc/x86_64-unknown-dragonfly/libc/) + * 
[`x86_64-unknown-bitrig`](https://rust-lang.github.io/libc/x86_64-unknown-bitrig/libc/) + * [`x86_64-unknown-dragonfly`](https://rust-lang.github.io/libc/x86_64-unknown-dragonfly/libc/) * `i686-unknown-haiku` * `x86_64-unknown-haiku` - * [`x86_64-unknown-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) + * [`x86_64-unknown-netbsd`](https://rust-lang.github.io/libc/x86_64-unknown-netbsd/libc/) + * [`x86_64-sun-solaris`](https://rust-lang.github.io/libc/x86_64-sun-solaris/libc/) diff --git a/third_party/rust/libc/ci/README.md b/third_party/rust/libc/ci/README.md index aef6ef1db182..28152e5d00b9 100644 --- a/third_party/rust/libc/ci/README.md +++ b/third_party/rust/libc/ci/README.md @@ -128,32 +128,72 @@ QEMU is available, and if so mount it, run a script (it'll specifically be `run-qemu.sh` in this folder which is copied into the generated image talked about above), and then shut down. -### QEMU setup - FreeBSD +### QEMU Setup - FreeBSD -1. Download CD installer (most minimal is fine) -2. `qemu-img create -f qcow2 foo.qcow2 2G` -3. `qemu -cdrom foo.iso -drive if=virtio,file=foo.qcow2 -net nic,model=virtio -net user` -4. run installer -5. `echo 'console="comconsole"' >> /boot/loader.conf` -6. `echo 'autoboot_delay="0"' >> /boot/loader.conf` -7. look at /etc/ttys, see what getty argument is for ttyu0 -8. edit /etc/gettytab, look for ttyu0 argument, prepend `:al=root` to line - beneath +1. [Download the latest stable amd64-bootonly release ISO](https://www.freebsd.org/where.html). + E.g. FreeBSD-11.1-RELEASE-amd64-bootonly.iso +2. Create the disk image: `qemu-img create -f qcow2 FreeBSD-11.1-RELEASE-amd64.qcow2 2G` +3. Boot the machine: `qemu-system-x86_64 -cdrom FreeBSD-11.1-RELEASE-amd64-bootonly.iso -drive if=virtio,file=FreeBSD-11.1-RELEASE-amd64.qcow2 -net nic,model=virtio -net user` +4. Run the installer, and install FreeBSD: + 1. Install + 1. Continue with default keymap + 1. Set Hostname: freebsd-ci + 1. Distribution Select: + 1. 
Uncheck lib32 + 1. Uncheck ports + 1. Network Configuration: vtnet0 + 1. Configure IPv4? Yes + 1. DHCP? Yes + 1. Configure IPv6? No + 1. Resolver Configuration: Ok + 1. Mirror Selection: Main Site + 1. Partitioning: Auto (UFS) + 1. Partition: Entire Disk + 1. Partition Scheme: MBR + 1. App Partition: Ok + 1. Partition Editor: Finish + 1. Confirmation: Commit + 1. Wait for sets to install + 1. Set the root password to nothing (press enter twice) + 1. Set time zone to UTC + 1. Set Date: Skip + 1. Set Time: Skip + 1. System Configuration: + 1. Disable sshd + 1. Disable dumpdev + 1. System Hardening + 1. Disable Sendmail service + 1. Add User Accounts: No + 1. Final Configuration: Exit + 1. Manual Configuration: Yes + 1. `echo 'console="comconsole"' >> /boot/loader.conf` + 1. `echo 'autoboot_delay="0"' >> /boot/loader.conf` + 1. `echo 'ext2fs_load="YES"' >> /boot/loader.conf` + 1. Look at `/etc/ttys`, see what getty argument is for `ttyu0` (E.g. `3wire`) + 1. Edit `/etc/gettytab` (with `vi` for example), look for `ttyu0` argument, + prepend `:al=root` to the line beneath to have the machine auto-login as + root. E.g. -(note that the current image has a `freebsd` user, but this isn't really -necessary) + 3wire:\ + :np:nc:sp#0: + becomes: -Once that's done, arrange for this script to run at login: + 3wire:\ + :al=root:np:nc:sp#0: -``` -#!/bin/sh + 1. Edit `/root/.login` and put this in it: -sudo kldload ext2fs -[ -e /dev/vtbd1 ] || exit 0 -sudo mount -t ext2fs /dev/vtbd1 /mnt -sh /mnt/run.sh /mnt -sudo poweroff -``` + [ -e /dev/vtbd1 ] || exit 0 + mount -t ext2fs /dev/vtbd1 /mnt + sh /mnt/run.sh /mnt + poweroff + + 1. Exit the post install shell: `exit` + 1. Back in in the installer choose Reboot + 1. If all went well the machine should reboot and show a login prompt. + If you switch to the serial console by choosing View > serial0 in + the qemu menu, you should be logged in as root. + 1. 
Shutdown the machine: `shutdown -p now` Helpful links diff --git a/third_party/rust/libc/ci/docker/aarch64-unknown-linux-musl/Dockerfile b/third_party/rust/libc/ci/docker/aarch64-unknown-linux-musl/Dockerfile index e86c4c0ae3d2..caec1572cbb9 100644 --- a/third_party/rust/libc/ci/docker/aarch64-unknown-linux-musl/Dockerfile +++ b/third_party/rust/libc/ci/docker/aarch64-unknown-linux-musl/Dockerfile @@ -3,21 +3,21 @@ FROM ubuntu:17.10 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc make libc6-dev git curl ca-certificates \ gcc-aarch64-linux-gnu qemu-user -RUN curl https://www.musl-libc.org/releases/musl-1.1.16.tar.gz | \ +RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | \ tar xzf - && \ - cd musl-1.1.16 && \ + cd musl-1.1.19 && \ CC=aarch64-linux-gnu-gcc \ ./configure --prefix=/musl-aarch64 --enable-wrapper=yes && \ make install -j4 && \ cd .. && \ - rm -rf musl-1.1.16 && \ + rm -rf musl-1.1.19 # Install linux kernel headers sanitized for use with musl - curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-5.tar.gz | \ +RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ tar xzf - && \ - cd kernel-headers-3.12.6-5 && \ + cd kernel-headers-3.12.6-6 && \ make ARCH=arm64 prefix=/musl-aarch64 install -j4 && \ cd .. && \ - rm -rf kernel-headers-3.12.6-5 + rm -rf kernel-headers-3.12.6-6 # FIXME: shouldn't need the `-lgcc` here, shouldn't that be in libstd? 
ENV PATH=$PATH:/musl-aarch64/bin:/rust/bin \ diff --git a/third_party/rust/libc/ci/docker/arm-unknown-linux-musleabihf/Dockerfile b/third_party/rust/libc/ci/docker/arm-unknown-linux-musleabihf/Dockerfile index 130730b99733..86304130fe30 100644 --- a/third_party/rust/libc/ci/docker/arm-unknown-linux-musleabihf/Dockerfile +++ b/third_party/rust/libc/ci/docker/arm-unknown-linux-musleabihf/Dockerfile @@ -4,21 +4,21 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ gcc make libc6-dev git curl ca-certificates \ gcc-arm-linux-gnueabihf qemu-user -RUN curl https://www.musl-libc.org/releases/musl-1.1.16.tar.gz | tar xzf - -WORKDIR /musl-1.1.16 +RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | tar xzf - +WORKDIR /musl-1.1.19 RUN CC=arm-linux-gnueabihf-gcc \ CFLAGS="-march=armv6 -marm" \ ./configure --prefix=/musl-arm --enable-wrapper=yes RUN make install -j4 # Install linux kernel headers sanitized for use with musl -RUN \ - curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-5.tar.gz | \ +RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ tar xzf - && \ - cd kernel-headers-3.12.6-5 && \ + cd kernel-headers-3.12.6-6 && \ make ARCH=arm prefix=/musl-arm install -j4 && \ cd .. && \ - rm -rf kernel-headers-3.12.6-5 + rm -rf kernel-headers-3.12.6-6 + ENV PATH=$PATH:/musl-arm/bin:/rust/bin \ CC_arm_unknown_linux_musleabihf=musl-gcc \ CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_LINKER=musl-gcc \ diff --git a/third_party/rust/libc/ci/docker/i686-unknown-linux-musl/Dockerfile b/third_party/rust/libc/ci/docker/i686-unknown-linux-musl/Dockerfile index 3adb92004084..49f37d70f2af 100644 --- a/third_party/rust/libc/ci/docker/i686-unknown-linux-musl/Dockerfile +++ b/third_party/rust/libc/ci/docker/i686-unknown-linux-musl/Dockerfile @@ -12,19 +12,20 @@ RUN apt-get install -y --no-install-recommends \ # since otherwise the script will fail to find a compiler. 
# * We manually unset CROSS_COMPILE when running make; otherwise the makefile # will call the non-existent binary 'i686-ar'. -RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ +RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | \ tar xzf - && \ - cd musl-1.1.15 && \ + cd musl-1.1.19 && \ CC=gcc CFLAGS=-m32 ./configure --prefix=/musl-i686 --disable-shared --target=i686 && \ make CROSS_COMPILE= install -j4 && \ cd .. && \ - rm -rf musl-1.1.15 && \ + rm -rf musl-1.1.19 # Install linux kernel headers sanitized for use with musl - curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-5.tar.gz | \ +RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ tar xzf - && \ - cd kernel-headers-3.12.6-5 && \ + cd kernel-headers-3.12.6-6 && \ make ARCH=i386 prefix=/musl-i686 install -j4 && \ cd .. && \ - rm -rf kernel-headers-3.12.6-5 + rm -rf kernel-headers-3.12.6-6 + ENV PATH=$PATH:/musl-i686/bin:/rust/bin \ CC_i686_unknown_linux_musl=musl-gcc diff --git a/third_party/rust/libc/ci/docker/sparc64-unknown-linux-gnu/Dockerfile b/third_party/rust/libc/ci/docker/sparc64-unknown-linux-gnu/Dockerfile index 90b5ea9bdaa0..d9edaab42635 100644 --- a/third_party/rust/libc/ci/docker/sparc64-unknown-linux-gnu/Dockerfile +++ b/third_party/rust/libc/ci/docker/sparc64-unknown-linux-gnu/Dockerfile @@ -5,7 +5,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ gcc libc6-dev \ gcc-sparc64-linux-gnu libc6-dev-sparc64-cross \ qemu-system-sparc64 openbios-sparc seabios ipxe-qemu \ - p7zip-full cpio + p7zip-full cpio linux-libc-dev-sparc64-cross linux-headers-4.9.0-3-common + +# Put linux/module.h into the right spot as it is not shipped by debian +RUN cp /usr/src/linux-headers-4.9.0-3-common/include/uapi/linux/module.h /usr/sparc64-linux-gnu/include/linux/ COPY linux-sparc64.sh / RUN bash /linux-sparc64.sh diff --git a/third_party/rust/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile 
b/third_party/rust/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile index 7ad3faff37c0..35f103657585 100644 --- a/third_party/rust/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile +++ b/third_party/rust/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile @@ -1,13 +1,13 @@ -FROM alexcrichton/port-prebuilt-freebsd:2017-09-16 +FROM wezm/port-prebuilt-freebsd11@sha256:43553e2265ec702ec72a63a765df333f50b1858b896e69385749e96d8624e9b0 RUN apt-get update RUN apt-get install -y --no-install-recommends \ - qemu genext2fs + qemu genext2fs xz-utils RUN apt-get install -y curl ca-certificates gcc ENTRYPOINT ["sh"] ENV PATH=$PATH:/rust/bin \ - QEMU=2016-11-06/freebsd.qcow2.gz \ + QEMU=2018-03-15/FreeBSD-11.1-RELEASE-amd64.qcow2.xz \ CAN_CROSS=1 \ - CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd10-gcc + CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd11-gcc diff --git a/third_party/rust/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile b/third_party/rust/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile index d9d651138661..6e2b7d9e5ea7 100644 --- a/third_party/rust/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile +++ b/third_party/rust/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile @@ -3,18 +3,18 @@ FROM ubuntu:17.10 RUN apt-get update RUN apt-get install -y --no-install-recommends \ gcc make libc6-dev git curl ca-certificates -RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ +RUN curl https://www.musl-libc.org/releases/musl-1.1.19.tar.gz | \ tar xzf - && \ - cd musl-1.1.15 && \ + cd musl-1.1.19 && \ ./configure --prefix=/musl-x86_64 && \ make install -j4 && \ cd .. 
&& \ - rm -rf musl-1.1.15 && \ + rm -rf musl-1.1.19 # Install linux kernel headers sanitized for use with musl - curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-5.tar.gz | \ +RUN curl -L https://github.com/sabotage-linux/kernel-headers/archive/v3.12.6-6.tar.gz | \ tar xzf - && \ - cd kernel-headers-3.12.6-5 && \ + cd kernel-headers-3.12.6-6 && \ make ARCH=x86_64 prefix=/musl-x86_64 install -j4 && \ cd .. && \ - rm -rf kernel-headers-3.12.6-5 + rm -rf kernel-headers-3.12.6-6 ENV PATH=$PATH:/musl-x86_64/bin:/rust/bin diff --git a/third_party/rust/libc/ci/dox.sh b/third_party/rust/libc/ci/dox.sh index 85e92439484c..b8ffa7dd0d07 100644 --- a/third_party/rust/libc/ci/dox.sh +++ b/third_party/rust/libc/ci/dox.sh @@ -16,7 +16,7 @@ cp ci/landing-page-head.html target/doc/index.html for target in $TARGETS; do echo documenting $target - rustdoc -o target/doc/$target --target $target src/lib.rs --cfg dox \ + rustdoc -o target/doc/$target --target $target src/lib.rs --cfg cross_platform_docs \ --crate-name libc echo "
  • $target
  • " \ diff --git a/third_party/rust/libc/ci/ios/deploy_and_run_on_ios_simulator.rs b/third_party/rust/libc/ci/ios/deploy_and_run_on_ios_simulator.rs index b14615036d02..95df52d76d59 100644 --- a/third_party/rust/libc/ci/ios/deploy_and_run_on_ios_simulator.rs +++ b/third_party/rust/libc/ci/ios/deploy_and_run_on_ios_simulator.rs @@ -123,6 +123,7 @@ fn run_app_on_simulator() { .arg("com.rust.unittests") .output()); + println!("status: {}", output.status); println!("stdout --\n{}\n", String::from_utf8_lossy(&output.stdout)); println!("stderr --\n{}\n", String::from_utf8_lossy(&output.stderr)); diff --git a/third_party/rust/libc/ci/linux-sparc64.sh b/third_party/rust/libc/ci/linux-sparc64.sh index 33a3c46c77af..4452b120e1b6 100644 --- a/third_party/rust/libc/ci/linux-sparc64.sh +++ b/third_party/rust/libc/ci/linux-sparc64.sh @@ -3,7 +3,7 @@ set -ex mkdir -m 777 /qemu cd /qemu -curl -LO https://cdimage.debian.org/cdimage/ports/debian-9.0-sparc64-NETINST-1.iso +curl -LO https://cdimage.debian.org/cdimage/ports/9.0/sparc64/iso-cd/debian-9.0-sparc64-NETINST-1.iso 7z e debian-9.0-sparc64-NETINST-1.iso boot/initrd.gz 7z e debian-9.0-sparc64-NETINST-1.iso boot/sparc64 mv sparc64 kernel diff --git a/third_party/rust/libc/ci/run.sh b/third_party/rust/libc/ci/run.sh index 8a1d10b29edb..27ffc054a081 100755 --- a/third_party/rust/libc/ci/run.sh +++ b/third_party/rust/libc/ci/run.sh @@ -24,6 +24,13 @@ if [ "$QEMU" != "" ]; then curl https://s3-us-west-1.amazonaws.com/rust-lang-ci2/libc/$QEMU | \ gunzip -d > $tmpdir/$qemufile fi + elif [ -z "${QEMU#*.xz}" ]; then + # image is .xz : download and uncompress it + qemufile=$(echo ${QEMU%.xz} | sed 's/\//__/g') + if [ ! 
-f $tmpdir/$qemufile ]; then + curl https://s3-us-west-1.amazonaws.com/rust-lang-ci2/libc/$QEMU | \ + unxz > $tmpdir/$qemufile + fi else # plain qcow2 image: just download it qemufile=$(echo ${QEMU} | sed 's/\//__/g') @@ -72,12 +79,20 @@ if [ "$QEMU" != "" ]; then exec grep "^PASSED .* tests" $CARGO_TARGET_DIR/out.log fi -# FIXME: x86_64-unknown-linux-gnux32 fail to compile wihout --release +# FIXME: x86_64-unknown-linux-gnux32 fail to compile without --release # See https://github.com/rust-lang/rust/issues/45417 opt= if [ "$TARGET" = "x86_64-unknown-linux-gnux32" ]; then opt="--release" fi -cargo test $opt --no-default-features --manifest-path libc-test/Cargo.toml --target $TARGET +# Building with --no-default-features is currently broken on rumprun because we +# need cfg(target_vendor), which is currently unstable. +if [ "$TARGET" != "x86_64-rumprun-netbsd" ]; then + cargo test $opt --no-default-features --manifest-path libc-test/Cargo.toml --target $TARGET +fi +# Test the #[repr(align(x))] feature if this is building on Rust >= 1.25 +if [ $(rustc --version | sed -E 's/^rustc 1\.([0-9]*)\..*/\1/') -ge 25 ]; then + cargo test $opt --features align --manifest-path libc-test/Cargo.toml --target $TARGET +fi exec cargo test $opt --manifest-path libc-test/Cargo.toml --target $TARGET diff --git a/third_party/rust/libc/ci/style.rs b/third_party/rust/libc/ci/style.rs index 32e4ba772c5b..bf31576b909a 100644 --- a/third_party/rust/libc/ci/style.rs +++ b/third_party/rust/libc/ci/style.rs @@ -127,7 +127,9 @@ fn check_style(file: &str, path: &Path, err: &mut Errors) { if line.contains("extern \"C\"") { err.error(path, i, "use `extern` instead of `extern \"C\""); } - if line.contains("#[cfg(") && !line.contains(" if ") { + if line.contains("#[cfg(") && !line.contains(" if ") + && !line.contains("target_endian") + { if state != State::Structs { err.error(path, i, "use cfg_if! 
and submodules \ instead of #[cfg]"); diff --git a/third_party/rust/libc/src/dox.rs b/third_party/rust/libc/src/dox.rs index 5c095b9c76ad..779641b3cc36 100644 --- a/third_party/rust/libc/src/dox.rs +++ b/third_party/rust/libc/src/dox.rs @@ -1,6 +1,6 @@ pub use self::imp::*; -#[cfg(not(dox))] +#[cfg(not(cross_platform_docs))] mod imp { pub use core::option::Option; pub use core::clone::Clone; @@ -8,7 +8,7 @@ mod imp { pub use core::mem; } -#[cfg(dox)] +#[cfg(cross_platform_docs)] mod imp { pub enum Option { Some(T), @@ -19,6 +19,16 @@ mod imp { fn clone(&self) -> Option { loop {} } } + impl Copy for *mut T {} + impl Clone for *mut T { + fn clone(&self) -> *mut T { loop {} } + } + + impl Copy for *const T {} + impl Clone for *const T { + fn clone(&self) -> *const T { loop {} } + } + pub trait Clone { fn clone(&self) -> Self; } @@ -58,13 +68,13 @@ mod imp { } #[lang = "div"] - pub trait Div { + pub trait Div { type Output; fn div(self, rhs: RHS) -> Self::Output; } #[lang = "shl"] - pub trait Shl { + pub trait Shl { type Output; fn shl(self, rhs: RHS) -> Self::Output; } @@ -81,12 +91,39 @@ mod imp { fn sub(self, rhs: RHS) -> Self::Output; } + #[lang = "bitand"] + pub trait BitAnd { + type Output; + fn bitand(self, rhs: RHS) -> Self::Output; + } + + #[lang = "bitand_assign"] + pub trait BitAndAssign { + fn bitand_assign(&mut self, rhs: RHS); + } + #[lang = "bitor"] - pub trait Bitor { + pub trait BitOr { type Output; fn bitor(self, rhs: RHS) -> Self::Output; } + #[lang = "bitor_assign"] + pub trait BitOrAssign { + fn bitor_assign(&mut self, rhs: RHS); + } + + #[lang = "bitxor"] + pub trait BitXor { + type Output; + fn bitxor(self, rhs: RHS) -> Self::Output; + } + + #[lang = "bitxor_assign"] + pub trait BitXorAssign { + fn bitxor_assign(&mut self, rhs: RHS); + } + #[lang = "neg"] pub trait Neg { type Output; @@ -124,10 +161,27 @@ mod imp { type Output = $i; fn sub(self, rhs: $i) -> $i { self - rhs } } - impl Bitor for $i { + impl BitAnd for $i { + type Output = $i; + fn 
bitand(self, rhs: $i) -> $i { self & rhs } + } + impl BitAndAssign for $i { + fn bitand_assign(&mut self, rhs: $i) { *self &= rhs; } + } + impl BitOr for $i { type Output = $i; fn bitor(self, rhs: $i) -> $i { self | rhs } } + impl BitOrAssign for $i { + fn bitor_assign(&mut self, rhs: $i) { *self |= rhs; } + } + impl BitXor for $i { + type Output = $i; + fn bitxor(self, rhs: $i) -> $i { self ^ rhs } + } + impl BitXorAssign for $i { + fn bitxor_assign(&mut self, rhs: $i) { *self ^= rhs; } + } impl Neg for $i { type Output = $i; fn neg(self) -> $i { -self } @@ -140,12 +194,16 @@ mod imp { type Output = $i; fn add(self, other: $i) -> $i { self + other } } + impl Copy for $i {} + impl Clone for $i { + fn clone(&self) -> $i { loop {} } + } )*) } each_int!(impl_traits); pub mod mem { pub fn size_of_val(_: &T) -> usize { 4 } - pub fn size_of(_: &T) -> usize { 4 } + pub const fn size_of() -> usize { 4 } } } diff --git a/third_party/rust/libc/src/fuchsia/mod.rs b/third_party/rust/libc/src/fuchsia/mod.rs index 929acaf8dcc1..e103292979c3 100644 --- a/third_party/rust/libc/src/fuchsia/mod.rs +++ b/third_party/rust/libc/src/fuchsia/mod.rs @@ -166,8 +166,10 @@ s! { pub s_addr: in_addr_t, } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct in6_addr { pub s6_addr: [u8; 16], + #[cfg(not(feature = "align"))] __align: [u32; 0], } @@ -518,14 +520,30 @@ s! 
{ pub ifa_data: *mut ::c_void } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64")))), + repr(align(8)))] pub struct pthread_mutex_t { - #[cfg(any(target_arch = "mips", - target_arch = "arm", - target_arch = "powerpc", - all(target_arch = "x86_64", - target_pointer_width = "32")))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + all(target_arch = "x86_64", + target_pointer_width = "32"))))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", + #[cfg(not(any(feature = "align", + target_arch = "mips", target_arch = "arm", target_arch = "powerpc", all(target_arch = "x86_64", @@ -534,14 +552,30 @@ s! 
{ size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64")))), + repr(align(8)))] pub struct pthread_rwlock_t { - #[cfg(any(target_arch = "mips", - target_arch = "arm", - target_arch = "powerpc", - all(target_arch = "x86_64", - target_pointer_width = "32")))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + all(target_arch = "x86_64", + target_pointer_width = "32"))))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", + #[cfg(not(any(feature = "align", + target_arch = "mips", target_arch = "arm", target_arch = "powerpc", all(target_arch = "x86_64", @@ -550,39 +584,78 @@ s! 
{ size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], } + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl"))), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl")))), + repr(align(8)))] pub struct pthread_mutexattr_t { - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64"))] - __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", + #[cfg(all(not(features = "align"), + any(target_arch = "x86_64", target_arch = "powerpc64", target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64", target_arch = "aarch64")))] - __align: [::c_long; 0], - #[cfg(all(target_arch = "aarch64", target_env = "gnu"))] - __align: [::c_long; 0], - #[cfg(all(target_arch = "aarch64", target_env = "musl"))] + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl"))))] __align: [::c_int; 0], + #[cfg(all(not(features = "align"), + not(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl")))))] + __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } + #[cfg_attr(all(feature = "align", + any(target_env = "musl", target_pointer_width = "32")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(target_env = "musl"), + target_pointer_width = "64"), + repr(align(8)))] pub struct pthread_rwlockattr_t { - #[cfg(any(target_env = "musl"))] + #[cfg(all(not(feature = "align"), target_env = "musl"))] 
__align: [::c_int; 0], - #[cfg(not(any(target_env = "musl")))] + #[cfg(all(not(feature = "align"), not(target_env = "musl")))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCKATTR_T], } + #[cfg_attr(all(feature = "align", + target_env = "musl", + target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + target_env = "musl", + target_pointer_width = "64"), + repr(align(8)))] + #[cfg_attr(all(feature = "align", + not(target_env = "musl"), + target_arch = "x86"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(target_env = "musl"), + not(target_arch = "x86")), + repr(align(8)))] pub struct pthread_cond_t { - #[cfg(any(target_env = "musl"))] + #[cfg(all(not(feature = "align"), target_env = "musl"))] __align: [*const ::c_void; 0], - #[cfg(not(any(target_env = "musl")))] + #[cfg(not(any(feature = "align", target_env = "musl")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_COND_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_condattr_t { + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], } @@ -2004,18 +2077,17 @@ pub const RTLD_NOW: ::c_int = 0x2; pub const TCP_MD5SIG: ::c_int = 14; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; +align_const! 
{ + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], + }; +} pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; @@ -3796,6 +3868,8 @@ extern { pub fn sched_rr_get_interval(pid: ::pid_t, tp: *mut ::timespec) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn sched_setparam(pid: ::pid_t, param: *const ::sched_param) -> ::c_int; pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int; pub fn swapoff(puath: *const ::c_char) -> ::c_int; diff --git a/third_party/rust/libc/src/lib.rs b/third_party/rust/libc/src/lib.rs index b58a873eac93..b6b5cdb1618c 100644 --- a/third_party/rust/libc/src/lib.rs +++ b/third_party/rust/libc/src/lib.rs @@ -13,80 +13,81 @@ #![allow(bad_style, overflowing_literals, improper_ctypes)] #![crate_type = "rlib"] #![crate_name = "libc"] -#![cfg_attr(dox, feature(no_core, lang_items))] -#![cfg_attr(dox, no_core)] +#![cfg_attr(cross_platform_docs, feature(no_core, lang_items, const_fn))] +#![cfg_attr(cross_platform_docs, no_core)] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico")] #![cfg_attr(all(target_os = "linux", target_arch = "x86_64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-linux-gnu" ))] #![cfg_attr(all(target_os = "linux", target_arch = "x86"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-unknown-linux-gnu" + html_root_url = 
"https://rust-lang.github.io/libc/i686-unknown-linux-gnu" ))] #![cfg_attr(all(target_os = "linux", target_arch = "arm"), doc( - html_root_url = "https://doc.rust-lang.org/libc/arm-unknown-linux-gnueabihf" + html_root_url = "https://rust-lang.github.io/libc/arm-unknown-linux-gnueabihf" ))] #![cfg_attr(all(target_os = "linux", target_arch = "mips"), doc( - html_root_url = "https://doc.rust-lang.org/libc/mips-unknown-linux-gnu" + html_root_url = "https://rust-lang.github.io/libc/mips-unknown-linux-gnu" ))] #![cfg_attr(all(target_os = "linux", target_arch = "aarch64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu" + html_root_url = "https://rust-lang.github.io/libc/aarch64-unknown-linux-gnu" ))] #![cfg_attr(all(target_os = "linux", target_env = "musl"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-linux-musl" ))] #![cfg_attr(all(target_os = "macos", target_arch = "x86_64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-apple-darwin" + html_root_url = "https://rust-lang.github.io/libc/x86_64-apple-darwin" ))] #![cfg_attr(all(target_os = "macos", target_arch = "x86"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-apple-darwin" + html_root_url = "https://rust-lang.github.io/libc/i686-apple-darwin" ))] #![cfg_attr(all(windows, target_arch = "x86_64", target_env = "gnu"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-pc-windows-gnu" + html_root_url = "https://rust-lang.github.io/libc/x86_64-pc-windows-gnu" ))] #![cfg_attr(all(windows, target_arch = "x86", target_env = "gnu"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-pc-windows-gnu" + html_root_url = "https://rust-lang.github.io/libc/i686-pc-windows-gnu" ))] #![cfg_attr(all(windows, target_arch = "x86_64", target_env = "msvc"), doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-pc-windows-msvc" + html_root_url = 
"https://rust-lang.github.io/libc/x86_64-pc-windows-msvc" ))] #![cfg_attr(all(windows, target_arch = "x86", target_env = "msvc"), doc( - html_root_url = "https://doc.rust-lang.org/libc/i686-pc-windows-msvc" + html_root_url = "https://rust-lang.github.io/libc/i686-pc-windows-msvc" ))] #![cfg_attr(target_os = "android", doc( - html_root_url = "https://doc.rust-lang.org/libc/arm-linux-androideabi" + html_root_url = "https://rust-lang.github.io/libc/arm-linux-androideabi" ))] #![cfg_attr(target_os = "freebsd", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-freebsd" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-freebsd" ))] #![cfg_attr(target_os = "openbsd", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-openbsd" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-openbsd" ))] #![cfg_attr(target_os = "bitrig", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-bitrig" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-bitrig" ))] #![cfg_attr(target_os = "netbsd", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-netbsd" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-netbsd" ))] #![cfg_attr(target_os = "dragonfly", doc( - html_root_url = "https://doc.rust-lang.org/libc/x86_64-unknown-dragonfly" + html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-dragonfly" +))] +#![cfg_attr(target_os = "solaris", doc( + html_root_url = "https://rust-lang.github.io/libc/x86_64-sun-solaris" ))] #![cfg_attr(all(target_os = "emscripten", target_arch = "asmjs"), doc( - html_root_url = "https://doc.rust-lang.org/libc/asmjs-unknown-emscripten" + html_root_url = "https://rust-lang.github.io/libc/asmjs-unknown-emscripten" ))] #![cfg_attr(all(target_os = "emscripten", target_arch = "wasm32"), doc( - html_root_url = "https://doc.rust-lang.org/libc/wasm32-unknown-emscripten" + html_root_url = 
"https://rust-lang.github.io/libc/wasm32-unknown-emscripten" ))] -#![cfg_attr(all(target_os = "linux", target_arch = "xparc64"), doc( - html_root_url = "https://doc.rust-lang.org/libc/sparc64-unknown-linux-gnu" +#![cfg_attr(all(target_os = "linux", target_arch = "sparc64"), doc( + html_root_url = "https://rust-lang.github.io/libc/sparc64-unknown-linux-gnu" ))] // Attributes needed when building as part of the standard library -#![cfg_attr(feature = "stdbuild", feature(no_std, staged_api, custom_attribute, cfg_target_vendor))] -#![cfg_attr(feature = "stdbuild", feature(link_cfg))] -#![cfg_attr(feature = "stdbuild", no_std)] -#![cfg_attr(feature = "stdbuild", staged_api)] +#![cfg_attr(feature = "stdbuild", feature(staged_api, cfg_target_vendor))] +#![cfg_attr(feature = "stdbuild", feature(link_cfg, repr_packed))] #![cfg_attr(feature = "stdbuild", allow(warnings))] #![cfg_attr(feature = "stdbuild", unstable(feature = "libc", reason = "use `libc` from crates.io", @@ -94,7 +95,7 @@ #![cfg_attr(not(feature = "use_std"), no_std)] -#[cfg(all(not(dox), feature = "use_std"))] +#[cfg(all(not(cross_platform_docs), feature = "use_std"))] extern crate std as core; #[macro_use] mod macros; diff --git a/third_party/rust/libc/src/macros.rs b/third_party/rust/libc/src/macros.rs index 842944201441..f48ad45941f6 100644 --- a/third_party/rust/libc/src/macros.rs +++ b/third_party/rust/libc/src/macros.rs @@ -35,11 +35,11 @@ macro_rules! __cfg_if_apply { } macro_rules! s { - ($($(#[$attr:meta])* pub struct $i:ident { $($field:tt)* })*) => ($( + ($($(#[$attr:meta])* pub $t:ident $i:ident { $($field:tt)* })*) => ($( __item! { #[repr(C)] $(#[$attr])* - pub struct $i { $($field)* } + pub $t $i { $($field)* } } impl ::dox::Copy for $i {} impl ::dox::Clone for $i { @@ -53,12 +53,12 @@ macro_rules! 
f { $($body:stmt);* })*) => ($( #[inline] - #[cfg(not(dox))] + #[cfg(not(cross_platform_docs))] pub unsafe extern fn $i($($arg: $argty),*) -> $ret { $($body);* } - #[cfg(dox)] + #[cfg(cross_platform_docs)] #[allow(dead_code)] pub unsafe extern fn $i($($arg: $argty),*) -> $ret { loop {} @@ -69,3 +69,20 @@ macro_rules! f { macro_rules! __item { ($i:item) => ($i) } + +#[allow(unused_macros)] +macro_rules! align_const { + ($($(#[$attr:meta])* pub const $name:ident : $t1:ty = $t2:ident { $($field:tt)* };)*) => ($( + #[cfg(feature = "align")] + $(#[$attr])* + pub const $name : $t1 = $t2 { + $($field)* + }; + #[cfg(not(feature = "align"))] + $(#[$attr])* + pub const $name : $t1 = $t2 { + $($field)* + __align: [], + }; + )*) +} diff --git a/third_party/rust/libc/src/redox/mod.rs b/third_party/rust/libc/src/redox/mod.rs index 82b296f96553..e32c4b385b17 100644 --- a/third_party/rust/libc/src/redox/mod.rs +++ b/third_party/rust/libc/src/redox/mod.rs @@ -85,7 +85,41 @@ pub const O_SYMLINK: ::c_int = 0x4000_0000; pub const O_NOFOLLOW: ::c_int = 0x8000_0000; pub const O_ACCMODE: ::c_int = O_RDONLY | O_WRONLY | O_RDWR; +pub const SIGHUP: ::c_int = 1; +pub const SIGINT: ::c_int = 2; +pub const SIGQUIT: ::c_int = 3; +pub const SIGILL: ::c_int = 4; +pub const SIGTRAP: ::c_int = 5; +pub const SIGABRT: ::c_int = 6; +pub const SIGBUS: ::c_int = 7; +pub const SIGFPE: ::c_int = 8; +pub const SIGKILL: ::c_int = 9; +pub const SIGUSR1: ::c_int = 10; +pub const SIGSEGV: ::c_int = 11; +pub const SIGUSR2: ::c_int = 12; +pub const SIGPIPE: ::c_int = 13; +pub const SIGALRM: ::c_int = 14; +pub const SIGTERM: ::c_int = 15; +pub const SIGSTKFLT: ::c_int = 16; +pub const SIGCHLD: ::c_int = 17; +pub const SIGCONT: ::c_int = 18; +pub const SIGSTOP: ::c_int = 19; +pub const SIGTSTP: ::c_int = 20; +pub const SIGTTIN: ::c_int = 21; +pub const SIGTTOU: ::c_int = 22; +pub const SIGURG: ::c_int = 23; +pub const SIGXCPU: ::c_int = 24; +pub const SIGXFSZ: ::c_int = 25; +pub const SIGVTALRM: ::c_int = 26; +pub 
const SIGPROF: ::c_int = 27; +pub const SIGWINCH: ::c_int = 28; +pub const SIGIO: ::c_int = 29; +pub const SIGPWR: ::c_int = 30; +pub const SIGSYS: ::c_int = 31; + extern { + pub fn gethostname(name: *mut ::c_char, len: ::size_t) -> ::c_int; + pub fn getpid() -> pid_t; pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void; pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::size_t) -> ::ssize_t; diff --git a/third_party/rust/libc/src/redox/net.rs b/third_party/rust/libc/src/redox/net.rs index 091691643014..fcbb181c3297 100644 --- a/third_party/rust/libc/src/redox/net.rs +++ b/third_party/rust/libc/src/redox/net.rs @@ -9,8 +9,10 @@ s! { pub s_addr: in_addr_t, } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct in6_addr { pub s6_addr: [u8; 16], + #[cfg(not(feature = "align"))] __align: [u32; 0], } @@ -107,4 +109,16 @@ extern { pub fn setsockopt(socket: ::c_int, level: ::c_int, name: ::c_int, value: *const ::c_void, option_len: socklen_t) -> ::c_int; + pub fn getpeername(socket: ::c_int, address: *mut sockaddr, + address_len: *mut socklen_t) -> ::c_int; + pub fn sendto(socket: ::c_int, buf: *const ::c_void, len: ::size_t, + flags: ::c_int, addr: *const sockaddr, + addrlen: socklen_t) -> ::ssize_t; + pub fn send(socket: ::c_int, buf: *const ::c_void, len: ::size_t, + flags: ::c_int) -> ::ssize_t; + pub fn recvfrom(socket: ::c_int, buf: *mut ::c_void, len: ::size_t, + flags: ::c_int, addr: *mut ::sockaddr, + addrlen: *mut ::socklen_t) -> ::ssize_t; + pub fn recv(socket: ::c_int, buf: *mut ::c_void, len: ::size_t, + flags: ::c_int) -> ::ssize_t; } diff --git a/third_party/rust/libc/src/unix/bsd/apple/b64.rs b/third_party/rust/libc/src/unix/bsd/apple/b64.rs index 2b34f853457e..ca98f2095236 100644 --- a/third_party/rust/libc/src/unix/bsd/apple/b64.rs +++ b/third_party/rust/libc/src/unix/bsd/apple/b64.rs @@ -63,5 +63,3 @@ pub const __PTHREAD_RWLOCKATTR_SIZE__: usize = 16; pub const TIOCTIMESTAMP: ::c_ulong = 0x40107459; pub const TIOCDCDTIMESTAMP: 
::c_ulong = 0x40107458; - -pub const FIONREAD: ::c_ulong = 0x4004667f; diff --git a/third_party/rust/libc/src/unix/bsd/apple/mod.rs b/third_party/rust/libc/src/unix/bsd/apple/mod.rs index 9cd5db654708..cf48528b4af2 100644 --- a/third_party/rust/libc/src/unix/bsd/apple/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/apple/mod.rs @@ -245,7 +245,7 @@ s! { pub f_reserved: [::uint32_t; 8], } - // FIXME: this should have align 4 but it's got align 8 on 64-bit + #[cfg_attr(feature = "stdbuild", repr(packed(4)))] pub struct kevent { pub ident: ::uintptr_t, pub filter: ::int16_t, @@ -512,6 +512,17 @@ s! { pub sc_reserved: [::uint32_t; 5], } + pub struct in_pktinfo { + pub ipi_ifindex: ::c_uint, + pub ipi_spec_dst: ::in_addr, + pub ipi_addr: ::in_addr, + } + + pub struct in6_pktinfo { + pub ipi6_addr: ::in6_addr, + pub ipi6_ifindex: ::c_uint, + } + // sys/ipc.h: pub struct ipc_perm { @@ -524,7 +535,7 @@ s! { pub _key: ::key_t, } - // FIXME: this should have align 4 but it's got align 8 on 64-bit + #[cfg_attr(feature = "stdbuild", repr(packed(4)))] pub struct shmid_ds { pub shm_perm: ipc_perm, pub shm_segsz: ::size_t, @@ -538,6 +549,14 @@ s! 
{ pub shm_internal: *mut ::c_void, } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } } pub const _UTX_USERSIZE: usize = 256; @@ -746,16 +765,8 @@ pub const VM_FLAGS_SUPERPAGE_MASK: ::c_int = 0x70000; pub const VM_FLAGS_RETURN_DATA_ADDR: ::c_int = 0x100000; pub const VM_FLAGS_RETURN_4K_DATA_ADDR: ::c_int = 0x800000; pub const VM_FLAGS_ALIAS_MASK: ::c_int = 0xFF000000; -pub const VM_FLAGS_USER_ALLOCATE: ::c_int = VM_FLAGS_FIXED | VM_FLAGS_ANYWHERE | - VM_FLAGS_PURGABLE | - VM_FLAGS_RANDOM_ADDR | - VM_FLAGS_NO_CACHE | - VM_FLAGS_OVERWRITE | - VM_FLAGS_SUPERPAGE_MASK | - VM_FLAGS_ALIAS_MASK; -pub const VM_FLAGS_USER_MAP: ::c_int = VM_FLAGS_USER_ALLOCATE | - VM_FLAGS_RETURN_4K_DATA_ADDR | - VM_FLAGS_RETURN_DATA_ADDR; +pub const VM_FLAGS_USER_ALLOCATE: ::c_int = 0xff07401b; +pub const VM_FLAGS_USER_MAP: ::c_int = 0xff97401b; pub const VM_FLAGS_USER_REMAP: ::c_int = VM_FLAGS_FIXED | VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR | VM_FLAGS_OVERWRITE | @@ -1066,6 +1077,13 @@ pub const TIOCPTYGRANT: ::c_uint = 0x20007454; pub const TIOCPTYGNAME: ::c_uint = 0x40807453; pub const TIOCPTYUNLK: ::c_uint = 0x20007452; +pub const FIONCLEX: ::c_uint = 0x20006602; +pub const FIONREAD: ::c_ulong = 0x4004667f; +pub const FIOASYNC: ::c_ulong = 0x8004667d; +pub const FIOSETOWN: ::c_ulong = 0x8004667c; +pub const FIOGETOWN: ::c_ulong = 0x4004667b; +pub const FIODTYPE: ::c_ulong = 0x4004667a; + pub const B0: speed_t = 0; pub const B50: speed_t = 50; pub const B75: speed_t = 75; @@ -1140,6 +1158,7 @@ pub const _SC_XOPEN_SHM: ::c_int = 113; pub const _SC_XOPEN_UNIX: ::c_int = 115; pub const _SC_XOPEN_VERSION: ::c_int = 116; pub const _SC_XOPEN_XCU_VERSION: ::c_int = 121; +pub const _SC_PHYS_PAGES: ::c_int = 200; pub const PTHREAD_PROCESS_PRIVATE: ::c_int = 2; pub const PTHREAD_PROCESS_SHARED: ::c_int = 1; @@ -1503,8 +1522,11 @@ pub const IP_TTL: ::c_int = 4; pub const IP_HDRINCL: ::c_int = 2; pub const IP_ADD_MEMBERSHIP: 
::c_int = 12; pub const IP_DROP_MEMBERSHIP: ::c_int = 13; +pub const IP_PKTINFO: ::c_int = 26; pub const IPV6_JOIN_GROUP: ::c_int = 12; pub const IPV6_LEAVE_GROUP: ::c_int = 13; +pub const IPV6_PKTINFO: ::c_int = 46; +pub const IPV6_RECVPKTINFO: ::c_int = 61; pub const TCP_NODELAY: ::c_int = 0x01; pub const TCP_KEEPALIVE: ::c_int = 0x10; @@ -1845,6 +1867,9 @@ pub const NI_MAXHOST: ::socklen_t = 1025; pub const Q_GETQUOTA: ::c_int = 0x300; pub const Q_SETQUOTA: ::c_int = 0x400; +pub const RENAME_SWAP: ::c_uint = 0x00000002; +pub const RENAME_EXCL: ::c_uint = 0x00000004; + pub const RTLD_LOCAL: ::c_int = 0x4; pub const RTLD_FIRST: ::c_int = 0x100; pub const RTLD_NODELETE: ::c_int = 0x80; @@ -2227,10 +2252,14 @@ pub const DLT_LOOP: ::c_uint = 108; // sizeof(int32_t) pub const BPF_ALIGNMENT: ::c_int = 4; +// sys/spawn.h: pub const POSIX_SPAWN_RESETIDS: ::c_int = 0x01; pub const POSIX_SPAWN_SETPGROUP: ::c_int = 0x02; pub const POSIX_SPAWN_SETSIGDEF: ::c_int = 0x04; pub const POSIX_SPAWN_SETSIGMASK: ::c_int = 0x08; +pub const POSIX_SPAWN_SETEXEC: ::c_int = 0x40; +pub const POSIX_SPAWN_START_SUSPENDED: ::c_int = 0x80; +pub const POSIX_SPAWN_CLOEXEC_DEFAULT: ::c_int = 0x4000; // sys/ipc.h: pub const IPC_CREAT: ::c_int = 0x200; @@ -2437,6 +2466,11 @@ extern { size: ::size_t, flags: ::c_int) -> ::ssize_t; pub fn removexattr(path: *const ::c_char, name: *const ::c_char, flags: ::c_int) -> ::c_int; + pub fn renamex_np(from: *const ::c_char, to: *const ::c_char, + flags: ::c_uint) -> ::c_int; + pub fn renameatx_np(fromfd: ::c_int, from: *const ::c_char, + tofd: ::c_int, to: *const ::c_char, + flags: ::c_uint) -> ::c_int; pub fn fremovexattr(filedes: ::c_int, name: *const ::c_char, flags: ::c_int) -> ::c_int; diff --git a/third_party/rust/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs b/third_party/rust/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs index f399f27ccb09..9e1082e53bf8 100644 --- a/third_party/rust/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs +++ 
b/third_party/rust/libc/src/unix/bsd/freebsdlike/dragonfly/mod.rs @@ -115,6 +115,27 @@ s! { pub f_uid_uuid: ::uuid_t, } + pub struct statfs { + pub f_bsize: ::c_long, + pub f_iosize: ::c_long, + pub f_blocks: ::c_long, + pub f_bfree: ::c_long, + pub f_bavail: ::c_long, + pub f_files: ::c_long, + pub f_ffree: ::c_long, + pub f_fsid: ::fsid_t, + pub f_owner: ::uid_t, + pub f_type: ::int32_t, + pub f_flags: ::int32_t, + pub f_syncwrites: ::c_long, + pub f_asyncwrites: ::c_long, + pub f_fstypename: [::c_char; 16], + pub f_mntonname: [::c_char; 90], + pub f_syncreads: ::c_long, + pub f_asyncreads: ::c_long, + pub f_mntfromname: [::c_char; 90], + } + pub struct stat { pub st_ino: ::ino_t, pub st_nlink: ::nlink_t, @@ -760,4 +781,7 @@ extern { pub fn lwp_rtprio(function: ::c_int, pid: ::pid_t, lwpid: lwpid_t, rtp: *mut super::rtprio) -> ::c_int; + + pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int; + pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int; } diff --git a/third_party/rust/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/third_party/rust/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs index 34d34d89ecd2..a64dbc468f0d 100644 --- a/third_party/rust/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/freebsdlike/freebsd/mod.rs @@ -3,7 +3,7 @@ pub type clock_t = i32; pub type ino_t = u32; pub type lwpid_t = i32; pub type nlink_t = u16; -pub type blksize_t = u32; +pub type blksize_t = i32; pub type clockid_t = ::c_int; pub type sem_t = _sem; @@ -99,6 +99,31 @@ s! 
{ pub f_namemax: ::c_ulong, } + pub struct statfs { + pub f_version: ::uint32_t, + pub f_type: ::uint32_t, + pub f_flags: ::uint64_t, + pub f_bsize: ::uint64_t, + pub f_iosize: ::uint64_t, + pub f_blocks: ::uint64_t, + pub f_bfree: ::uint64_t, + pub f_bavail: ::int64_t, + pub f_files: ::uint64_t, + pub f_ffree: ::int64_t, + pub f_syncwrites: ::uint64_t, + pub f_asyncwrites: ::uint64_t, + pub f_syncreads: ::uint64_t, + pub f_asyncreads: ::uint64_t, + f_spare: [::uint64_t; 10], + pub f_namemax: ::uint32_t, + pub f_owner: ::uid_t, + pub f_fsid: ::fsid_t, + f_charspare: [::c_char; 80], + pub f_fstypename: [::c_char; 16], + pub f_mntfromname: [::c_char; 88], + pub f_mntonname: [::c_char; 88], + } + // internal structure has changed over time pub struct _sem { data: [u32; 4], @@ -168,6 +193,8 @@ pub const SIGSTKSZ: ::size_t = 34816; pub const SF_NODISKIO: ::c_int = 0x00000001; pub const SF_MNOWAIT: ::c_int = 0x00000002; pub const SF_SYNC: ::c_int = 0x00000004; +pub const SF_USER_READAHEAD: ::c_int = 0x00000008; +pub const SF_NOCACHE: ::c_int = 0x00000010; pub const O_CLOEXEC: ::c_int = 0x00100000; pub const O_DIRECTORY: ::c_int = 0x00020000; pub const O_EXEC: ::c_int = 0x00040000; @@ -182,7 +209,9 @@ pub const EOWNERDEAD: ::c_int = 96; pub const ELAST: ::c_int = 96; pub const RLIMIT_NPTS: ::c_int = 11; pub const RLIMIT_SWAP: ::c_int = 12; -pub const RLIM_NLIMITS: ::rlim_t = 13; +pub const RLIMIT_KQUEUES: ::c_int = 13; +pub const RLIMIT_UMTXP: ::c_int = 14; +pub const RLIM_NLIMITS: ::rlim_t = 15; pub const Q_GETQUOTA: ::c_int = 0x700; pub const Q_SETQUOTA: ::c_int = 0x800; @@ -801,10 +830,10 @@ pub const SHUTDOWN_TIME: ::c_short = 8; pub const LC_COLLATE_MASK: ::c_int = (1 << 0); pub const LC_CTYPE_MASK: ::c_int = (1 << 1); -pub const LC_MESSAGES_MASK: ::c_int = (1 << 2); -pub const LC_MONETARY_MASK: ::c_int = (1 << 3); -pub const LC_NUMERIC_MASK: ::c_int = (1 << 4); -pub const LC_TIME_MASK: ::c_int = (1 << 5); +pub const LC_MONETARY_MASK: ::c_int =(1 << 2); +pub const 
LC_NUMERIC_MASK: ::c_int = (1 << 3); +pub const LC_TIME_MASK: ::c_int = (1 << 4); +pub const LC_MESSAGES_MASK: ::c_int = (1 << 5); pub const LC_ALL_MASK: ::c_int = LC_COLLATE_MASK | LC_CTYPE_MASK | LC_MESSAGES_MASK @@ -879,6 +908,7 @@ extern { pub fn jail_set(iov: *mut ::iovec, niov: ::c_uint, flags: ::c_int) -> ::c_int; + pub fn fdatasync(fd: ::c_int) -> ::c_int; pub fn posix_fallocate(fd: ::c_int, offset: ::off_t, len: ::off_t) -> ::c_int; pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t, @@ -989,6 +1019,9 @@ extern { fd: ::c_int, newfd: ::c_int, ) -> ::c_int; + + pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int; + pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int; } cfg_if! { diff --git a/third_party/rust/libc/src/unix/bsd/freebsdlike/mod.rs b/third_party/rust/libc/src/unix/bsd/freebsdlike/mod.rs index b3922426f64b..75a7a670e932 100644 --- a/third_party/rust/libc/src/unix/bsd/freebsdlike/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/freebsdlike/mod.rs @@ -175,6 +175,19 @@ s! 
{ pub type_: ::c_ushort, pub prio: ::c_ushort, } + + pub struct in6_pktinfo { + pub ipi6_addr: ::in6_addr, + pub ipi6_ifindex: ::c_uint, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } } pub const AIO_LISTIO_MAX: ::c_int = 16; @@ -635,13 +648,20 @@ pub const SOCK_NONBLOCK: ::c_int = 0x20000000; pub const SOCK_MAXADDRLEN: ::c_int = 255; pub const IP_TTL: ::c_int = 4; pub const IP_HDRINCL: ::c_int = 2; +pub const IP_RECVDSTADDR: ::c_int = 7; +pub const IP_SENDSRCADDR: ::c_int = IP_RECVDSTADDR; pub const IP_ADD_MEMBERSHIP: ::c_int = 12; pub const IP_DROP_MEMBERSHIP: ::c_int = 13; pub const IPV6_JOIN_GROUP: ::c_int = 12; pub const IPV6_LEAVE_GROUP: ::c_int = 13; +pub const IPV6_RECVPKTINFO: ::c_int = 36; +pub const IPV6_PKTINFO: ::c_int = 46; + +pub const TCP_NODELAY: ::c_int = 1; +pub const TCP_KEEPIDLE: ::c_int = 256; +pub const TCP_KEEPINTVL: ::c_int = 512; +pub const TCP_KEEPCNT: ::c_int = 1024; -pub const TCP_NODELAY: ::c_int = 1; -pub const TCP_KEEPIDLE: ::c_int = 256; pub const SOL_SOCKET: ::c_int = 0xffff; pub const SO_DEBUG: ::c_int = 0x01; pub const SO_ACCEPTCONN: ::c_int = 0x0002; @@ -1158,8 +1178,6 @@ extern { pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; pub fn rtprio(function: ::c_int, pid: ::pid_t, rtp: *mut rtprio) -> ::c_int; - pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; - #[cfg_attr(target_os = "freebsd", link_name = "mknodat@FBSD_1.1")] pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; @@ -1172,6 +1190,8 @@ extern { pub fn sethostname(name: *const ::c_char, len: ::c_int) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, abstime: *const ::timespec) -> ::c_int; pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int; diff --git 
a/third_party/rust/libc/src/unix/bsd/netbsdlike/mod.rs b/third_party/rust/libc/src/unix/bsd/netbsdlike/mod.rs index 397fce7ddd6b..6384a29f8c12 100644 --- a/third_party/rust/libc/src/unix/bsd/netbsdlike/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/netbsdlike/mod.rs @@ -35,6 +35,11 @@ s! { pub sin_zero: [::int8_t; 8], } + pub struct in6_pktinfo { + pub ipi6_addr: ::in6_addr, + pub ipi6_ifindex: ::c_uint, + } + pub struct termios { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, @@ -177,7 +182,6 @@ pub const SIGSEGV : ::c_int = 11; pub const SIGPIPE : ::c_int = 13; pub const SIGALRM : ::c_int = 14; pub const SIGTERM : ::c_int = 15; -pub const SIGSTKSZ : ::size_t = 40960; pub const PROT_NONE : ::c_int = 0; pub const PROT_READ : ::c_int = 1; @@ -413,8 +417,11 @@ pub const IP_TTL: ::c_int = 4; pub const IP_HDRINCL: ::c_int = 2; pub const IP_ADD_MEMBERSHIP: ::c_int = 12; pub const IP_DROP_MEMBERSHIP: ::c_int = 13; +pub const IPV6_RECVPKTINFO: ::c_int = 36; +pub const IPV6_PKTINFO: ::c_int = 46; + +pub const TCP_NODELAY: ::c_int = 0x01; -pub const TCP_NODELAY: ::c_int = 0x01; pub const SOL_SOCKET: ::c_int = 0xffff; pub const SO_DEBUG: ::c_int = 0x01; pub const SO_ACCEPTCONN: ::c_int = 0x0002; @@ -547,6 +554,9 @@ pub const TIOCMBIS: ::c_ulong = 0x8004746c; pub const TIOCMSET: ::c_ulong = 0x8004746d; pub const TIOCSTART: ::c_ulong = 0x2000746e; pub const TIOCSTOP: ::c_ulong = 0x2000746f; +pub const TIOCSCTTY: ::c_ulong = 0x20007461; +pub const TIOCGWINSZ: ::c_ulong = 0x40087468; +pub const TIOCSWINSZ: ::c_ulong = 0x80087467; pub const TIOCM_LE: ::c_int = 0o0001; pub const TIOCM_DTR: ::c_int = 0o0002; pub const TIOCM_RTS: ::c_int = 0o0004; @@ -617,14 +627,14 @@ extern { pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int; pub fn setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int; - pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; pub fn mkfifoat(dirfd: 
::c_int, pathname: *const ::c_char, mode: ::mode_t) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: ::clockid_t) -> ::c_int; pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int; @@ -637,15 +647,6 @@ extern { groups: *mut ::gid_t, ngroups: *mut ::c_int) -> ::c_int; pub fn initgroups(name: *const ::c_char, basegid: ::gid_t) -> ::c_int; - #[cfg_attr(target_os = "netbsd", link_name = "__getpwent_r50")] - pub fn getpwent_r(pwd: *mut ::passwd, - buf: *mut ::c_char, - buflen: ::size_t, - result: *mut *mut ::passwd) -> ::c_int; - pub fn getgrent_r(grp: *mut ::group, - buf: *mut ::c_char, - buflen: ::size_t, - result: *mut *mut ::group) -> ::c_int; pub fn fexecve(fd: ::c_int, argv: *const *const ::c_char, envp: *const *const ::c_char) -> ::c_int; diff --git a/third_party/rust/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs b/third_party/rust/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs index 4c528b568ed4..065f6bd36a22 100644 --- a/third_party/rust/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/netbsdlike/netbsd/mod.rs @@ -313,6 +313,20 @@ s! 
{ pub sdl_slen: ::uint8_t, pub sdl_data: [::c_char; 12], } + + pub struct in_pktinfo { + pub ipi_addr: ::in_addr, + pub ipi_ifindex: ::c_uint, + } + + #[repr(packed)] + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } } pub const AT_FDCWD: ::c_int = -100; @@ -371,9 +385,16 @@ pub const F_GETNOSIGPIPE: ::c_int = 13; pub const F_SETNOSIGPIPE: ::c_int = 14; pub const F_MAXFD: ::c_int = 11; +pub const IP_PKTINFO: ::c_int = 25; +pub const IP_RECVPKTINFO: ::c_int = 26; pub const IPV6_JOIN_GROUP: ::c_int = 12; pub const IPV6_LEAVE_GROUP: ::c_int = 13; +pub const TCP_KEEPIDLE: ::c_int = 3; +pub const TCP_KEEPINTVL: ::c_int = 5; +pub const TCP_KEEPCNT: ::c_int = 6; +pub const TCP_KEEPINIT: ::c_int = 7; + pub const SOCK_CONN_DGRAM: ::c_int = 6; pub const SOCK_DCCP: ::c_int = SOCK_CONN_DGRAM; pub const SOCK_NOSIGPIPE: ::c_int = 0x40000000; @@ -947,6 +968,8 @@ pub const CHWFLOW: ::tcflag_t = ::MDMBUF | ::CRTSCTS | ::CDTRCTS; pub const SOCK_CLOEXEC: ::c_int = 0x10000000; pub const SOCK_NONBLOCK: ::c_int = 0x20000000; +pub const SIGSTKSZ : ::size_t = 40960; + // dirfd() is a macro on netbsd to access // the first field of the struct where dirp points to: // http://cvsweb.netbsd.org/bsdweb.cgi/src/include/dirent.h?rev=1.36 @@ -1074,5 +1097,18 @@ extern { pub fn settimeofday(tv: *const ::timeval, tz: *const ::c_void) -> ::c_int; } +#[link(name = "util")] +extern { + #[cfg_attr(target_os = "netbsd", link_name = "__getpwent_r50")] + pub fn getpwent_r(pwd: *mut ::passwd, + buf: *mut ::c_char, + buflen: ::size_t, + result: *mut *mut ::passwd) -> ::c_int; + pub fn getgrent_r(grp: *mut ::group, + buf: *mut ::c_char, + buflen: ::size_t, + result: *mut *mut ::group) -> ::c_int; +} + mod other; pub use self::other::*; diff --git a/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs b/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs index ac4acadd30d4..557420485a6a 100644 --- 
a/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs @@ -65,6 +65,26 @@ pub const ALTMON_12: ::nl_item = 69; pub const KERN_RND: ::c_int = 31; +// https://github.com/bitrig/bitrig/blob/master/sys/net/if.h#L187 +pub const IFF_UP: ::c_int = 0x1; // interface is up +pub const IFF_BROADCAST: ::c_int = 0x2; // broadcast address valid +pub const IFF_DEBUG: ::c_int = 0x4; // turn on debugging +pub const IFF_LOOPBACK: ::c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: ::c_int = 0x10; // interface is point-to-point link +pub const IFF_NOTRAILERS: ::c_int = 0x20; // avoid use of trailers +pub const IFF_RUNNING: ::c_int = 0x40; // resources allocated +pub const IFF_NOARP: ::c_int = 0x80; // no address resolution protocol +pub const IFF_PROMISC: ::c_int = 0x100; // receive all packets +pub const IFF_ALLMULTI: ::c_int = 0x200; // receive all multicast packets +pub const IFF_OACTIVE: ::c_int = 0x400; // transmission in progress +pub const IFF_SIMPLEX: ::c_int = 0x800; // can't hear own transmissions +pub const IFF_LINK0: ::c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: ::c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: ::c_int = 0x4000; // per link layer defined bit +pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast + +pub const SIGSTKSZ : ::size_t = 40960; + extern { pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; pub fn duplocale(base: ::locale_t) -> ::locale_t; @@ -73,6 +93,8 @@ extern { locale: *const ::c_char, base: ::locale_t) -> ::locale_t; pub fn uselocale(loc: ::locale_t) -> ::locale_t; + pub fn pledge(promises: *const ::c_char, + paths: *mut *const ::c_char) -> ::c_int; pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; } diff --git a/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs 
b/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs index 1f40e346c8b4..bf5ddd2e7a8e 100644 --- a/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs @@ -187,6 +187,20 @@ s! { pub sdl_slen: ::c_uchar, pub sdl_data: [::c_char; 24], } + + pub struct sockpeercred { + pub uid: ::uid_t, + pub gid: ::gid_t, + pub pid: ::pid_t, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } } pub const UT_NAMESIZE: usize = 32; @@ -236,24 +250,6 @@ pub const SO_RTABLE: ::c_int = 0x1021; pub const SO_PEERCRED: ::c_int = 0x1022; pub const SO_SPLICE: ::c_int = 0x1023; -// https://github.com/openbsd/src/blob/master/sys/net/if.h#L187 -pub const IFF_UP: ::c_int = 0x1; // interface is up -pub const IFF_BROADCAST: ::c_int = 0x2; // broadcast address valid -pub const IFF_DEBUG: ::c_int = 0x4; // turn on debugging -pub const IFF_LOOPBACK: ::c_int = 0x8; // is a loopback net -pub const IFF_POINTOPOINT: ::c_int = 0x10; // interface is point-to-point link -pub const IFF_NOTRAILERS: ::c_int = 0x20; // avoid use of trailers -pub const IFF_RUNNING: ::c_int = 0x40; // resources allocated -pub const IFF_NOARP: ::c_int = 0x80; // no address resolution protocol -pub const IFF_PROMISC: ::c_int = 0x100; // receive all packets -pub const IFF_ALLMULTI: ::c_int = 0x200; // receive all multicast packets -pub const IFF_OACTIVE: ::c_int = 0x400; // transmission in progress -pub const IFF_SIMPLEX: ::c_int = 0x800; // can't hear own transmissions -pub const IFF_LINK0: ::c_int = 0x1000; // per link layer defined bit -pub const IFF_LINK1: ::c_int = 0x2000; // per link layer defined bit -pub const IFF_LINK2: ::c_int = 0x4000; // per link layer defined bit -pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast - // sys/netinet/in.h // Protocols (RFC 1700) // NOTE: These are in addition to the constants defined in src/unix/mod.rs @@ -320,6 
+316,9 @@ pub const IPPROTO_MAX: ::c_int = 256; /// Divert sockets pub const IPPROTO_DIVERT: ::c_int = 258; +pub const IP_RECVDSTADDR: ::c_int = 7; +pub const IP_SENDSRCADDR: ::c_int = IP_RECVDSTADDR; + pub const AF_ECMA: ::c_int = 8; pub const AF_ROUTE: ::c_int = 17; pub const AF_ENCAP: ::c_int = 28; @@ -588,6 +587,7 @@ pub const CTL_MACHDEP: ::c_int = 7; pub const CTL_DDB: ::c_int = 9; pub const CTL_VFS: ::c_int = 10; pub const CTL_MAXID: ::c_int = 11; +pub const HW_NCPUONLINE: ::c_int = 25; pub const KERN_OSTYPE: ::c_int = 1; pub const KERN_OSRELEASE: ::c_int = 2; pub const KERN_OSREV: ::c_int = 3; @@ -662,7 +662,8 @@ pub const KERN_PROC_VMMAP: ::c_int = 80; pub const KERN_GLOBAL_PTRACE: ::c_int = 81; pub const KERN_CONSBUFSIZE: ::c_int = 82; pub const KERN_CONSBUF: ::c_int = 83; -pub const KERN_MAXID: ::c_int = 84; +pub const KERN_AUDIO: ::c_int = 84; +pub const KERN_MAXID: ::c_int = 85; pub const KERN_PROC_ALL: ::c_int = 0; pub const KERN_PROC_PID: ::c_int = 1; pub const KERN_PROC_PGRP: ::c_int = 2; @@ -731,8 +732,6 @@ extern { newlen: ::size_t) -> ::c_int; pub fn getentropy(buf: *mut ::c_void, buflen: ::size_t) -> ::c_int; - pub fn pledge(promises: *const ::c_char, - paths: *mut *const ::c_char) -> ::c_int; pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int; pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int; } diff --git a/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs b/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs index e7e5876ba4dc..5e6948115f86 100644 --- a/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs +++ b/third_party/rust/libc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs @@ -25,13 +25,242 @@ s! 
{ pub int_p_sign_posn: ::c_char, pub int_n_sign_posn: ::c_char, } + + pub struct statfs { + pub f_flags: ::uint32_t, + pub f_bsize: ::uint32_t, + pub f_iosize: ::uint32_t, + pub f_blocks: ::uint64_t, + pub f_bfree: ::uint64_t, + pub f_bavail: ::int64_t, + pub f_files: ::uint64_t, + pub f_ffree: ::uint64_t, + pub f_favail: ::int64_t, + pub f_syncwrites: ::uint64_t, + pub f_syncreads: ::uint64_t, + pub f_asyncwrites: ::uint64_t, + pub f_asyncreads: ::uint64_t, + pub f_fsid: ::fsid_t, + pub f_namemax: ::uint32_t, + pub f_owner: ::uid_t, + pub f_ctime: ::uint64_t, + pub f_fstypename: [::c_char; 16], + pub f_mntonname: [::c_char; 90], + pub f_mntfromname: [::c_char; 90], + pub f_mntfromspec: [::c_char; 90], + pub mount_info: mount_info, + } + + pub union mount_info { + pub ufs_args: ufs_args, + pub mfs_args: mfs_args, + pub nfs_args: nfs_args, + pub iso_args: iso_args, + pub msdosfs_args: msdosfs_args, + pub ntfs_args: ntfs_args, + pub tmpfs_args: tmpfs_args, + align: [::c_char; 160], + } + + pub struct ufs_args { + pub fspec: *mut ::c_char, + pub export_info: export_args, + } + + pub struct mfs_args { + pub fspec: *mut ::c_char, + pub export_info: export_args, + // https://github.com/openbsd/src/blob/master/sys/sys/types.h#L134 + pub base: *mut ::c_char, + pub size: ::c_ulong, + } + + pub struct iso_args { + pub fspec: *mut ::c_char, + pub export_info: export_args, + pub flags: ::c_int, + pub sess: ::c_int, + } + + pub struct nfs_args { + pub version: ::c_int, + pub addr: *mut ::sockaddr, + pub addrlen: ::c_int, + pub sotype: ::c_int, + pub proto: ::c_int, + pub fh: *mut ::c_uchar, + pub fhsize: ::c_int, + pub flags: ::c_int, + pub wsize: ::c_int, + pub rsize: ::c_int, + pub readdirsize: ::c_int, + pub timeo: ::c_int, + pub retrans: ::c_int, + pub maxgrouplist: ::c_int, + pub readahead: ::c_int, + pub leaseterm: ::c_int, + pub deadthresh: ::c_int, + pub hostname: *mut ::c_char, + pub acregmin: ::c_int, + pub acregmax: ::c_int, + pub acdirmin: ::c_int, + pub acdirmax: 
::c_int, + } + + pub struct msdosfs_args { + pub fspec: *mut ::c_char, + pub export_info: export_args, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub mask: ::mode_t, + pub flags: ::c_int, + } + + pub struct ntfs_args { + pub fspec: *mut ::c_char, + pub export_info: export_args, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub mode: ::mode_t, + pub flag: ::c_ulong, + } + + pub struct udf_args { + pub fspec: *mut ::c_char, + pub lastblock: ::uint32_t, + } + + pub struct tmpfs_args { + pub ta_version: ::c_int, + pub ta_nodes_max: ::ino_t, + pub ta_size_max: ::off_t, + pub ta_root_uid: ::uid_t, + pub ta_root_gid: ::gid_t, + pub ta_root_mode: ::mode_t, + } + + pub struct fusefs_args { + pub name: *mut ::c_char, + pub fd: ::c_int, + pub max_read: ::c_int, + pub allow_other: ::c_int, + } + + pub struct xucred { + pub cr_uid: ::uid_t, + pub cr_gid: ::gid_t, + pub cr_ngroups: ::c_short, + //https://github.com/openbsd/src/blob/master/sys/sys/syslimits.h#L44 + pub cr_groups: [::gid_t; 16], + } + + pub struct export_args { + pub ex_flags: ::c_int, + pub ex_root: ::uid_t, + pub ex_anon: xucred, + pub ex_addr: *mut ::sockaddr, + pub ex_addrlen: ::c_int, + pub ex_mask: *mut ::sockaddr, + pub ex_masklen: ::c_int, + } } +//https://github.com/openbsd/src/blob/master/sys/sys/mount.h +pub const ISOFSMNT_NORRIP: ::c_int = 0x1; // disable Rock Ridge Ext +pub const ISOFSMNT_GENS: ::c_int = 0x2; // enable generation numbers +pub const ISOFSMNT_EXTATT: ::c_int = 0x4; // enable extended attr +pub const ISOFSMNT_NOJOLIET: ::c_int = 0x8; // disable Joliet Ext +pub const ISOFSMNT_SESS: ::c_int = 0x10; // use iso_args.sess + +pub const NFS_ARGSVERSION: ::c_int = 4; // change when nfs_args changes + +pub const NFSMNT_RESVPORT: ::c_int = 0; // always use reserved ports +pub const NFSMNT_SOFT: ::c_int = 0x1; // soft mount (hard is default) +pub const NFSMNT_WSIZE: ::c_int = 0x2; // set write size +pub const NFSMNT_RSIZE: ::c_int = 0x4; // set read size +pub const NFSMNT_TIMEO: ::c_int = 0x8; // set 
initial timeout +pub const NFSMNT_RETRANS: ::c_int = 0x10; // set number of request retries +pub const NFSMNT_MAXGRPS: ::c_int = 0x20; // set maximum grouplist size +pub const NFSMNT_INT: ::c_int = 0x40; // allow interrupts on hard mount +pub const NFSMNT_NOCONN: ::c_int = 0x80; // Don't Connect the socket +pub const NFSMNT_NQNFS: ::c_int = 0x100; // Use Nqnfs protocol +pub const NFSMNT_NFSV3: ::c_int = 0x200; // Use NFS Version 3 protocol +pub const NFSMNT_KERB: ::c_int = 0x400; // Use Kerberos authentication +pub const NFSMNT_DUMBTIMR: ::c_int = 0x800; // Don't estimate rtt dynamically +pub const NFSMNT_LEASETERM: ::c_int = 0x1000; // set lease term (nqnfs) +pub const NFSMNT_READAHEAD: ::c_int = 0x2000; // set read ahead +pub const NFSMNT_DEADTHRESH: ::c_int = 0x4000; // set dead server retry thresh +pub const NFSMNT_NOAC: ::c_int = 0x8000; // disable attribute cache +pub const NFSMNT_RDIRPLUS: ::c_int = 0x10000; // Use Readdirplus for V3 +pub const NFSMNT_READDIRSIZE: ::c_int = 0x20000; // Set readdir size + +/* Flags valid only in mount syscall arguments */ +pub const NFSMNT_ACREGMIN: ::c_int = 0x40000; // acregmin field valid +pub const NFSMNT_ACREGMAX: ::c_int = 0x80000; // acregmax field valid +pub const NFSMNT_ACDIRMIN: ::c_int = 0x100000; // acdirmin field valid +pub const NFSMNT_ACDIRMAX: ::c_int = 0x200000; // acdirmax field valid + +/* Flags valid only in kernel */ +pub const NFSMNT_INTERNAL: ::c_int = 0xfffc0000; // Bits set internally +pub const NFSMNT_HASWRITEVERF: ::c_int = 0x40000; // Has write verifier for V3 +pub const NFSMNT_GOTPATHCONF: ::c_int = 0x80000; // Got the V3 pathconf info +pub const NFSMNT_GOTFSINFO: ::c_int = 0x100000; // Got the V3 fsinfo +pub const NFSMNT_MNTD: ::c_int = 0x200000; // Mnt server for mnt point +pub const NFSMNT_DISMINPROG: ::c_int = 0x400000; // Dismount in progress +pub const NFSMNT_DISMNT: ::c_int = 0x800000; // Dismounted +pub const NFSMNT_SNDLOCK: ::c_int = 0x1000000; // Send socket lock +pub const 
NFSMNT_WANTSND: ::c_int = 0x2000000; // Want above +pub const NFSMNT_RCVLOCK: ::c_int = 0x4000000; // Rcv socket lock +pub const NFSMNT_WANTRCV: ::c_int = 0x8000000; // Want above +pub const NFSMNT_WAITAUTH: ::c_int = 0x10000000; // Wait for authentication +pub const NFSMNT_HASAUTH: ::c_int = 0x20000000; // Has authenticator +pub const NFSMNT_WANTAUTH: ::c_int = 0x40000000; // Wants an authenticator +pub const NFSMNT_AUTHERR: ::c_int = 0x80000000; // Authentication error + +pub const MSDOSFSMNT_SHORTNAME: ::c_int = 0x1; // Force old DOS short names only +pub const MSDOSFSMNT_LONGNAME: ::c_int = 0x2; // Force Win'95 long names +pub const MSDOSFSMNT_NOWIN95: ::c_int = 0x4; // Completely ignore Win95 entries + +pub const NTFS_MFLAG_CASEINS: ::c_int = 0x1; +pub const NTFS_MFLAG_ALLNAMES: ::c_int = 0x2; + +pub const TMPFS_ARGS_VERSION: ::c_int = 1; + +pub const MAP_STACK : ::c_int = 0x4000; + +// https://github.com/openbsd/src/blob/master/sys/net/if.h#L187 +pub const IFF_UP: ::c_int = 0x1; // interface is up +pub const IFF_BROADCAST: ::c_int = 0x2; // broadcast address valid +pub const IFF_DEBUG: ::c_int = 0x4; // turn on debugging +pub const IFF_LOOPBACK: ::c_int = 0x8; // is a loopback net +pub const IFF_POINTOPOINT: ::c_int = 0x10; // interface is point-to-point link +pub const IFF_STATICARP: ::c_int = 0x20; // only static ARP +pub const IFF_RUNNING: ::c_int = 0x40; // resources allocated +pub const IFF_NOARP: ::c_int = 0x80; // no address resolution protocol +pub const IFF_PROMISC: ::c_int = 0x100; // receive all packets +pub const IFF_ALLMULTI: ::c_int = 0x200; // receive all multicast packets +pub const IFF_OACTIVE: ::c_int = 0x400; // transmission in progress +pub const IFF_SIMPLEX: ::c_int = 0x800; // can't hear own transmissions +pub const IFF_LINK0: ::c_int = 0x1000; // per link layer defined bit +pub const IFF_LINK1: ::c_int = 0x2000; // per link layer defined bit +pub const IFF_LINK2: ::c_int = 0x4000; // per link layer defined bit +pub const IFF_MULTICAST: 
::c_int = 0x8000; // supports multicast + +pub const SIGSTKSZ : ::size_t = 28672; + extern { pub fn accept4(s: ::c_int, addr: *mut ::sockaddr, addrlen: *mut ::socklen_t, flags: ::c_int) -> ::c_int; pub fn execvpe(file: *const ::c_char, argv: *const *const ::c_char, envp: *const *const ::c_char) -> ::c_int; + pub fn pledge(promises: *const ::c_char, + execpromises: *const ::c_char) -> ::c_int; + pub fn strtonum(nptr: *const ::c_char, minval: ::c_longlong, + maxval: ::c_longlong, + errstr: *mut *const ::c_char) -> ::c_longlong; + + pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int; + pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int; } cfg_if! { diff --git a/third_party/rust/libc/src/unix/haiku/mod.rs b/third_party/rust/libc/src/unix/haiku/mod.rs index 9b0252d8ecce..2ef36f8f38e9 100644 --- a/third_party/rust/libc/src/unix/haiku/mod.rs +++ b/third_party/rust/libc/src/unix/haiku/mod.rs @@ -379,6 +379,7 @@ pub const RLIMIT_CPU: ::c_int = 1; pub const RLIMIT_DATA: ::c_int = 2; pub const RLIMIT_FSIZE: ::c_int = 3; pub const RLIMIT_NOFILE: ::c_int = 4; +pub const RLIMIT_STACK: ::c_int = 5; pub const RLIMIT_AS: ::c_int = 6; // Haiku specific pub const RLIMIT_NOVMON: ::c_int = 7; @@ -386,7 +387,7 @@ pub const RLIMIT_NLIMITS: ::c_int = 8; pub const RUSAGE_SELF: ::c_int = 0; -pub const RTLD_LAXY: ::c_int = 0; +pub const RTLD_LAZY: ::c_int = 0; pub const NCCS: usize = 11; @@ -645,22 +646,43 @@ pub const AF_UNIX: ::c_int = AF_LOCAL; pub const AF_BLUETOOTH: ::c_int = 10; pub const AF_MAX: ::c_int = 11; +pub const IP_OPTIONS: ::c_int = 1; +pub const IP_HDRINCL: ::c_int = 2; +pub const IP_TOS: ::c_int = 3; +pub const IP_TTL: ::c_int = 4; +pub const IP_RECVOPTS: ::c_int = 5; +pub const IP_RECVRETOPTS: ::c_int = 6; +pub const IP_RECVDSTADDR: ::c_int = 7; +pub const IP_RETOPTS: ::c_int = 8; +pub const IP_MULTICAST_IF: ::c_int = 9; pub const IP_MULTICAST_TTL: ::c_int = 10; pub const IP_MULTICAST_LOOP: ::c_int = 11; -pub const IP_TTL: ::c_int = 4; -pub const 
IP_HDRINCL: ::c_int = 2; pub const IP_ADD_MEMBERSHIP: ::c_int = 12; pub const IP_DROP_MEMBERSHIP: ::c_int = 13; +pub const IP_BLOCK_SOURCE: ::c_int = 14; +pub const IP_UNBLOCK_SOURCE: ::c_int = 15; +pub const IP_ADD_SOURCE_MEMBERSHIP: ::c_int = 16; +pub const IP_DROP_SOURCE_MEMBERSHIP: ::c_int = 17; pub const TCP_NODELAY: ::c_int = 0x01; pub const TCP_MAXSEG: ::c_int = 0x02; pub const TCP_NOPUSH: ::c_int = 0x04; pub const TCP_NOOPT: ::c_int = 0x08; +pub const IPV6_MULTICAST_IF: ::c_int = 24; +pub const IPV6_MULTICAST_HOPS: ::c_int = 25; pub const IPV6_MULTICAST_LOOP: ::c_int = 26; +pub const IPV6_UNICAST_HOPS: ::c_int = 27; pub const IPV6_JOIN_GROUP: ::c_int = 28; pub const IPV6_LEAVE_GROUP: ::c_int = 29; pub const IPV6_V6ONLY: ::c_int = 30; +pub const IPV6_PKTINFO: ::c_int = 31; +pub const IPV6_RECVPKTINFO: ::c_int = 32; +pub const IPV6_HOPLIMIT: ::c_int = 33; +pub const IPV6_REVCHOPLIMIT: ::c_int = 34; +pub const IPV6_HOPOPTS: ::c_int = 35; +pub const IPV6_DSTOPTS: ::c_int = 36; +pub const IPV6_RTHDR: ::c_int = 37; pub const MSG_OOB: ::c_int = 0x0001; pub const MSG_PEEK: ::c_int = 0x0002; @@ -701,7 +723,9 @@ pub const SA_ONESHOT: ::c_int = SA_RESETHAND; pub const FD_SETSIZE: usize = 1024; +pub const RTLD_LOCAL: ::c_int = 0x0; pub const RTLD_NOW: ::c_int = 0x1; +pub const RTLD_GLOBAL: ::c_int = 0x2; pub const RTLD_DEFAULT: *mut ::c_void = 0isize as *mut ::c_void; pub const BUFSIZ: ::c_uint = 8192; @@ -1094,7 +1118,6 @@ extern { pub fn waitid(idtype: idtype_t, id: id_t, infop: *mut ::siginfo_t, options: ::c_int) -> ::c_int; - pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; pub fn glob(pattern: *const ::c_char, flags: ::c_int, errfunc: Option or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// liblibc port for HermitCore (https://hermitcore.org) +// HermitCore is a unikernel based on lwIP, newlib, and +// pthread-embedded. 
+// Consider these definitions when porting liblibc to another +// lwIP/newlib/pte-based target. +// +// Ported by Colin Finck + +pub type c_long = i64; +pub type c_ulong = u64; + +pub type uid_t = u16; +pub type gid_t = u16; +pub type speed_t = ::c_uint; +pub type mode_t = u32; +pub type dev_t = i16; +pub type nfds_t = ::c_ulong; +pub type socklen_t = u32; +pub type sa_family_t = u8; +pub type clock_t = c_ulong; +pub type time_t = c_long; +pub type suseconds_t = c_long; +pub type off_t = i64; +pub type rlim_t = ::c_ulonglong; +pub type sigset_t = ::c_ulong; +pub type ino_t = u16; +pub type nlink_t = u16; +pub type blksize_t = c_long; +pub type blkcnt_t = c_long; +pub type stat64 = stat; +pub type clockid_t = c_ulong; +pub type pthread_t = pte_handle_t; +pub type pthread_attr_t = usize; +pub type pthread_cond_t = usize; +pub type pthread_condattr_t = usize; +pub type pthread_key_t = usize; +pub type pthread_mutex_t = usize; +pub type pthread_mutexattr_t = usize; +pub type pthread_rwlock_t = usize; +pub type pthread_rwlockattr_t = usize; + +s! 
{ + pub struct addrinfo { + pub ai_flags: ::c_int, + pub ai_family: ::c_int, + pub ai_socktype: ::c_int, + pub ai_protocol: ::c_int, + pub ai_addrlen: socklen_t, + pub ai_addr: *mut ::sockaddr, + pub ai_canonname: *mut c_char, + pub ai_next: *mut addrinfo, + } + + pub struct dirent { + pub d_ino: ::c_long, + pub d_off: off_t, + pub d_reclen: u16, + pub d_name: [::c_char; 256], + } + + pub struct Dl_info {} + + pub struct fd_set { + fds_bits: [::c_ulong; FD_SETSIZE / ULONG_SIZE], + } + + pub struct lconv { + pub decimal_point: *mut ::c_char, + pub thousands_sep: *mut ::c_char, + pub grouping: *mut ::c_char, + pub int_curr_symbol: *mut ::c_char, + pub currency_symbol: *mut ::c_char, + pub mon_decimal_point: *mut ::c_char, + pub mon_thousands_sep: *mut ::c_char, + pub mon_grouping: *mut ::c_char, + pub positive_sign: *mut ::c_char, + pub negative_sign: *mut ::c_char, + pub int_frac_digits: ::c_char, + pub frac_digits: ::c_char, + pub p_cs_precedes: ::c_char, + pub p_sep_by_space: ::c_char, + pub n_cs_precedes: ::c_char, + pub n_sep_by_space: ::c_char, + pub p_sign_posn: ::c_char, + pub n_sign_posn: ::c_char, + pub int_p_cs_precedes: ::c_char, + pub int_p_sep_by_space: ::c_char, + pub int_n_cs_precedes: ::c_char, + pub int_n_sep_by_space: ::c_char, + pub int_p_sign_posn: ::c_char, + pub int_n_sign_posn: ::c_char, + } + + pub struct passwd { // Unverified + pub pw_name: *mut ::c_char, + pub pw_passwd: *mut ::c_char, + pub pw_uid: ::uid_t, + pub pw_gid: ::gid_t, + pub pw_gecos: *mut ::c_char, + pub pw_dir: *mut ::c_char, + pub pw_shell: *mut ::c_char, + } + + pub struct pte_handle_t { + pub p: usize, + pub x: ::c_uint, + } + + pub struct sched_param { + pub sched_priority: ::c_int, + } + + pub struct sem_t { + pub value: i32, + pub lock: usize, + pub sem: usize, + } + + pub struct sigaction { + pub sa_flags: ::c_int, + pub sa_mask: sigset_t, + pub sa_handler: usize, + } + + pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [::c_char; 
14], + } + + pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: ::in_port_t, + pub sin_addr: ::in_addr, + pub sin_zero: [::c_char; 8], + } + + pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: sa_family_t, + pub sin6_port: ::in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: ::in6_addr, + pub sin6_scope_id: u32, + } + + pub struct sockaddr_storage { + pub s2_len: u8, + pub ss_family: sa_family_t, + pub s2_data1: [::c_char; 2], + pub s2_data2: [u32; 3], + pub s2_data3: [u32; 3], + } + + // Dummy + pub struct sockaddr_un { + pub sun_family: sa_family_t, + pub sun_path: [::c_char; 108], + } + + pub struct stat { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: dev_t, + pub st_size: off_t, + pub st_atime: time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: time_t, + pub st_ctime_nsec: ::c_long, + pub st_blksize: blksize_t, + pub st_blocks: blkcnt_t, + pub st_spare4: [::c_long; 2], + } + + pub struct statvfs {} + + pub struct tm { + pub tm_sec: ::c_int, + pub tm_min: ::c_int, + pub tm_hour: ::c_int, + pub tm_mday: ::c_int, + pub tm_mon: ::c_int, + pub tm_year: ::c_int, + pub tm_wday: ::c_int, + pub tm_yday: ::c_int, + pub tm_isdst: ::c_int, + } + + pub struct tms { + pub tms_utime: ::clock_t, + pub tms_stime: ::clock_t, + pub tms_cutime: ::clock_t, + pub tms_cstime: ::clock_t, + } + + pub struct termios {} + + pub struct utsname {} +} + +pub const AF_UNSPEC: ::c_int = 0; +pub const AF_INET: ::c_int = 2; +pub const AF_INET6: ::c_int = 10; + +// Dummy +pub const AF_UNIX: ::c_int = 1; + +pub const CLOCK_REALTIME: ::clockid_t = 1; +pub const CLOCK_MONOTONIC: ::clockid_t = 4; + +// Dummy +pub const EAI_SYSTEM: ::c_int = -11; + +pub const EPERM: ::c_int = 1; +pub const ENOENT: ::c_int = 2; +pub const ESRCH: ::c_int = 3; +pub const EINTR: ::c_int = 4; 
+pub const EIO: ::c_int = 5; +pub const ENXIO: ::c_int = 6; +pub const E2BIG: ::c_int = 7; +pub const ENOEXEC: ::c_int = 8; +pub const EBADF: ::c_int = 9; +pub const ECHILD: ::c_int = 10; +pub const EAGAIN: ::c_int = 11; +pub const ENOMEM: ::c_int = 12; +pub const EACCES: ::c_int = 13; +pub const EFAULT: ::c_int = 14; +pub const EBUSY: ::c_int = 16; +pub const EEXIST: ::c_int = 17; +pub const EXDEV: ::c_int = 18; +pub const ENODEV: ::c_int = 19; +pub const ENOTDIR: ::c_int = 20; +pub const EISDIR: ::c_int = 21; +pub const EINVAL: ::c_int = 22; +pub const ENFILE: ::c_int = 23; +pub const EMFILE: ::c_int = 24; +pub const ENOTTY: ::c_int = 25; +pub const ETXTBSY: ::c_int = 26; +pub const EFBIG: ::c_int = 27; +pub const ENOSPC: ::c_int = 28; +pub const ESPIPE: ::c_int = 29; +pub const EROFS: ::c_int = 30; +pub const EMLINK: ::c_int = 31; +pub const EPIPE: ::c_int = 32; +pub const EDOM: ::c_int = 33; +pub const ERANGE: ::c_int = 34; +pub const EDEADLK: ::c_int = 35; +pub const ENAMETOOLONG: ::c_int = 36; +pub const ENOLCK: ::c_int = 37; +pub const ENOSYS: ::c_int = 38; +pub const ENOTEMPTY: ::c_int = 39; +pub const ELOOP: ::c_int = 40; +pub const EWOULDBLOCK: ::c_int = EAGAIN; +pub const ENOMSG: ::c_int = 42; +pub const EIDRM: ::c_int = 43; +pub const ECHRNG: ::c_int = 44; +pub const EL2NSYNC: ::c_int = 45; +pub const EL3HLT: ::c_int = 46; +pub const EL3RST: ::c_int = 47; +pub const ELNRNG: ::c_int = 48; +pub const EUNATCH: ::c_int = 49; +pub const ENOCSI: ::c_int = 50; +pub const EL2HLT: ::c_int = 51; +pub const EBADE: ::c_int = 52; +pub const EBADR: ::c_int = 53; +pub const EXFULL: ::c_int = 54; +pub const ENOANO: ::c_int = 55; +pub const EBADRQC: ::c_int = 56; +pub const EBADSLT: ::c_int = 57; +pub const EDEADLOCK: ::c_int = EDEADLK; +pub const EBFONT: ::c_int = 59; +pub const ENOSTR: ::c_int = 60; +pub const ENODATA: ::c_int = 61; +pub const ETIME: ::c_int = 62; +pub const ENOSR: ::c_int = 63; +pub const ENONET: ::c_int = 64; +pub const ENOPKG: ::c_int = 65; +pub 
const EREMOTE: ::c_int = 66; +pub const ENOLINK: ::c_int = 67; +pub const EADV: ::c_int = 68; +pub const ESRMNT: ::c_int = 69; +pub const ECOMM: ::c_int = 70; +pub const EPROTO: ::c_int = 71; +pub const EMULTIHOP: ::c_int = 72; +pub const EDOTDOT: ::c_int = 73; +pub const EBADMSG: ::c_int = 74; +pub const EOVERFLOW: ::c_int = 75; +pub const ENOTUNIQ: ::c_int = 76; +pub const EBADFD: ::c_int = 77; +pub const EREMCHG: ::c_int = 78; +pub const ELIBACC: ::c_int = 79; +pub const ELIBBAD: ::c_int = 80; +pub const ELIBSCN: ::c_int = 81; +pub const ELIBMAX: ::c_int = 82; +pub const ELIBEXEC: ::c_int = 83; +pub const EILSEQ: ::c_int = 84; +pub const ERESTART: ::c_int = 85; +pub const ESTRPIPE: ::c_int = 86; +pub const EUSERS: ::c_int = 87; +pub const ENOTSOCK: ::c_int = 88; +pub const EDESTADDRREQ: ::c_int = 89; +pub const EMSGSIZE: ::c_int = 90; +pub const EPROTOTYPE: ::c_int = 91; +pub const ENOPROTOOPT: ::c_int = 92; +pub const EPROTONOSUPPORT: ::c_int = 93; +pub const ESOCKTNOSUPPORT: ::c_int = 94; +pub const EOPNOTSUPP: ::c_int = 95; +pub const EPFNOSUPPORT: ::c_int = 96; +pub const EAFNOSUPPORT: ::c_int = 97; +pub const EADDRINUSE: ::c_int = 98; +pub const EADDRNOTAVAIL: ::c_int = 99; +pub const ENETDOWN: ::c_int = 100; +pub const ENETUNREACH: ::c_int = 101; +pub const ENETRESET: ::c_int = 102; +pub const ECONNABORTED: ::c_int = 103; +pub const ECONNRESET: ::c_int = 104; +pub const ENOBUFS: ::c_int = 105; +pub const EISCONN: ::c_int = 106; +pub const ENOTCONN: ::c_int = 107; +pub const ESHUTDOWN: ::c_int = 108; +pub const ETOOMANYREFS: ::c_int = 109; +pub const ETIMEDOUT: ::c_int = 110; +pub const ECONNREFUSED: ::c_int = 111; +pub const EHOSTDOWN: ::c_int = 112; +pub const EHOSTUNREACH: ::c_int = 113; +pub const EALREADY: ::c_int = 114; +pub const EINPROGRESS: ::c_int = 115; +pub const ESTALE: ::c_int = 116; +pub const EUCLEAN: ::c_int = 117; +pub const ENOTNAM: ::c_int = 118; +pub const ENAVAIL: ::c_int = 119; +pub const EISNAM: ::c_int = 120; +pub const EREMOTEIO: 
::c_int = 121; +pub const EDQUOT: ::c_int = 122; +pub const ENOMEDIUM: ::c_int = 123; +pub const EMEDIUMTYPE: ::c_int = 124; +pub const ECANCELED: ::c_int = 125; +pub const ENOKEY: ::c_int = 126; +pub const EKEYEXPIRED: ::c_int = 127; +pub const EKEYREVOKED: ::c_int = 128; +pub const EKEYREJECTED: ::c_int = 129; +pub const EOWNERDEAD: ::c_int = 130; +pub const ENOTRECOVERABLE: ::c_int = 131; +pub const ERFKILL: ::c_int = 132; +pub const EHWPOISON: ::c_int = 133; + +pub const EXIT_FAILURE: ::c_int = 1; +pub const EXIT_SUCCESS: ::c_int = 0; + +pub const F_DUPFD: ::c_int = 0; +pub const F_GETFD: ::c_int = 1; +pub const F_SETFD: ::c_int = 2; +pub const F_GETFL: ::c_int = 3; +pub const F_SETFL: ::c_int = 4; +pub const F_GETOWN: ::c_int = 5; +pub const F_SETOWN: ::c_int = 6; +pub const F_GETLK: ::c_int = 7; +pub const F_SETLK: ::c_int = 8; +pub const F_SETLKW: ::c_int = 9; +pub const F_RGETLK: ::c_int = 10; +pub const F_RSETLK: ::c_int = 11; +pub const F_CNVT: ::c_int = 12; +pub const F_RSETLKW: ::c_int = 13; +pub const F_DUPFD_CLOEXEC: ::c_int = 14; + +pub const FD_SETSIZE: usize = 1024; + +// Dummy +pub const FIOCLEX: ::c_int = 0x5451; + +pub const FIONBIO: ::c_int = 0x8004667e; +pub const FIONREAD: ::c_int = 0x4004667f; + +pub const IP_ADD_MEMBERSHIP: ::c_int = 3; +pub const IP_DROP_MEMBERSHIP: ::c_int = 4; + +pub const IP_TOS: ::c_int = 1; +pub const IP_TTL: ::c_int = 2; + +pub const IP_MULTICAST_TTL: ::c_int = 5; +pub const IP_MULTICAST_IF: ::c_int = 6; +pub const IP_MULTICAST_LOOP: ::c_int = 7; + +pub const IPV6_JOIN_GROUP: ::c_int = 12; +pub const IPV6_ADD_MEMBERSHIP: ::c_int = 12; +pub const IPV6_LEAVE_GROUP: ::c_int = 13; +pub const IPV6_DROP_MEMBERSHIP: ::c_int = 13; +pub const IPV6_V6ONLY: ::c_int = 27; + +// Dummy +pub const IPV6_MULTICAST_LOOP: ::c_int = 7; + +pub const MSG_PEEK: ::c_int = 0x01; +pub const MSG_WAITALL: ::c_int = 0x02; +pub const MSG_OOB: ::c_int = 0x04; +pub const MSG_DONTWAIT: ::c_int = 0x08; +pub const MSG_MORE: ::c_int = 0x10; + +pub 
const O_ACCMODE: ::c_int = 3; +pub const O_RDONLY: ::c_int = 0; +pub const O_WRONLY: ::c_int = 1; +pub const O_RDWR: ::c_int = 2; +pub const O_APPEND: ::c_int = 1024; +pub const O_CREAT: ::c_int = 64; +pub const O_EXCL: ::c_int = 128; +pub const O_NOCTTY: ::c_int = 256; +pub const O_NONBLOCK: ::c_int = 2048; +pub const O_TRUNC: ::c_int = 512; +pub const O_CLOEXEC: ::c_int = 524288; + +pub const POLLIN: ::c_short = 0x1; +pub const POLLPRI: ::c_short = 0x2; +pub const POLLOUT: ::c_short = 0x4; +pub const POLLERR: ::c_short = 0x8; +pub const POLLHUP: ::c_short = 0x10; +pub const POLLNVAL: ::c_short = 0x20; + +pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = usize::max_value(); +pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = usize::max_value(); +pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = usize::max_value(); + +pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; +pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; +pub const PTHREAD_STACK_MIN: ::size_t = 0; + +// Dummy +pub const RTLD_DEFAULT: *mut ::c_void = 0i64 as *mut ::c_void; + +pub const _SC_ARG_MAX: ::c_int = 0; +pub const _SC_CHILD_MAX: ::c_int = 1; +pub const _SC_CLK_TCK: ::c_int = 2; +pub const _SC_NGROUPS_MAX: ::c_int = 3; +pub const _SC_OPEN_MAX: ::c_int = 4; +pub const _SC_JOB_CONTROL: ::c_int = 5; +pub const _SC_SAVED_IDS: ::c_int = 6; +pub const _SC_VERSION: ::c_int = 7; +pub const _SC_PAGESIZE: ::c_int = 8; +pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; +pub const _SC_NPROCESSORS_CONF: ::c_int = 9; +pub const _SC_NPROCESSORS_ONLN: ::c_int = 10; +pub const _SC_PHYS_PAGES: ::c_int = 11; +pub const _SC_AVPHYS_PAGES: ::c_int = 12; +pub const _SC_MQ_OPEN_MAX: ::c_int = 13; +pub const _SC_MQ_PRIO_MAX: ::c_int = 14; +pub const _SC_RTSIG_MAX: ::c_int = 15; +pub const _SC_SEM_NSEMS_MAX: ::c_int = 16; +pub const _SC_SEM_VALUE_MAX: ::c_int = 17; +pub const _SC_SIGQUEUE_MAX: ::c_int = 18; +pub const _SC_TIMER_MAX: ::c_int = 19; +pub const _SC_TZNAME_MAX: ::c_int = 20; +pub const 
_SC_ASYNCHRONOUS_IO: ::c_int = 21; +pub const _SC_FSYNC: ::c_int = 22; +pub const _SC_MAPPED_FILES: ::c_int = 23; +pub const _SC_MEMLOCK: ::c_int = 24; +pub const _SC_MEMLOCK_RANGE: ::c_int = 25; +pub const _SC_MEMORY_PROTECTION: ::c_int = 26; +pub const _SC_MESSAGE_PASSING: ::c_int = 27; +pub const _SC_PRIORITIZED_IO: ::c_int = 28; +pub const _SC_REALTIME_SIGNALS: ::c_int = 29; +pub const _SC_SEMAPHORES: ::c_int = 30; +pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 31; +pub const _SC_SYNCHRONIZED_IO: ::c_int = 32; +pub const _SC_TIMERS: ::c_int = 33; +pub const _SC_AIO_LISTIO_MAX: ::c_int = 34; +pub const _SC_AIO_MAX: ::c_int = 35; +pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 36; +pub const _SC_DELAYTIMER_MAX: ::c_int = 37; +pub const _SC_THREAD_KEYS_MAX: ::c_int = 38; +pub const _SC_THREAD_STACK_MIN: ::c_int = 39; +pub const _SC_THREAD_THREADS_MAX: ::c_int = 40; +pub const _SC_TTY_NAME_MAX: ::c_int = 41; +pub const _SC_THREADS: ::c_int = 42; +pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 43; +pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 44; +pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 45; +pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 46; +pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 47; +pub const _SC_THREAD_PRIO_CEILING: ::c_int = _SC_THREAD_PRIO_PROTECT; +pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 48; +pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 49; +pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 50; +pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 51; +pub const _SC_LOGIN_NAME_MAX: ::c_int = 52; +pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 53; +pub const _SC_ADVISORY_INFO: ::c_int = 54; +pub const _SC_ATEXIT_MAX: ::c_int = 55; +pub const _SC_BARRIERS: ::c_int = 56; +pub const _SC_BC_BASE_MAX: ::c_int = 57; +pub const _SC_BC_DIM_MAX: ::c_int = 58; +pub const _SC_BC_SCALE_MAX: ::c_int = 59; +pub const _SC_BC_STRING_MAX: ::c_int = 60; +pub const _SC_CLOCK_SELECTION: ::c_int = 61; +pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 62; +pub const 
_SC_CPUTIME: ::c_int = 63; +pub const _SC_EXPR_NEST_MAX: ::c_int = 64; +pub const _SC_HOST_NAME_MAX: ::c_int = 65; +pub const _SC_IOV_MAX: ::c_int = 66; +pub const _SC_IPV6: ::c_int = 67; +pub const _SC_LINE_MAX: ::c_int = 68; +pub const _SC_MONOTONIC_CLOCK: ::c_int = 69; +pub const _SC_RAW_SOCKETS: ::c_int = 70; +pub const _SC_READER_WRITER_LOCKS: ::c_int = 71; +pub const _SC_REGEXP: ::c_int = 72; +pub const _SC_RE_DUP_MAX: ::c_int = 73; +pub const _SC_SHELL: ::c_int = 74; +pub const _SC_SPAWN: ::c_int = 75; +pub const _SC_SPIN_LOCKS: ::c_int = 76; +pub const _SC_SPORADIC_SERVER: ::c_int = 77; +pub const _SC_SS_REPL_MAX: ::c_int = 78; +pub const _SC_SYMLOOP_MAX: ::c_int = 79; +pub const _SC_THREAD_CPUTIME: ::c_int = 80; +pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 81; +pub const _SC_TIMEOUTS: ::c_int = 82; +pub const _SC_TRACE: ::c_int = 83; +pub const _SC_TRACE_EVENT_FILTER: ::c_int = 84; +pub const _SC_TRACE_EVENT_NAME_MAX: ::c_int = 85; +pub const _SC_TRACE_INHERIT: ::c_int = 86; +pub const _SC_TRACE_LOG: ::c_int = 87; +pub const _SC_TRACE_NAME_MAX: ::c_int = 88; +pub const _SC_TRACE_SYS_MAX: ::c_int = 89; +pub const _SC_TRACE_USER_EVENT_MAX: ::c_int = 90; +pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 91; +pub const _SC_V7_ILP32_OFF32: ::c_int = 92; +pub const _SC_V6_ILP32_OFF32: ::c_int =_SC_V7_ILP32_OFF32; +pub const _SC_XBS5_ILP32_OFF32: ::c_int = _SC_V7_ILP32_OFF32; +pub const _SC_V7_ILP32_OFFBIG: ::c_int = 93; +pub const _SC_V6_ILP32_OFFBIG: ::c_int = _SC_V7_ILP32_OFFBIG; +pub const _SC_XBS5_ILP32_OFFBIG: ::c_int = _SC_V7_ILP32_OFFBIG; +pub const _SC_V7_LP64_OFF64: ::c_int = 94; +pub const _SC_V6_LP64_OFF64: ::c_int = _SC_V7_LP64_OFF64; +pub const _SC_XBS5_LP64_OFF64: ::c_int = _SC_V7_LP64_OFF64; +pub const _SC_V7_LPBIG_OFFBIG: ::c_int = 95; +pub const _SC_V6_LPBIG_OFFBIG: ::c_int = _SC_V7_LPBIG_OFFBIG; +pub const _SC_XBS5_LPBIG_OFFBIG: ::c_int = _SC_V7_LPBIG_OFFBIG; +pub const _SC_XOPEN_CRYPT: ::c_int = 96; +pub const _SC_XOPEN_ENH_I18N: ::c_int = 
97; +pub const _SC_XOPEN_LEGACY: ::c_int = 98; +pub const _SC_XOPEN_REALTIME: ::c_int = 99; +pub const _SC_STREAM_MAX: ::c_int = 100; +pub const _SC_PRIORITY_SCHEDULING: ::c_int = 101; +pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 102; +pub const _SC_XOPEN_SHM: ::c_int = 103; +pub const _SC_XOPEN_STREAMS: ::c_int = 104; +pub const _SC_XOPEN_UNIX: ::c_int = 105; +pub const _SC_XOPEN_VERSION: ::c_int = 106; +pub const _SC_2_CHAR_TERM: ::c_int = 107; +pub const _SC_2_C_BIND: ::c_int = 108; +pub const _SC_2_C_DEV: ::c_int = 109; +pub const _SC_2_FORT_DEV: ::c_int = 110; +pub const _SC_2_FORT_RUN: ::c_int = 111; +pub const _SC_2_LOCALEDEF: ::c_int = 112; +pub const _SC_2_PBS: ::c_int = 113; +pub const _SC_2_PBS_ACCOUNTING: ::c_int = 114; +pub const _SC_2_PBS_CHECKPOINT: ::c_int = 115; +pub const _SC_2_PBS_LOCATE: ::c_int = 116; +pub const _SC_2_PBS_MESSAGE: ::c_int = 117; +pub const _SC_2_PBS_TRACK: ::c_int = 118; +pub const _SC_2_SW_DEV: ::c_int = 119; +pub const _SC_2_UPE: ::c_int = 120; +pub const _SC_2_VERSION: ::c_int = 121; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: ::c_int = 122; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: ::c_int = 123; +pub const _SC_XOPEN_UUCP: ::c_int = 124; +pub const _SC_LEVEL1_ICACHE_SIZE: ::c_int = 125; +pub const _SC_LEVEL1_ICACHE_ASSOC: ::c_int = 126; +pub const _SC_LEVEL1_ICACHE_LINESIZE: ::c_int = 127; +pub const _SC_LEVEL1_DCACHE_SIZE: ::c_int = 128; +pub const _SC_LEVEL1_DCACHE_ASSOC: ::c_int = 129; +pub const _SC_LEVEL1_DCACHE_LINESIZE: ::c_int = 130; +pub const _SC_LEVEL2_CACHE_SIZE: ::c_int = 131; +pub const _SC_LEVEL2_CACHE_ASSOC: ::c_int = 132; +pub const _SC_LEVEL2_CACHE_LINESIZE: ::c_int = 133; +pub const _SC_LEVEL3_CACHE_SIZE: ::c_int = 134; +pub const _SC_LEVEL3_CACHE_ASSOC: ::c_int = 135; +pub const _SC_LEVEL3_CACHE_LINESIZE: ::c_int = 136; +pub const _SC_LEVEL4_CACHE_SIZE: ::c_int = 137; +pub const _SC_LEVEL4_CACHE_ASSOC: ::c_int = 138; +pub const _SC_LEVEL4_CACHE_LINESIZE: ::c_int = 139; + +pub const S_BLKSIZE: 
::mode_t = 1024; +pub const S_IREAD: ::mode_t = 256; +pub const S_IWRITE: ::mode_t = 128; +pub const S_IEXEC: ::mode_t = 64; +pub const S_ENFMT: ::mode_t = 1024; +pub const S_IFMT: ::mode_t = 61440; +pub const S_IFDIR: ::mode_t = 16384; +pub const S_IFCHR: ::mode_t = 8192; +pub const S_IFBLK: ::mode_t = 24576; +pub const S_IFREG: ::mode_t = 32768; +pub const S_IFLNK: ::mode_t = 40960; +pub const S_IFSOCK: ::mode_t = 49152; +pub const S_IFIFO: ::mode_t = 4096; +pub const S_IRUSR: ::mode_t = 256; +pub const S_IWUSR: ::mode_t = 128; +pub const S_IXUSR: ::mode_t = 64; +pub const S_IRGRP: ::mode_t = 32; +pub const S_IWGRP: ::mode_t = 16; +pub const S_IXGRP: ::mode_t = 8; +pub const S_IROTH: ::mode_t = 4; +pub const S_IWOTH: ::mode_t = 2; +pub const S_IXOTH: ::mode_t = 1; + +pub const SEEK_SET: ::c_int = 0; +pub const SEEK_CUR: ::c_int = 1; +pub const SEEK_END: ::c_int = 2; + +pub const SHUT_RD: ::c_int = 0; +pub const SHUT_WR: ::c_int = 1; +pub const SHUT_RDWR: ::c_int = 2; + +pub const SIG_SETMASK: ::c_int = 0; + +pub const SIGHUP: ::c_int = 1; +pub const SIGINT: ::c_int = 2; +pub const SIGQUIT: ::c_int = 3; +pub const SIGILL: ::c_int = 4; +pub const SIGABRT: ::c_int = 6; +pub const SIGEMT: ::c_int = 7; +pub const SIGFPE: ::c_int = 8; +pub const SIGKILL: ::c_int = 9; +pub const SIGSEGV: ::c_int = 11; +pub const SIGPIPE: ::c_int = 13; +pub const SIGALRM: ::c_int = 14; +pub const SIGTERM: ::c_int = 15; + +pub const SO_DEBUG: ::c_int = 0x0001; +pub const SO_ACCEPTCONN: ::c_int = 0x0002; +pub const SO_REUSEADDR: ::c_int = 0x0004; +pub const SO_KEEPALIVE: ::c_int = 0x0008; +pub const SO_DONTROUTE: ::c_int = 0x0010; +pub const SO_BROADCAST: ::c_int = 0x0020; +pub const SO_USELOOPBACK: ::c_int = 0x0040; +pub const SO_LINGER: ::c_int = 0x0080; +pub const SO_OOBINLINE: ::c_int = 0x0100; +pub const SO_REUSEPORT: ::c_int = 0x0200; +pub const SO_SNDBUF: ::c_int = 0x1001; +pub const SO_RCVBUF: ::c_int = 0x1002; +pub const SO_SNDLOWAT: ::c_int = 0x1003; +pub const SO_RCVLOWAT: 
::c_int = 0x1004; +pub const SO_SNDTIMEO: ::c_int = 0x1005; +pub const SO_RCVTIMEO: ::c_int = 0x1006; +pub const SO_ERROR: ::c_int = 0x1007; +pub const SO_TYPE: ::c_int = 0x1008; +pub const SO_CONTIMEO: ::c_int = 0x1009; +pub const SO_NO_CHECK: ::c_int = 0x100a; + +pub const SOCK_STREAM: ::c_int = 1; +pub const SOCK_DGRAM: ::c_int = 2; +pub const SOCK_RAW: ::c_int = 3; + +pub const SOL_SOCKET: ::c_int = 0xfff; + +pub const STDIN_FILENO: ::c_int = 0; +pub const STDOUT_FILENO: ::c_int = 1; +pub const STDERR_FILENO: ::c_int = 2; + +pub const TCP_NODELAY: ::c_int = 0x01; +pub const TCP_KEEPALIVE: ::c_int = 0x02; +pub const TCP_KEEPIDLE: ::c_int = 0x03; +pub const TCP_KEEPINTVL: ::c_int = 0x04; +pub const TCP_KEEPCNT: ::c_int = 0x05; + +const ULONG_SIZE: usize = 64; + +pub const WNOHANG: ::c_int = 0x00000001; + +f! { + pub fn WEXITSTATUS(status: ::c_int) -> ::c_int { + (status >> 8) & 0xff + } + + pub fn WIFEXITED(status: ::c_int) -> bool { + (status & 0xff) == 0 + } + + pub fn WTERMSIG(status: ::c_int) -> ::c_int { + status & 0x7f + } +} + +extern { + pub fn bind(s: ::c_int, name: *const ::sockaddr, namelen: ::socklen_t) + -> ::c_int; + + pub fn clock_gettime(clock_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; + + pub fn getpwuid_r(uid: ::uid_t, pwd: *mut passwd, buf: *mut ::c_char, + buflen: ::size_t, result: *mut *mut passwd) -> ::c_int; + + // Dummy + pub fn ioctl(fd: ::c_int, request: ::c_int, ...) 
-> ::c_int; + + pub fn memalign(align: ::size_t, nbytes: ::size_t) -> *mut ::c_void; + + pub fn pthread_create(tid: *mut ::pthread_t, attr: *const ::pthread_attr_t, + start: extern fn(*mut ::c_void) -> *mut ::c_void, arg: *mut ::c_void) + -> ::c_int; + + pub fn pthread_sigmask(how: ::c_int, set: *const ::sigset_t, + oset: *mut ::sigset_t) -> ::c_int; + + pub fn recvfrom(s: ::c_int, mem: *mut ::c_void, len: ::size_t, + flags: ::c_int, from: *mut ::sockaddr, fromlen: *mut ::socklen_t) + -> ::c_int; + + pub fn setgroups(ngroups: ::c_int, grouplist: *const ::gid_t) -> ::c_int; +} + +cfg_if! { + if #[cfg(target_arch = "aarch64")] { + mod aarch64; + pub use self::aarch64::*; + } else if #[cfg(target_arch = "x86_64")] { + mod x86_64; + pub use self::x86_64::*; + } else { + // Unknown target_arch + } +} diff --git a/third_party/rust/libc/src/unix/hermit/x86_64.rs b/third_party/rust/libc/src/unix/hermit/x86_64.rs new file mode 100644 index 000000000000..76ec3ce823e8 --- /dev/null +++ b/third_party/rust/libc/src/unix/hermit/x86_64.rs @@ -0,0 +1,2 @@ +pub type c_char = i8; +pub type wchar_t = i32; diff --git a/third_party/rust/libc/src/unix/mod.rs b/third_party/rust/libc/src/unix/mod.rs index 134f44c5a8de..9c68178ad15c 100644 --- a/third_party/rust/libc/src/unix/mod.rs +++ b/third_party/rust/libc/src/unix/mod.rs @@ -104,8 +104,10 @@ s! { pub s_addr: in_addr_t, } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct in6_addr { pub s6_addr: [u8; 16], + #[cfg(not(feature = "align"))] __align: [u32; 0], } @@ -268,8 +270,16 @@ pub const INADDR_ANY: in_addr_t = 0; pub const INADDR_BROADCAST: in_addr_t = 4294967295; pub const INADDR_NONE: in_addr_t = 4294967295; +pub const ARPOP_REQUEST: u16 = 1; +pub const ARPOP_REPLY: u16 = 2; + +pub const ATF_COM: ::c_int = 0x02; +pub const ATF_PERM: ::c_int = 0x04; +pub const ATF_PUBL: ::c_int = 0x08; +pub const ATF_USETRAILERS: ::c_int = 0x10; + cfg_if! 
{ - if #[cfg(dox)] { + if #[cfg(cross_platform_docs)] { // on dox builds don't pull in anything } else if #[cfg(target_os = "l4re")] { // required libraries for L4Re are linked externally, ATM @@ -286,11 +296,11 @@ cfg_if! { } else if #[cfg(target_os = "emscripten")] { #[link(name = "c")] extern {} - } else if #[cfg(all(target_os = "netbsd"))] { + } else if #[cfg(all(target_os = "netbsd", + feature = "stdbuild", target_vendor = "rumprun"))] { // Since we don't use -nodefaultlibs on Rumprun, libc is always pulled // in automatically by the linker. We avoid passing it explicitly, as it // causes some versions of binutils to crash with an assertion failure. - #[cfg_attr(feature = "stdbuild", target_vendor = "rumprun")] #[link(name = "m")] extern {} } else if #[cfg(any(target_os = "macos", @@ -313,6 +323,11 @@ cfg_if! { #[link(name = "c")] #[link(name = "m")] extern {} + } else if #[cfg(target_os = "hermit")] { + // no_default_libraries is set to false for HermitCore, so only a link + // to "pthread" needs to be added. 
+ #[link(name = "pthread")] + extern {} } else { #[link(name = "c")] #[link(name = "m")] @@ -416,6 +431,13 @@ extern { link_name = "opendir$INODE64$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__opendir30")] pub fn opendir(dirname: *const c_char) -> *mut ::DIR; + + #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), + link_name = "fdopendir$INODE64")] + #[cfg_attr(all(target_os = "macos", target_arch = "x86"), + link_name = "fdopendir$INODE64$UNIX2003")] + pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; + #[cfg_attr(target_os = "macos", link_name = "readdir$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__readdir30")] #[cfg_attr(target_os = "freebsd", link_name = "readdir@FBSD_1.0")] @@ -914,6 +936,7 @@ extern { pub fn openlog(ident: *const ::c_char, logopt: ::c_int, facility: ::c_int); pub fn closelog(); pub fn setlogmask(maskpri: ::c_int) -> ::c_int; + #[cfg_attr(target_os = "macos", link_name = "syslog$DARWIN_EXTSN")] pub fn syslog(priority: ::c_int, message: *const ::c_char, ...); #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "nice$UNIX2003")] @@ -953,6 +976,9 @@ cfg_if! { } else if #[cfg(target_os = "haiku")] { mod haiku; pub use self::haiku::*; + } else if #[cfg(target_os = "hermit")] { + mod hermit; + pub use self::hermit::*; } else { // Unknown target_os } diff --git a/third_party/rust/libc/src/unix/newlib/mod.rs b/third_party/rust/libc/src/unix/newlib/mod.rs index 6cf8633e6b91..9968d3668503 100644 --- a/third_party/rust/libc/src/unix/newlib/mod.rs +++ b/third_party/rust/libc/src/unix/newlib/mod.rs @@ -238,34 +238,80 @@ s! 
{ __size: [u64; 7] } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))), + repr(align(8)))] pub struct pthread_mutex_t { // Unverified - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", + #[cfg(not(any(feature = "align", + target_arch = "mips", + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))), + repr(align(8)))] pub struct pthread_rwlock_t { // Unverified - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", + #[cfg(not(any(feature = "align", + target_arch = "mips", + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], } + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + 
not(any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64"))), + repr(align(8)))] pub struct pthread_mutexattr_t { // Unverified - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64"))] - __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", + #[cfg(all(not(feature = "align"), + any(target_arch = "x86_64", target_arch = "powerpc64", target_arch = "mips64", target_arch = "s390x", target_arch = "sparc64")))] + __align: [::c_int; 0], + #[cfg(all(not(feature = "align"), + not(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64"))))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } @@ -275,12 +321,16 @@ s! { __pshared: ::c_int, } + #[cfg_attr(feature = "align", repr(align(8)))] pub struct pthread_cond_t { // Unverified + #[cfg(not(feature = "align"))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_COND_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_condattr_t { // Unverified + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], } @@ -288,18 +338,17 @@ s! { } // unverified constants -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; +align_const! 
{ + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], + }; +} pub const NCCS: usize = 32; pub const __SIZEOF_PTHREAD_ATTR_T: usize = 56; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; diff --git a/third_party/rust/libc/src/unix/notbsd/android/b32/mod.rs b/third_party/rust/libc/src/unix/notbsd/android/b32/mod.rs index 99af6d8ab31c..394abe8fe24c 100644 --- a/third_party/rust/libc/src/unix/notbsd/android/b32/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/android/b32/mod.rs @@ -198,22 +198,6 @@ pub const SIGSTKSZ: ::size_t = 8192; pub const MINSIGSTKSZ: ::size_t = 2048; extern { - pub fn bind(socket: ::c_int, address: *const ::sockaddr, - address_len: socklen_t) -> ::c_int; - - pub fn writev(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - pub fn readv(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - - pub fn sendmsg(fd: ::c_int, - msg: *const ::msghdr, - flags: ::c_int) -> ::ssize_t; - pub fn recvmsg(fd: ::c_int, msg: *mut ::msghdr, flags: ::c_int) - -> ::ssize_t; - pub fn timegm64(tm: *const ::tm) -> ::time64_t; } diff --git a/third_party/rust/libc/src/unix/notbsd/android/b64/mod.rs b/third_party/rust/libc/src/unix/notbsd/android/b64/mod.rs index 4aa69977a7e1..fb943349b398 100644 --- a/third_party/rust/libc/src/unix/notbsd/android/b64/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/android/b64/mod.rs @@ -154,31 +154,6 @@ pub const UT_LINESIZE: usize = 32; pub const UT_NAMESIZE: usize = 32; pub const UT_HOSTSIZE: usize = 256; -// Some weirdness in Android -extern { - // address_len should be socklen_t, but it is c_int! 
- pub fn bind(socket: ::c_int, address: *const ::sockaddr, - address_len: ::c_int) -> ::c_int; - - // the return type should be ::ssize_t, but it is c_int! - pub fn writev(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::c_int; - - // the return type should be ::ssize_t, but it is c_int! - pub fn readv(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::c_int; - - // the return type should be ::ssize_t, but it is c_int! - pub fn sendmsg(fd: ::c_int, - msg: *const ::msghdr, - flags: ::c_int) -> ::c_int; - - // the return type should be ::ssize_t, but it is c_int! - pub fn recvmsg(fd: ::c_int, msg: *mut ::msghdr, flags: ::c_int) -> ::c_int; -} - cfg_if! { if #[cfg(target_arch = "x86_64")] { mod x86_64; diff --git a/third_party/rust/libc/src/unix/notbsd/android/mod.rs b/third_party/rust/libc/src/unix/notbsd/android/mod.rs index 22043fdbfdd8..3e10acd5056f 100644 --- a/third_party/rust/libc/src/unix/notbsd/android/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/android/mod.rs @@ -187,47 +187,52 @@ s! 
{ } pub struct genlmsghdr { - cmd: u8, - version: u8, - reserved: u16, + pub cmd: u8, + pub version: u8, + pub reserved: u16, } pub struct nlmsghdr { - nlmsg_len: u32, - nlmsg_type: u16, - nlmsg_flags: u16, - nlmsg_seq: u32, - nlmsg_pid: u32, + pub nlmsg_len: u32, + pub nlmsg_type: u16, + pub nlmsg_flags: u16, + pub nlmsg_seq: u32, + pub nlmsg_pid: u32, } pub struct nlmsgerr { - error: ::c_int, - msg: nlmsghdr, + pub error: ::c_int, + pub msg: nlmsghdr, } pub struct nl_pktinfo { - group: u32, + pub group: u32, } pub struct nl_mmap_req { - nm_block_size: ::c_uint, - nm_block_nr: ::c_uint, - nm_frame_size: ::c_uint, - nm_frame_nr: ::c_uint, + pub nm_block_size: ::c_uint, + pub nm_block_nr: ::c_uint, + pub nm_frame_size: ::c_uint, + pub nm_frame_nr: ::c_uint, } pub struct nl_mmap_hdr { - nm_status: ::c_uint, - nm_len: ::c_uint, - nm_group: u32, - nm_pid: u32, - nm_uid: u32, - nm_gid: u32, + pub nm_status: ::c_uint, + pub nm_len: ::c_uint, + pub nm_group: u32, + pub nm_pid: u32, + pub nm_uid: u32, + pub nm_gid: u32, } pub struct nlattr { - nla_len: u16, - nla_type: u16, + pub nla_len: u16, + pub nla_type: u16, + } + + pub struct in6_pktinfo { + pub ipi6_addr: ::in6_addr, + pub ipi6_ifindex: ::c_int, } } @@ -386,6 +391,49 @@ pub const _SC_PHYS_PAGES: ::c_int = 98; pub const _SC_AVPHYS_PAGES: ::c_int = 99; pub const _SC_MONOTONIC_CLOCK: ::c_int = 100; +pub const _SC_2_PBS: ::c_int = 101; +pub const _SC_2_PBS_ACCOUNTING: ::c_int = 102; +pub const _SC_2_PBS_CHECKPOINT: ::c_int = 103; +pub const _SC_2_PBS_LOCATE: ::c_int = 104; +pub const _SC_2_PBS_MESSAGE: ::c_int = 105; +pub const _SC_2_PBS_TRACK: ::c_int = 106; +pub const _SC_ADVISORY_INFO: ::c_int = 107; +pub const _SC_BARRIERS: ::c_int = 108; +pub const _SC_CLOCK_SELECTION: ::c_int = 109; +pub const _SC_CPUTIME: ::c_int = 110; +pub const _SC_HOST_NAME_MAX: ::c_int = 111; +pub const _SC_IPV6: ::c_int = 112; +pub const _SC_RAW_SOCKETS: ::c_int = 113; +pub const _SC_READER_WRITER_LOCKS: ::c_int = 114; +pub const 
_SC_REGEXP: ::c_int = 115; +pub const _SC_SHELL: ::c_int = 116; +pub const _SC_SPAWN: ::c_int = 117; +pub const _SC_SPIN_LOCKS: ::c_int = 118; +pub const _SC_SPORADIC_SERVER: ::c_int = 119; +pub const _SC_SS_REPL_MAX: ::c_int = 120; +pub const _SC_SYMLOOP_MAX: ::c_int = 121; +pub const _SC_THREAD_CPUTIME: ::c_int = 122; +pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 123; +pub const _SC_THREAD_ROBUST_PRIO_INHERIT: ::c_int = 124; +pub const _SC_THREAD_ROBUST_PRIO_PROTECT: ::c_int = 125; +pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 126; +pub const _SC_TIMEOUTS: ::c_int = 127; +pub const _SC_TRACE: ::c_int = 128; +pub const _SC_TRACE_EVENT_FILTER: ::c_int = 129; +pub const _SC_TRACE_EVENT_NAME_MAX: ::c_int = 130; +pub const _SC_TRACE_INHERIT: ::c_int = 131; +pub const _SC_TRACE_LOG: ::c_int = 132; +pub const _SC_TRACE_NAME_MAX: ::c_int = 133; +pub const _SC_TRACE_SYS_MAX: ::c_int = 134; +pub const _SC_TRACE_USER_EVENT_MAX: ::c_int = 135; +pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 136; +pub const _SC_V7_ILP32_OFF32: ::c_int = 137; +pub const _SC_V7_ILP32_OFFBIG: ::c_int = 138; +pub const _SC_V7_LP64_OFF64: ::c_int = 139; +pub const _SC_V7_LPBIG_OFFBIG: ::c_int = 140; +pub const _SC_XOPEN_STREAMS: ::c_int = 141; +pub const _SC_XOPEN_UUCP: ::c_int = 142; + pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; @@ -1372,6 +1420,51 @@ pub const ETH_P_XDSA: ::c_int = 0x00F8; /* see rust-lang/libc#924 pub const ETH_P_MAP: ::c_int = 0x00F9;*/ // end android/platform/bionic/libc/kernel/uapi/linux/if_ether.h +pub const SIOCADDRT: ::c_ulong = 0x0000890B; +pub const SIOCDELRT: ::c_ulong = 0x0000890C; +pub const SIOCGIFNAME: ::c_ulong = 0x00008910; +pub const SIOCSIFLINK: ::c_ulong = 0x00008911; +pub const SIOCGIFCONF: ::c_ulong = 0x00008912; +pub const SIOCGIFFLAGS: ::c_ulong = 0x00008913; +pub const SIOCSIFFLAGS: ::c_ulong = 0x00008914; +pub const SIOCGIFADDR: ::c_ulong = 0x00008915; 
+pub const SIOCSIFADDR: ::c_ulong = 0x00008916; +pub const SIOCGIFDSTADDR: ::c_ulong = 0x00008917; +pub const SIOCSIFDSTADDR: ::c_ulong = 0x00008918; +pub const SIOCGIFBRDADDR: ::c_ulong = 0x00008919; +pub const SIOCSIFBRDADDR: ::c_ulong = 0x0000891A; +pub const SIOCGIFNETMASK: ::c_ulong = 0x0000891B; +pub const SIOCSIFNETMASK: ::c_ulong = 0x0000891C; +pub const SIOCGIFMETRIC: ::c_ulong = 0x0000891D; +pub const SIOCSIFMETRIC: ::c_ulong = 0x0000891E; +pub const SIOCGIFMEM: ::c_ulong = 0x0000891F; +pub const SIOCSIFMEM: ::c_ulong = 0x00008920; +pub const SIOCGIFMTU: ::c_ulong = 0x00008921; +pub const SIOCSIFMTU: ::c_ulong = 0x00008922; +pub const SIOCSIFHWADDR: ::c_ulong = 0x00008924; +pub const SIOCGIFENCAP: ::c_ulong = 0x00008925; +pub const SIOCSIFENCAP: ::c_ulong = 0x00008926; +pub const SIOCGIFHWADDR: ::c_ulong = 0x00008927; +pub const SIOCGIFSLAVE: ::c_ulong = 0x00008929; +pub const SIOCSIFSLAVE: ::c_ulong = 0x00008930; +pub const SIOCADDMULTI: ::c_ulong = 0x00008931; +pub const SIOCDELMULTI: ::c_ulong = 0x00008932; +pub const SIOCDARP: ::c_ulong = 0x00008953; +pub const SIOCGARP: ::c_ulong = 0x00008954; +pub const SIOCSARP: ::c_ulong = 0x00008955; +pub const SIOCDRARP: ::c_ulong = 0x00008960; +pub const SIOCGRARP: ::c_ulong = 0x00008961; +pub const SIOCSRARP: ::c_ulong = 0x00008962; +pub const SIOCGIFMAP: ::c_ulong = 0x00008970; +pub const SIOCSIFMAP: ::c_ulong = 0x00008971; + +// linux/module.h +pub const MODULE_INIT_IGNORE_MODVERSIONS: ::c_uint = 0x0001; +pub const MODULE_INIT_IGNORE_VERMAGIC: ::c_uint = 0x0002; + +// Similarity to Linux it's not used but defined for compatibility. +pub const ENOATTR: ::c_int = ::ENODATA; + f! 
{ pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { for slot in cpuset.__bits.iter_mut() { @@ -1495,6 +1588,8 @@ extern { pub fn sched_rr_get_interval(pid: ::pid_t, tp: *mut ::timespec) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn sched_setparam(pid: ::pid_t, param: *const ::sched_param) -> ::c_int; pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int; pub fn swapoff(puath: *const ::c_char) -> ::c_int; diff --git a/third_party/rust/libc/src/unix/notbsd/emscripten.rs b/third_party/rust/libc/src/unix/notbsd/emscripten.rs index 90c056cafe38..b30fee32323d 100644 --- a/third_party/rust/libc/src/unix/notbsd/emscripten.rs +++ b/third_party/rust/libc/src/unix/notbsd/emscripten.rs @@ -72,42 +72,47 @@ s! { __unused5: *mut ::c_void, } - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: ::c_uint, - pub ifa_addr: *mut ::sockaddr, - pub ifa_netmask: *mut ::sockaddr, - pub ifa_ifu: *mut ::sockaddr, // FIXME This should be a union - pub ifa_data: *mut ::c_void - } - + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_mutex_t { + #[cfg(not(feature = "align"))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_rwlock_t { + #[cfg(not(feature = "align"))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_mutexattr_t { + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_rwlockattr_t { + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCKATTR_T], } + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = 
"64"), + repr(align(8)))] pub struct pthread_cond_t { + #[cfg(not(feature = "align"))] __align: [*const ::c_void; 0], size: [u8; __SIZEOF_PTHREAD_COND_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_condattr_t { + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], } @@ -461,6 +466,15 @@ s! { pub f_namemax: ::c_ulong, __f_spare: [::c_int; 6], } + + pub struct arpd_request { + pub req: ::c_ushort, + pub ip: u32, + pub dev: ::c_ulong, + pub stamp: ::c_ulong, + pub updated: ::c_ulong, + pub ha: [::c_uchar; ::MAX_ADDR_LEN], + } } pub const ABDAY_1: ::nl_item = 0x20000; @@ -748,18 +762,18 @@ pub const RTLD_NOW: ::c_int = 0x2; pub const TCP_MD5SIG: ::c_int = 14; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; +align_const! 
{ + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], + }; +} + pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; @@ -1484,6 +1498,12 @@ pub const TIOCM_CD: ::c_int = TIOCM_CAR; pub const TIOCM_RI: ::c_int = TIOCM_RNG; pub const O_TMPFILE: ::c_int = 0x400000; +pub const MAX_ADDR_LEN: usize = 7; +pub const ARPD_UPDATE: ::c_ushort = 0x01; +pub const ARPD_LOOKUP: ::c_ushort = 0x02; +pub const ARPD_FLUSH: ::c_ushort = 0x03; +pub const ATF_MAGIC: ::c_int = 0x80; + f! { pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { for slot in cpuset.bits.iter_mut() { @@ -1600,8 +1620,6 @@ extern { mode: ::mode_t) -> ::c_int; pub fn if_nameindex() -> *mut if_nameindex; pub fn if_freenameindex(ptr: *mut if_nameindex); - pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; - pub fn freeifaddrs(ifa: *mut ::ifaddrs); pub fn mremap(addr: *mut ::c_void, len: ::size_t, @@ -1635,21 +1653,6 @@ extern { pub fn mkstemps(template: *mut ::c_char, suffixlen: ::c_int) -> ::c_int; pub fn nl_langinfo(item: ::nl_item) -> *mut ::c_char; - pub fn bind(socket: ::c_int, address: *const ::sockaddr, - address_len: ::socklen_t) -> ::c_int; - - pub fn writev(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - pub fn readv(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - - pub fn sendmsg(fd: ::c_int, - msg: *const ::msghdr, - flags: ::c_int) -> ::ssize_t; - pub fn recvmsg(fd: ::c_int, msg: *mut ::msghdr, flags: ::c_int) - -> ::ssize_t; pub fn getdomainname(name: *mut ::c_char, len: ::size_t) -> ::c_int; pub fn setdomainname(name: *const ::c_char, len: ::size_t) -> ::c_int; pub fn 
sendmmsg(sockfd: ::c_int, msgvec: *mut mmsghdr, vlen: ::c_uint, diff --git a/third_party/rust/libc/src/unix/notbsd/linux/mips/mips32.rs b/third_party/rust/libc/src/unix/notbsd/linux/mips/mips32.rs index 8fad85b006eb..a6c08a5ad71b 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/mips/mips32.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/mips/mips32.rs @@ -1,3 +1,5 @@ +use pthread_mutex_t; + pub type c_char = i8; pub type c_long = i32; pub type c_ulong = u32; @@ -91,6 +93,22 @@ s! { pub f_spare: [::c_long; 5], } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + __f_unused: ::c_int, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct statvfs64 { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, @@ -100,11 +118,8 @@ s! { pub f_files: u64, pub f_ffree: u64, pub f_favail: u64, - #[cfg(target_endian = "little")] pub f_fsid: ::c_ulong, __f_unused: ::c_int, - #[cfg(target_endian = "big")] - pub f_fsid: ::c_ulong, pub f_flag: ::c_ulong, pub f_namemax: ::c_ulong, __f_spare: [::c_int; 6], @@ -265,6 +280,57 @@ pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +align_const! 
{ + #[cfg(target_endian = "little")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; +} + pub const O_LARGEFILE: ::c_int = 0x2000; pub const RLIM_INFINITY: ::rlim_t = 0x7fffffff; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/mips/mips64.rs b/third_party/rust/libc/src/unix/notbsd/linux/mips/mips64.rs index 45b3df8fc147..e8b02a36b0e7 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/mips/mips64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/mips/mips64.rs @@ -1,3 +1,5 @@ +use pthread_mutex_t; + pub type blkcnt_t = i64; pub type blksize_t = i64; pub type c_char = i8; @@ -91,6 +93,21 @@ s! 
{ pub f_spare: [::c_long; 5], } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct statvfs64 { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, @@ -247,6 +264,57 @@ pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +align_const! { + #[cfg(target_endian = "little")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], 
+ }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; +} + pub const O_LARGEFILE: ::c_int = 0; pub const RLIM_INFINITY: ::rlim_t = 0xffff_ffff_ffff_ffff; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/mips/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/mips/mod.rs index 405a2bdb1242..4c14d12ebced 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/mips/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/mips/mod.rs @@ -21,11 +21,16 @@ s! { } // FIXME this is actually a union + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = "64"), + repr(align(8)))] pub struct sem_t { #[cfg(target_pointer_width = "32")] __size: [::c_char; 16], #[cfg(target_pointer_width = "64")] __size: [::c_char; 32], + #[cfg(not(feature = "align"))] __align: [::c_long; 0], } @@ -41,41 +46,41 @@ s! 
{ } pub struct nlmsghdr { - nlmsg_len: u32, - nlmsg_type: u16, - nlmsg_flags: u16, - nlmsg_seq: u32, - nlmsg_pid: u32, + pub nlmsg_len: u32, + pub nlmsg_type: u16, + pub nlmsg_flags: u16, + pub nlmsg_seq: u32, + pub nlmsg_pid: u32, } pub struct nlmsgerr { - error: ::c_int, - msg: nlmsghdr, + pub error: ::c_int, + pub msg: nlmsghdr, } pub struct nl_pktinfo { - group: u32, + pub group: u32, } pub struct nl_mmap_req { - nm_block_size: ::c_uint, - nm_block_nr: ::c_uint, - nm_frame_size: ::c_uint, - nm_frame_nr: ::c_uint, + pub nm_block_size: ::c_uint, + pub nm_block_nr: ::c_uint, + pub nm_frame_size: ::c_uint, + pub nm_frame_nr: ::c_uint, } pub struct nl_mmap_hdr { - nm_status: ::c_uint, - nm_len: ::c_uint, - nm_group: u32, - nm_pid: u32, - nm_uid: u32, - nm_gid: u32, + pub nm_status: ::c_uint, + pub nm_len: ::c_uint, + pub nm_group: u32, + pub nm_pid: u32, + pub nm_uid: u32, + pub nm_gid: u32, } pub struct nlattr { - nla_len: u16, - nla_type: u16, + pub nla_len: u16, + pub nla_type: u16, } } @@ -463,6 +468,7 @@ pub const POLLWRNORM: ::c_short = 0x004; pub const POLLWRBAND: ::c_short = 0x100; pub const PTHREAD_STACK_MIN: ::size_t = 131072; +pub const PTHREAD_MUTEX_ADAPTIVE_NP: ::c_int = 3; pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5; pub const AFFS_SUPER_MAGIC: ::c_long = 0x0000adff; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/mod.rs index e495dc2d18f4..6bc7db191c94 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/mod.rs @@ -71,24 +71,32 @@ s! 
{ __unused5: *mut ::c_void, } - pub struct ifaddrs { - pub ifa_next: *mut ifaddrs, - pub ifa_name: *mut c_char, - pub ifa_flags: ::c_uint, - pub ifa_addr: *mut ::sockaddr, - pub ifa_netmask: *mut ::sockaddr, - pub ifa_ifu: *mut ::sockaddr, // FIXME This should be a union - pub ifa_data: *mut ::c_void - } - + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64", + target_arch = "x86")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64", + target_arch = "x86")))), + repr(align(8)))] pub struct pthread_mutex_t { - #[cfg(any(target_arch = "mips", - target_arch = "arm", - target_arch = "powerpc", - all(target_arch = "x86_64", - target_pointer_width = "32")))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + all(target_arch = "x86_64", + target_pointer_width = "32"))))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", + #[cfg(not(any(feature = "align", + target_arch = "mips", target_arch = "arm", target_arch = "powerpc", all(target_arch = "x86_64", @@ -97,14 +105,32 @@ s! 
{ size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64", + target_arch = "x86")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "x86_64", + target_arch = "x86")))), + repr(align(8)))] pub struct pthread_rwlock_t { - #[cfg(any(target_arch = "mips", - target_arch = "arm", - target_arch = "powerpc", - all(target_arch = "x86_64", - target_pointer_width = "32")))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc", + all(target_arch = "x86_64", + target_pointer_width = "32"))))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", + #[cfg(not(any(feature = "align", + target_arch = "mips", target_arch = "arm", target_arch = "powerpc", all(target_arch = "x86_64", @@ -113,39 +139,78 @@ s! 
{ size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], } + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl"))), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl")))), + repr(align(8)))] pub struct pthread_mutexattr_t { - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64"))] - __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", + #[cfg(all(not(features = "align"), + any(target_arch = "x86_64", target_arch = "powerpc64", target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64", target_arch = "aarch64")))] - __align: [::c_long; 0], - #[cfg(all(target_arch = "aarch64", target_env = "gnu"))] - __align: [::c_long; 0], - #[cfg(all(target_arch = "aarch64", target_env = "musl"))] + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl"))))] __align: [::c_int; 0], + #[cfg(all(not(features = "align"), + not(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64", + all(target_arch = "aarch64", target_env = "musl")))))] + __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } + #[cfg_attr(all(feature = "align", + any(target_env = "musl", target_pointer_width = "32")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(target_env = "musl"), + target_pointer_width = "64"), + repr(align(8)))] pub struct pthread_rwlockattr_t { - #[cfg(any(target_env = "musl"))] + #[cfg(all(not(feature = "align"), target_env = "musl"))] 
__align: [::c_int; 0], - #[cfg(not(any(target_env = "musl")))] + #[cfg(all(not(feature = "align"), not(target_env = "musl")))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCKATTR_T], } + #[cfg_attr(all(feature = "align", + target_env = "musl", + target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + target_env = "musl", + target_pointer_width = "64"), + repr(align(8)))] + #[cfg_attr(all(feature = "align", + not(target_env = "musl"), + target_arch = "x86"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(target_env = "musl"), + not(target_arch = "x86")), + repr(align(8)))] pub struct pthread_cond_t { - #[cfg(any(target_env = "musl"))] + #[cfg(all(not(feature = "align"), target_env = "musl"))] __align: [*const ::c_void; 0], - #[cfg(not(any(target_env = "musl")))] + #[cfg(not(any(feature = "align", target_env = "musl")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_COND_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_condattr_t { + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], } @@ -172,26 +237,6 @@ s! { pub sp_flag: ::c_ulong, } - pub struct statvfs { - pub f_bsize: ::c_ulong, - pub f_frsize: ::c_ulong, - pub f_blocks: ::fsblkcnt_t, - pub f_bfree: ::fsblkcnt_t, - pub f_bavail: ::fsblkcnt_t, - pub f_files: ::fsfilcnt_t, - pub f_ffree: ::fsfilcnt_t, - pub f_favail: ::fsfilcnt_t, - #[cfg(target_endian = "little")] - pub f_fsid: ::c_ulong, - #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] - __f_unused: ::c_int, - #[cfg(target_endian = "big")] - pub f_fsid: ::c_ulong, - pub f_flag: ::c_ulong, - pub f_namemax: ::c_ulong, - __f_spare: [::c_int; 6], - } - pub struct dqblk { pub dqb_bhardlimit: ::uint64_t, pub dqb_bsoftlimit: ::uint64_t, @@ -259,6 +304,13 @@ s! 
{ pad: [::c_long; 4], } + pub struct packet_mreq { + pub mr_ifindex: ::c_int, + pub mr_type: ::c_ushort, + pub mr_alen: ::c_ushort, + pub mr_address: [::c_uchar; 8], + } + pub struct cpu_set_t { #[cfg(all(target_pointer_width = "32", not(target_arch = "x86_64")))] @@ -485,9 +537,23 @@ s! { } pub struct genlmsghdr { - cmd: u8, - version: u8, - reserved: u16, + pub cmd: u8, + pub version: u8, + pub reserved: u16, + } + + pub struct in6_pktinfo { + pub ipi6_addr: ::in6_addr, + pub ipi6_ifindex: ::c_uint, + } + + pub struct arpd_request { + pub req: ::c_ushort, + pub ip: u32, + pub dev: ::c_ulong, + pub stamp: ::c_ulong, + pub updated: ::c_ulong, + pub ha: [::c_uchar; ::MAX_ADDR_LEN], } } @@ -801,18 +867,17 @@ pub const RTLD_NOW: ::c_int = 0x2; pub const TCP_MD5SIG: ::c_int = 14; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; +align_const! 
{ + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], + }; +} pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; @@ -1316,6 +1381,15 @@ pub const CTRL_ATTR_MCAST_GRP_UNSPEC: ::c_int = 0; pub const CTRL_ATTR_MCAST_GRP_NAME: ::c_int = 1; pub const CTRL_ATTR_MCAST_GRP_ID: ::c_int = 2; +// linux/if_packet.h +pub const PACKET_ADD_MEMBERSHIP: ::c_int = 1; +pub const PACKET_DROP_MEMBERSHIP: ::c_int = 2; + +pub const PACKET_MR_MULTICAST: ::c_int = 0; +pub const PACKET_MR_PROMISC: ::c_int = 1; +pub const PACKET_MR_ALLMULTI: ::c_int = 2; +pub const PACKET_MR_UNICAST: ::c_int = 3; + // linux/netfilter.h pub const NF_DROP: ::c_int = 0; pub const NF_ACCEPT: ::c_int = 1; @@ -1394,6 +1468,114 @@ pub const NF_IP6_PRI_SELINUX_LAST: ::c_int = 225; pub const NF_IP6_PRI_CONNTRACK_HELPER: ::c_int = 300; pub const NF_IP6_PRI_LAST: ::c_int = ::INT_MAX; +pub const SIOCADDRT: ::c_ulong = 0x0000890B; +pub const SIOCDELRT: ::c_ulong = 0x0000890C; +pub const SIOCGIFNAME: ::c_ulong = 0x00008910; +pub const SIOCSIFLINK: ::c_ulong = 0x00008911; +pub const SIOCGIFCONF: ::c_ulong = 0x00008912; +pub const SIOCGIFFLAGS: ::c_ulong = 0x00008913; +pub const SIOCSIFFLAGS: ::c_ulong = 0x00008914; +pub const SIOCGIFADDR: ::c_ulong = 0x00008915; +pub const SIOCSIFADDR: ::c_ulong = 0x00008916; +pub const SIOCGIFDSTADDR: ::c_ulong = 0x00008917; +pub const SIOCSIFDSTADDR: ::c_ulong = 0x00008918; +pub const SIOCGIFBRDADDR: ::c_ulong = 0x00008919; +pub const SIOCSIFBRDADDR: ::c_ulong = 0x0000891A; +pub const SIOCGIFNETMASK: ::c_ulong = 0x0000891B; +pub const SIOCSIFNETMASK: ::c_ulong = 0x0000891C; +pub const SIOCGIFMETRIC: 
::c_ulong = 0x0000891D; +pub const SIOCSIFMETRIC: ::c_ulong = 0x0000891E; +pub const SIOCGIFMEM: ::c_ulong = 0x0000891F; +pub const SIOCSIFMEM: ::c_ulong = 0x00008920; +pub const SIOCGIFMTU: ::c_ulong = 0x00008921; +pub const SIOCSIFMTU: ::c_ulong = 0x00008922; +pub const SIOCSIFHWADDR: ::c_ulong = 0x00008924; +pub const SIOCGIFENCAP: ::c_ulong = 0x00008925; +pub const SIOCSIFENCAP: ::c_ulong = 0x00008926; +pub const SIOCGIFHWADDR: ::c_ulong = 0x00008927; +pub const SIOCGIFSLAVE: ::c_ulong = 0x00008929; +pub const SIOCSIFSLAVE: ::c_ulong = 0x00008930; +pub const SIOCADDMULTI: ::c_ulong = 0x00008931; +pub const SIOCDELMULTI: ::c_ulong = 0x00008932; +pub const SIOCDARP: ::c_ulong = 0x00008953; +pub const SIOCGARP: ::c_ulong = 0x00008954; +pub const SIOCSARP: ::c_ulong = 0x00008955; +pub const SIOCDRARP: ::c_ulong = 0x00008960; +pub const SIOCGRARP: ::c_ulong = 0x00008961; +pub const SIOCSRARP: ::c_ulong = 0x00008962; +pub const SIOCGIFMAP: ::c_ulong = 0x00008970; +pub const SIOCSIFMAP: ::c_ulong = 0x00008971; + +pub const IPTOS_TOS_MASK: u8 = 0x1E; +pub const IPTOS_PREC_MASK: u8 = 0xE0; + +pub const RTF_UP: ::c_ushort = 0x0001; +pub const RTF_GATEWAY: ::c_ushort = 0x0002; + +pub const RTF_HOST: ::c_ushort = 0x0004; +pub const RTF_REINSTATE: ::c_ushort = 0x0008; +pub const RTF_DYNAMIC: ::c_ushort = 0x0010; +pub const RTF_MODIFIED: ::c_ushort = 0x0020; +pub const RTF_MTU: ::c_ushort = 0x0040; +pub const RTF_MSS: ::c_ushort = RTF_MTU; +pub const RTF_WINDOW: ::c_ushort = 0x0080; +pub const RTF_IRTT: ::c_ushort = 0x0100; +pub const RTF_REJECT: ::c_ushort = 0x0200; +pub const RTF_STATIC: ::c_ushort = 0x0400; +pub const RTF_XRESOLVE: ::c_ushort = 0x0800; +pub const RTF_NOFORWARD: ::c_ushort = 0x1000; +pub const RTF_THROW: ::c_ushort = 0x2000; +pub const RTF_NOPMTUDISC: ::c_ushort = 0x4000; + +pub const RTF_DEFAULT: u32 = 0x00010000; +pub const RTF_ALLONLINK: u32 = 0x00020000; +pub const RTF_ADDRCONF: u32 = 0x00040000; +pub const RTF_LINKRT: u32 = 0x00100000; +pub const 
RTF_NONEXTHOP: u32 = 0x00200000; +pub const RTF_CACHE: u32 = 0x01000000; +pub const RTF_FLOW: u32 = 0x02000000; +pub const RTF_POLICY: u32 = 0x04000000; + +pub const RTCF_VALVE: u32 = 0x00200000; +pub const RTCF_MASQ: u32 = 0x00400000; +pub const RTCF_NAT: u32 = 0x00800000; +pub const RTCF_DOREDIRECT: u32 = 0x01000000; +pub const RTCF_LOG: u32 = 0x02000000; +pub const RTCF_DIRECTSRC: u32 = 0x04000000; + +pub const RTF_LOCAL: u32 = 0x80000000; +pub const RTF_INTERFACE: u32 = 0x40000000; +pub const RTF_MULTICAST: u32 = 0x20000000; +pub const RTF_BROADCAST: u32 = 0x10000000; +pub const RTF_NAT: u32 = 0x08000000; +pub const RTF_ADDRCLASSMASK: u32 = 0xF8000000; + +pub const RT_CLASS_UNSPEC: u8 = 0; +pub const RT_CLASS_DEFAULT: u8 = 253; +pub const RT_CLASS_MAIN: u8 = 254; +pub const RT_CLASS_LOCAL: u8 = 255; +pub const RT_CLASS_MAX: u8 = 255; + +pub const RTMSG_OVERRUN: u32 = ::NLMSG_OVERRUN as u32; +pub const RTMSG_NEWDEVICE: u32 = 0x11; +pub const RTMSG_DELDEVICE: u32 = 0x12; +pub const RTMSG_NEWROUTE: u32 = 0x21; +pub const RTMSG_DELROUTE: u32 = 0x22; +pub const RTMSG_NEWRULE: u32 = 0x31; +pub const RTMSG_DELRULE: u32 = 0x32; +pub const RTMSG_CONTROL: u32 = 0x40; +pub const RTMSG_AR_FAILED: u32 = 0x51; + +pub const MAX_ADDR_LEN: usize = 7; +pub const ARPD_UPDATE: ::c_ushort = 0x01; +pub const ARPD_LOOKUP: ::c_ushort = 0x02; +pub const ARPD_FLUSH: ::c_ushort = 0x03; +pub const ATF_MAGIC: ::c_int = 0x80; + +// linux/module.h +pub const MODULE_INIT_IGNORE_MODVERSIONS: ::c_uint = 0x0001; +pub const MODULE_INIT_IGNORE_VERMAGIC: ::c_uint = 0x0002; + f! { pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () { for slot in cpuset.bits.iter_mut() { @@ -1449,6 +1631,26 @@ f! 
{ dev |= (minor & 0xffffff00) << 12; dev } + + pub fn IPTOS_TOS(tos: u8) -> u8 { + tos & IPTOS_TOS_MASK + } + + pub fn IPTOS_PREC(tos: u8) -> u8 { + tos & IPTOS_PREC_MASK + } + + pub fn RT_TOS(tos: u8) -> u8 { + tos & ::IPTOS_TOS_MASK + } + + pub fn RT_ADDRCLASS(flags: u32) -> u32 { + flags >> 23 + } + + pub fn RT_LOCALADDR(flags: u32) -> bool { + (flags & RTF_ADDRCLASSMASK) == (RTF_LOCAL | RTF_INTERFACE) + } } extern { @@ -1639,8 +1841,6 @@ extern { pub fn if_freenameindex(ptr: *mut if_nameindex); pub fn sync_file_range(fd: ::c_int, offset: ::off64_t, nbytes: ::off64_t, flags: ::c_uint) -> ::c_int; - pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; - pub fn freeifaddrs(ifa: *mut ::ifaddrs); pub fn mremap(addr: *mut ::c_void, len: ::size_t, new_len: ::size_t, @@ -1675,21 +1875,6 @@ extern { pub fn futimes(fd: ::c_int, times: *const ::timeval) -> ::c_int; pub fn nl_langinfo(item: ::nl_item) -> *mut ::c_char; - pub fn bind(socket: ::c_int, address: *const ::sockaddr, - address_len: ::socklen_t) -> ::c_int; - - pub fn writev(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - pub fn readv(fd: ::c_int, - iov: *const ::iovec, - iovcnt: ::c_int) -> ::ssize_t; - - pub fn sendmsg(fd: ::c_int, - msg: *const ::msghdr, - flags: ::c_int) -> ::ssize_t; - pub fn recvmsg(fd: ::c_int, msg: *mut ::msghdr, flags: ::c_int) - -> ::ssize_t; pub fn getdomainname(name: *mut ::c_char, len: ::size_t) -> ::c_int; pub fn setdomainname(name: *const ::c_char, len: ::size_t) -> ::c_int; pub fn vhangup() -> ::c_int; @@ -1736,6 +1921,8 @@ extern { pub fn sched_rr_get_interval(pid: ::pid_t, tp: *mut ::timespec) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn sched_setparam(pid: ::pid_t, param: *const ::sched_param) -> ::c_int; pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int; pub fn swapoff(puath: *const ::c_char) -> ::c_int; @@ -1937,6 +2124,11 
@@ extern { fd: ::c_int, newfd: ::c_int, ) -> ::c_int; + pub fn fread_unlocked(ptr: *mut ::c_void, + size: ::size_t, + nobj: ::size_t, + stream: *mut ::FILE + ) -> ::size_t; } cfg_if! { diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/arm.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/arm.rs index 22bf16c1fda9..20fa33a3f776 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/arm.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/arm.rs @@ -52,6 +52,18 @@ s! { pub ss_size: ::size_t } + pub struct ipc_perm { + pub __ipc_perm_key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::c_int, + __unused1: ::c_long, + __unused2: ::c_long + } + pub struct shmid_ds { pub shm_perm: ::ipc_perm, pub shm_segsz: ::size_t, @@ -151,6 +163,9 @@ s! { } } +pub const SIGSTKSZ: ::size_t = 8192; +pub const MINSIGSTKSZ: ::size_t = 2048; + pub const O_DIRECT: ::c_int = 0x10000; pub const O_DIRECTORY: ::c_int = 0x4000; pub const O_NOFOLLOW: ::c_int = 0x8000; @@ -165,6 +180,76 @@ pub const RLIMIT_NOFILE: ::c_int = 7; pub const RLIMIT_AS: ::c_int = 9; pub const RLIMIT_NPROC: ::c_int = 6; pub const RLIMIT_MEMLOCK: ::c_int = 8; +pub const RLIMIT_NLIMITS: ::c_int = 16; + +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: 
::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; +pub const CIBAUD: ::tcflag_t = 0o02003600000; +pub const CBAUDEX: ::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: ::tcflag_t = 0o000002; +pub const NLDLY: ::tcflag_t = 0o000400; +pub const CRDLY: ::tcflag_t = 0o003000; +pub const TABDLY: ::tcflag_t = 0o014000; +pub const BSDLY: ::tcflag_t = 0o020000; +pub const FFDLY: ::tcflag_t = 0o100000; +pub const VTDLY: ::tcflag_t = 0o040000; +pub const XTABS: ::tcflag_t = 0o014000; +pub const B57600: ::speed_t = 0o010001; +pub const B115200: ::speed_t = 0o010002; +pub const B230400: ::speed_t = 0o010003; +pub const B460800: ::speed_t = 0o010004; +pub const B500000: ::speed_t = 0o010005; +pub const B576000: ::speed_t = 0o010006; +pub const B921600: ::speed_t = 0o010007; +pub const B1000000: ::speed_t = 0o010010; +pub const B1152000: ::speed_t = 0o010011; +pub const B1500000: ::speed_t = 0o010012; +pub const B2000000: ::speed_t = 0o010013; +pub const B2500000: ::speed_t = 0o010014; +pub const B3000000: ::speed_t = 0o010015; +pub const B3500000: ::speed_t 
= 0o010016; +pub const B4000000: ::speed_t = 0o010017; pub const O_APPEND: ::c_int = 1024; pub const O_CREAT: ::c_int = 64; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mips.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mips.rs index 89231a0c7516..bfde73c563dc 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mips.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mips.rs @@ -54,6 +54,18 @@ s! { pub ss_flags: ::c_int, } + pub struct ipc_perm { + pub __ipc_perm_key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::c_int, + __unused1: ::c_long, + __unused2: ::c_long + } + pub struct shmid_ds { pub shm_perm: ::ipc_perm, pub shm_segsz: ::size_t, @@ -162,6 +174,9 @@ s! { } } +pub const SIGSTKSZ: ::size_t = 8192; +pub const MINSIGSTKSZ: ::size_t = 2048; + pub const O_DIRECT: ::c_int = 0o100000; pub const O_DIRECTORY: ::c_int = 0o200000; pub const O_NOFOLLOW: ::c_int = 0o400000; @@ -176,6 +191,76 @@ pub const RLIMIT_NOFILE: ::c_int = 5; pub const RLIMIT_AS: ::c_int = 6; pub const RLIMIT_NPROC: ::c_int = 8; pub const RLIMIT_MEMLOCK: ::c_int = 9; +pub const RLIMIT_NLIMITS: ::c_int = 16; + +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 
0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; +pub const CIBAUD: ::tcflag_t = 0o02003600000; +pub const CBAUDEX: ::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: ::tcflag_t = 0o000002; +pub const NLDLY: ::tcflag_t = 0o000400; +pub const CRDLY: ::tcflag_t = 0o003000; +pub const TABDLY: ::tcflag_t = 0o014000; +pub const BSDLY: ::tcflag_t = 0o020000; +pub const FFDLY: ::tcflag_t = 0o100000; +pub const VTDLY: ::tcflag_t = 0o040000; +pub const XTABS: ::tcflag_t = 0o014000; +pub const B57600: ::speed_t = 0o010001; +pub const B115200: ::speed_t = 0o010002; +pub const B230400: ::speed_t = 0o010003; +pub const B460800: ::speed_t = 0o010004; +pub const B500000: ::speed_t = 0o010005; +pub const B576000: ::speed_t = 0o010006; +pub const B921600: ::speed_t = 0o010007; +pub const B1000000: ::speed_t = 0o010010; +pub const B1152000: ::speed_t = 0o010011; +pub const B1500000: ::speed_t = 0o010012; +pub const B2000000: ::speed_t = 0o010013; +pub const B2500000: ::speed_t = 0o010014; +pub const B3000000: ::speed_t = 0o010015; +pub const B3500000: ::speed_t = 0o010016; +pub const B4000000: ::speed_t = 0o010017; 
pub const O_APPEND: ::c_int = 0o010; pub const O_CREAT: ::c_int = 0o400; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mod.rs index f6e19d981ddc..4128a8e4da6d 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/mod.rs @@ -32,26 +32,17 @@ s! { pub struct sem_t { __val: [::c_int; 4], } - - pub struct ipc_perm { - pub __ipc_perm_key: ::key_t, - pub uid: ::uid_t, - pub gid: ::gid_t, - pub cuid: ::uid_t, - pub cgid: ::gid_t, - pub mode: ::mode_t, - pub __seq: ::c_int, - __unused1: ::c_long, - __unused2: ::c_long - } } -pub const SIGSTKSZ: ::size_t = 8192; -pub const MINSIGSTKSZ: ::size_t = 2048; - pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 24; +pub const TIOCINQ: ::c_int = ::FIONREAD; + +extern { + pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; +} + cfg_if! { if #[cfg(any(target_arch = "x86"))] { mod x86; @@ -62,6 +53,9 @@ cfg_if! { } else if #[cfg(any(target_arch = "arm"))] { mod arm; pub use self::arm::*; + } else if #[cfg(any(target_arch = "powerpc"))] { + mod powerpc; + pub use self::powerpc::*; } else { // Unknown target_arch } diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/powerpc.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/powerpc.rs new file mode 100644 index 000000000000..50b6b57ef11a --- /dev/null +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/powerpc.rs @@ -0,0 +1,866 @@ +pub type c_char = u8; +pub type wchar_t = i32; + +s! 
{ + pub struct stat { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::dev_t, + __st_rdev_padding: ::c_short, + pub st_size: ::off_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + __unused: [::c_long; 2], + } + + pub struct stat64 { + pub st_dev: ::dev_t, + pub st_ino: ::ino_t, + pub st_mode: ::mode_t, + pub st_nlink: ::nlink_t, + pub st_uid: ::uid_t, + pub st_gid: ::gid_t, + pub st_rdev: ::dev_t, + __st_rdev_padding: ::c_short, + pub st_size: ::off_t, + pub st_blksize: ::blksize_t, + pub st_blocks: ::blkcnt_t, + pub st_atime: ::time_t, + pub st_atime_nsec: ::c_long, + pub st_mtime: ::time_t, + pub st_mtime_nsec: ::c_long, + pub st_ctime: ::time_t, + pub st_ctime_nsec: ::c_long, + __unused: [::c_long; 2], + } + + pub struct stack_t { + pub ss_sp: *mut ::c_void, + pub ss_flags: ::c_int, + pub ss_size: ::size_t + } + + pub struct ipc_perm { + pub __ipc_perm_key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::c_int, + __pad1: ::c_int, + __pad2: ::c_longlong, + __pad3: ::c_longlong + } + + pub struct shmid_ds { + pub shm_perm: ::ipc_perm, + __unused1: ::c_int, + pub shm_atime: ::time_t, + __unused2: ::c_int, + pub shm_dtime: ::time_t, + __unused3: ::c_int, + pub shm_ctime: ::time_t, + __unused4: ::c_int, + pub shm_segsz: ::size_t, + pub shm_cpid: ::pid_t, + pub shm_lpid: ::pid_t, + pub shm_nattch: ::c_ulong, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + + pub struct msqid_ds { + pub msg_perm: ::ipc_perm, + __unused1: ::c_int, + pub msg_stime: ::time_t, + __unused2: ::c_int, + pub msg_rtime: ::time_t, + __unused3: ::c_int, + pub msg_ctime: ::time_t, + __msg_cbytes: ::c_ulong, + pub msg_qnum: 
::msgqnum_t, + pub msg_qbytes: ::msglen_t, + pub msg_lspid: ::pid_t, + pub msg_lrpid: ::pid_t, + __pad1: ::c_ulong, + __pad2: ::c_ulong, + } + + pub struct statfs { + pub f_type: ::c_ulong, + pub f_bsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_fsid: ::fsid_t, + pub f_namelen: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_flags: ::c_ulong, + pub f_spare: [::c_ulong; 4], + } + + pub struct siginfo_t { + pub si_signo: ::c_int, + pub si_errno: ::c_int, + pub si_code: ::c_int, + pub _pad: [::c_int; 29], + _align: [usize; 0], + } + + pub struct statfs64 { + pub f_type: ::c_ulong, + pub f_bsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_fsid: ::fsid_t, + pub f_namelen: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_flags: ::c_ulong, + pub f_spare: [::c_ulong; 4], + } + + pub struct statvfs64 { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: u64, + pub f_bfree: u64, + pub f_bavail: u64, + pub f_files: u64, + pub f_ffree: u64, + pub f_favail: u64, + #[cfg(target_endian = "little")] + pub f_fsid: ::c_ulong, + __f_unused: ::c_int, + #[cfg(target_endian = "big")] + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + + pub struct termios2 { + pub c_iflag: ::tcflag_t, + pub c_oflag: ::tcflag_t, + pub c_cflag: ::tcflag_t, + pub c_lflag: ::tcflag_t, + pub c_cc: [::cc_t; 19], + pub c_line: ::cc_t, + pub c_ispeed: ::speed_t, + pub c_ospeed: ::speed_t, + } +} + +pub const SIGSTKSZ: ::size_t = 10240; +pub const MINSIGSTKSZ: ::size_t = 4096; + +pub const O_DIRECT: ::c_int = 0x20000; +pub const O_DIRECTORY: ::c_int = 0x4000; +pub const O_NOFOLLOW: ::c_int = 0x8000; +pub const O_ASYNC: ::c_int = 0x2000; +pub const O_LARGEFILE: ::c_int = 0x10000; + +pub const 
FIOCLEX: ::c_int = 0x20006601; +pub const FIONBIO: ::c_int = 0x8004667E; + +pub const RLIMIT_RSS: ::c_int = 5; +pub const RLIMIT_NOFILE: ::c_int = 7; +pub const RLIMIT_AS: ::c_int = 9; +pub const RLIMIT_NPROC: ::c_int = 6; +pub const RLIMIT_MEMLOCK: ::c_int = 8; +pub const RLIMIT_NLIMITS: ::c_int = 15; + +pub const MCL_CURRENT: ::c_int = 0x2000; +pub const MCL_FUTURE: ::c_int = 0x4000; +pub const CBAUD: ::tcflag_t = 0o0000377; +pub const TAB1: ::c_int = 0x00000400; +pub const TAB2: ::c_int = 0x00000800; +pub const TAB3: ::c_int = 0x00000C00; +pub const CR1: ::c_int = 0x00001000; +pub const CR2: ::c_int = 0x00002000; +pub const CR3: ::c_int = 0x00003000; +pub const FF1: ::c_int = 0x00004000; +pub const BS1: ::c_int = 0x00008000; +pub const VT1: ::c_int = 0x00010000; +pub const VWERASE: usize = 10; +pub const VREPRINT: usize = 11; +pub const VSUSP: usize = 12; +pub const VSTART: usize = 13; +pub const VSTOP: usize = 14; +pub const VDISCARD: usize = 16; +pub const VTIME: usize = 7; +pub const IXON: ::tcflag_t = 0x00000200; +pub const IXOFF: ::tcflag_t = 0x00000400; +pub const ONLCR: ::tcflag_t = 0x00000002; +pub const CSIZE: ::tcflag_t = 0x00000300; +pub const CS6: ::tcflag_t = 0x00000100; +pub const CS7: ::tcflag_t = 0x00000200; +pub const CS8: ::tcflag_t = 0x00000300; +pub const CSTOPB: ::tcflag_t = 0x00000400; +pub const CREAD: ::tcflag_t = 0x00000800; +pub const PARENB: ::tcflag_t = 0x00001000; +pub const PARODD: ::tcflag_t = 0x00002000; +pub const HUPCL: ::tcflag_t = 0x00004000; +pub const CLOCAL: ::tcflag_t = 0x00008000; +pub const ECHOKE: ::tcflag_t = 0x00000001; +pub const ECHOE: ::tcflag_t = 0x00000002; +pub const ECHOK: ::tcflag_t = 0x00000004; +pub const ECHONL: ::tcflag_t = 0x00000010; +pub const ECHOPRT: ::tcflag_t = 0x00000020; +pub const ECHOCTL: ::tcflag_t = 0x00000040; +pub const ISIG: ::tcflag_t = 0x00000080; +pub const ICANON: ::tcflag_t = 0x00000100; +pub const PENDIN: ::tcflag_t = 0x20000000; +pub const NOFLSH: ::tcflag_t = 0x80000000; +pub const 
CIBAUD: ::tcflag_t = 0o00077600000; +pub const CBAUDEX: ::tcflag_t = 0o000020; +pub const VSWTC: usize = 9; +pub const OLCUC: ::tcflag_t = 0o000004; +pub const NLDLY: ::tcflag_t = 0o001400; +pub const CRDLY: ::tcflag_t = 0o030000; +pub const TABDLY: ::tcflag_t = 0o006000; +pub const BSDLY: ::tcflag_t = 0o100000; +pub const FFDLY: ::tcflag_t = 0o040000; +pub const VTDLY: ::tcflag_t = 0o200000; +pub const XTABS: ::tcflag_t = 0o006000; +pub const B57600: ::speed_t = 0o000020; +pub const B115200: ::speed_t = 0o000021; +pub const B230400: ::speed_t = 0o000022; +pub const B460800: ::speed_t = 0o000023; +pub const B500000: ::speed_t = 0o000024; +pub const B576000: ::speed_t = 0o000025; +pub const B921600: ::speed_t = 0o000026; +pub const B1000000: ::speed_t = 0o000027; +pub const B1152000: ::speed_t = 0o000030; +pub const B1500000: ::speed_t = 0o000031; +pub const B2000000: ::speed_t = 0o000032; +pub const B2500000: ::speed_t = 0o000033; +pub const B3000000: ::speed_t = 0o000034; +pub const B3500000: ::speed_t = 0o000035; +pub const B4000000: ::speed_t = 0o000036; + +pub const O_APPEND: ::c_int = 1024; +pub const O_CREAT: ::c_int = 64; +pub const O_EXCL: ::c_int = 128; +pub const O_NOCTTY: ::c_int = 256; +pub const O_NONBLOCK: ::c_int = 2048; +pub const O_SYNC: ::c_int = 1052672; +pub const O_RSYNC: ::c_int = 1052672; +pub const O_DSYNC: ::c_int = 4096; + +pub const SOCK_NONBLOCK: ::c_int = 2048; + +pub const MAP_ANON: ::c_int = 0x0020; +pub const MAP_GROWSDOWN: ::c_int = 0x0100; +pub const MAP_DENYWRITE: ::c_int = 0x0800; +pub const MAP_EXECUTABLE: ::c_int = 0x01000; +pub const MAP_LOCKED: ::c_int = 0x00080; +pub const MAP_NORESERVE: ::c_int = 0x00040; +pub const MAP_POPULATE: ::c_int = 0x08000; +pub const MAP_NONBLOCK: ::c_int = 0x010000; +pub const MAP_STACK: ::c_int = 0x020000; + +pub const SOCK_STREAM: ::c_int = 1; +pub const SOCK_DGRAM: ::c_int = 2; +pub const SOCK_SEQPACKET: ::c_int = 5; + +pub const SOL_SOCKET: ::c_int = 1; + +pub const EDEADLK: ::c_int = 35; +pub 
const ENAMETOOLONG: ::c_int = 36; +pub const ENOLCK: ::c_int = 37; +pub const ENOSYS: ::c_int = 38; +pub const ENOTEMPTY: ::c_int = 39; +pub const ELOOP: ::c_int = 40; +pub const ENOMSG: ::c_int = 42; +pub const EIDRM: ::c_int = 43; +pub const ECHRNG: ::c_int = 44; +pub const EL2NSYNC: ::c_int = 45; +pub const EL3HLT: ::c_int = 46; +pub const EL3RST: ::c_int = 47; +pub const ELNRNG: ::c_int = 48; +pub const EUNATCH: ::c_int = 49; +pub const ENOCSI: ::c_int = 50; +pub const EL2HLT: ::c_int = 51; +pub const EBADE: ::c_int = 52; +pub const EBADR: ::c_int = 53; +pub const EXFULL: ::c_int = 54; +pub const ENOANO: ::c_int = 55; +pub const EBADRQC: ::c_int = 56; +pub const EBADSLT: ::c_int = 57; +pub const EDEADLOCK: ::c_int = 58; +pub const EMULTIHOP: ::c_int = 72; +pub const EBADMSG: ::c_int = 74; +pub const EOVERFLOW: ::c_int = 75; +pub const ENOTUNIQ: ::c_int = 76; +pub const EBADFD: ::c_int = 77; +pub const EREMCHG: ::c_int = 78; +pub const ELIBACC: ::c_int = 79; +pub const ELIBBAD: ::c_int = 80; +pub const ELIBSCN: ::c_int = 81; +pub const ELIBMAX: ::c_int = 82; +pub const ELIBEXEC: ::c_int = 83; +pub const EILSEQ: ::c_int = 84; +pub const ERESTART: ::c_int = 85; +pub const ESTRPIPE: ::c_int = 86; +pub const EUSERS: ::c_int = 87; +pub const ENOTSOCK: ::c_int = 88; +pub const EDESTADDRREQ: ::c_int = 89; +pub const EMSGSIZE: ::c_int = 90; +pub const EPROTOTYPE: ::c_int = 91; +pub const ENOPROTOOPT: ::c_int = 92; +pub const EPROTONOSUPPORT: ::c_int = 93; +pub const ESOCKTNOSUPPORT: ::c_int = 94; +pub const EOPNOTSUPP: ::c_int = 95; +pub const ENOTSUP: ::c_int = EOPNOTSUPP; +pub const EPFNOSUPPORT: ::c_int = 96; +pub const EAFNOSUPPORT: ::c_int = 97; +pub const EADDRINUSE: ::c_int = 98; +pub const EADDRNOTAVAIL: ::c_int = 99; +pub const ENETDOWN: ::c_int = 100; +pub const ENETUNREACH: ::c_int = 101; +pub const ENETRESET: ::c_int = 102; +pub const ECONNABORTED: ::c_int = 103; +pub const ECONNRESET: ::c_int = 104; +pub const ENOBUFS: ::c_int = 105; +pub const EISCONN: 
::c_int = 106; +pub const ENOTCONN: ::c_int = 107; +pub const ESHUTDOWN: ::c_int = 108; +pub const ETOOMANYREFS: ::c_int = 109; +pub const ETIMEDOUT: ::c_int = 110; +pub const ECONNREFUSED: ::c_int = 111; +pub const EHOSTDOWN: ::c_int = 112; +pub const EHOSTUNREACH: ::c_int = 113; +pub const EALREADY: ::c_int = 114; +pub const EINPROGRESS: ::c_int = 115; +pub const ESTALE: ::c_int = 116; +pub const EUCLEAN: ::c_int = 117; +pub const ENOTNAM: ::c_int = 118; +pub const ENAVAIL: ::c_int = 119; +pub const EISNAM: ::c_int = 120; +pub const EREMOTEIO: ::c_int = 121; +pub const EDQUOT: ::c_int = 122; +pub const ENOMEDIUM: ::c_int = 123; +pub const EMEDIUMTYPE: ::c_int = 124; +pub const ECANCELED: ::c_int = 125; +pub const ENOKEY: ::c_int = 126; +pub const EKEYEXPIRED: ::c_int = 127; +pub const EKEYREVOKED: ::c_int = 128; +pub const EKEYREJECTED: ::c_int = 129; +pub const EOWNERDEAD: ::c_int = 130; +pub const ENOTRECOVERABLE: ::c_int = 131; +pub const ERFKILL: ::c_int = 132; +pub const EHWPOISON: ::c_int = 133; + +pub const SO_REUSEADDR: ::c_int = 2; +pub const SO_TYPE: ::c_int = 3; +pub const SO_ERROR: ::c_int = 4; +pub const SO_DONTROUTE: ::c_int = 5; +pub const SO_BROADCAST: ::c_int = 6; +pub const SO_SNDBUF: ::c_int = 7; +pub const SO_RCVBUF: ::c_int = 8; +pub const SO_KEEPALIVE: ::c_int = 9; +pub const SO_OOBINLINE: ::c_int = 10; +pub const SO_NO_CHECK: ::c_int = 11; +pub const SO_PRIORITY: ::c_int = 12; +pub const SO_LINGER: ::c_int = 13; +pub const SO_BSDCOMPAT: ::c_int = 14; +pub const SO_REUSEPORT: ::c_int = 15; +pub const SO_RCVLOWAT: ::c_int = 16; +pub const SO_SNDLOWAT: ::c_int = 17; +pub const SO_RCVTIMEO: ::c_int = 18; +pub const SO_SNDTIMEO: ::c_int = 19; +pub const SO_PASSCRED: ::c_int = 20; +pub const SO_PEERCRED: ::c_int = 21; +pub const SO_ACCEPTCONN: ::c_int = 30; +pub const SO_SNDBUFFORCE: ::c_int = 32; +pub const SO_RCVBUFFORCE: ::c_int = 33; +pub const SO_PROTOCOL: ::c_int = 38; +pub const SO_DOMAIN: ::c_int = 39; + +pub const SA_ONSTACK: ::c_int = 
0x08000000; +pub const SA_SIGINFO: ::c_int = 0x00000004; +pub const SA_NOCLDWAIT: ::c_int = 0x00000002; + +pub const SIGCHLD: ::c_int = 17; +pub const SIGBUS: ::c_int = 7; +pub const SIGTTIN: ::c_int = 21; +pub const SIGTTOU: ::c_int = 22; +pub const SIGXCPU: ::c_int = 24; +pub const SIGXFSZ: ::c_int = 25; +pub const SIGVTALRM: ::c_int = 26; +pub const SIGPROF: ::c_int = 27; +pub const SIGWINCH: ::c_int = 28; +pub const SIGUSR1: ::c_int = 10; +pub const SIGUSR2: ::c_int = 12; +pub const SIGCONT: ::c_int = 18; +pub const SIGSTOP: ::c_int = 19; +pub const SIGTSTP: ::c_int = 20; +pub const SIGURG: ::c_int = 23; +pub const SIGIO: ::c_int = 29; +pub const SIGSYS: ::c_int = 31; +pub const SIGSTKFLT: ::c_int = 16; +pub const SIGPOLL: ::c_int = 29; +pub const SIGPWR: ::c_int = 30; +pub const SIG_SETMASK: ::c_int = 2; +pub const SIG_BLOCK: ::c_int = 0x000000; +pub const SIG_UNBLOCK: ::c_int = 0x01; + +pub const EXTPROC: ::tcflag_t = 0x10000000; + +pub const MAP_HUGETLB: ::c_int = 0x040000; + +pub const F_GETLK: ::c_int = 12; +pub const F_GETOWN: ::c_int = 9; +pub const F_SETLK: ::c_int = 13; +pub const F_SETLKW: ::c_int = 14; +pub const F_SETOWN: ::c_int = 8; + +pub const VEOF: usize = 4; +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: ::tcflag_t = 0x00000400; +pub const TOSTOP: ::tcflag_t = 0x00400000; +pub const FLUSHO: ::tcflag_t = 0x00800000; + +pub const TCGETS: ::c_int = 0x402C7413; +pub const TCSETS: ::c_int = 0x802C7414; +pub const TCSETSW: ::c_int = 0x802C7415; +pub const TCSETSF: ::c_int = 0x802C7416; +pub const TCGETA: ::c_int = 0x40147417; +pub const TCSETA: ::c_int = 0x80147418; +pub const TCSETAW: ::c_int = 0x80147419; +pub const TCSETAF: ::c_int = 0x8014741C; +pub const TCSBRK: ::c_int = 0x2000741D; +pub const TCXONC: ::c_int = 0x2000741E; +pub const TCFLSH: ::c_int = 0x2000741F; +pub const TIOCGSOFTCAR: ::c_int = 0x5419; +pub const TIOCSSOFTCAR: ::c_int = 0x541A; +pub const TIOCLINUX: ::c_int = 0x541C; 
+pub const TIOCGSERIAL: ::c_int = 0x541E; +pub const TIOCEXCL: ::c_int = 0x540C; +pub const TIOCNXCL: ::c_int = 0x540D; +pub const TIOCSCTTY: ::c_int = 0x540E; +pub const TIOCGPGRP: ::c_int = 0x40047477; +pub const TIOCSPGRP: ::c_int = 0x80047476; +pub const TIOCOUTQ: ::c_int = 0x40047473; +pub const TIOCSTI: ::c_int = 0x5412; +pub const TIOCGWINSZ: ::c_int = 0x40087468; +pub const TIOCSWINSZ: ::c_int = 0x80087467; +pub const TIOCMGET: ::c_int = 0x5415; +pub const TIOCMBIS: ::c_int = 0x5416; +pub const TIOCMBIC: ::c_int = 0x5417; +pub const TIOCMSET: ::c_int = 0x5418; +pub const FIONREAD: ::c_int = 0x4004667F; +pub const TIOCCONS: ::c_int = 0x541D; + +pub const POLLWRNORM: ::c_short = 0x100; +pub const POLLWRBAND: ::c_short = 0x200; + +pub const TIOCM_LE: ::c_int = 0x001; +pub const TIOCM_DTR: ::c_int = 0x002; +pub const TIOCM_RTS: ::c_int = 0x004; +pub const TIOCM_ST: ::c_int = 0x008; +pub const TIOCM_SR: ::c_int = 0x010; +pub const TIOCM_CTS: ::c_int = 0x020; +pub const TIOCM_CAR: ::c_int = 0x040; +pub const TIOCM_RNG: ::c_int = 0x080; +pub const TIOCM_DSR: ::c_int = 0x100; +pub const TIOCM_CD: ::c_int = TIOCM_CAR; +pub const TIOCM_RI: ::c_int = TIOCM_RNG; + +// Syscall table +pub const SYS_restart_syscall: ::c_long = 0; +pub const SYS_exit: ::c_long = 1; +pub const SYS_fork: ::c_long = 2; +pub const SYS_read: ::c_long = 3; +pub const SYS_write: ::c_long = 4; +pub const SYS_open: ::c_long = 5; +pub const SYS_close: ::c_long = 6; +pub const SYS_waitpid: ::c_long = 7; +pub const SYS_creat: ::c_long = 8; +pub const SYS_link: ::c_long = 9; +pub const SYS_unlink: ::c_long = 10; +pub const SYS_execve: ::c_long = 11; +pub const SYS_chdir: ::c_long = 12; +pub const SYS_time: ::c_long = 13; +pub const SYS_mknod: ::c_long = 14; +pub const SYS_chmod: ::c_long = 15; +pub const SYS_lchown: ::c_long = 16; +pub const SYS_break: ::c_long = 17; +pub const SYS_oldstat: ::c_long = 18; +pub const SYS_lseek: ::c_long = 19; +pub const SYS_getpid: ::c_long = 20; +pub const SYS_mount: 
::c_long = 21; +pub const SYS_umount: ::c_long = 22; +pub const SYS_setuid: ::c_long = 23; +pub const SYS_getuid: ::c_long = 24; +pub const SYS_stime: ::c_long = 25; +pub const SYS_ptrace: ::c_long = 26; +pub const SYS_alarm: ::c_long = 27; +pub const SYS_oldfstat: ::c_long = 28; +pub const SYS_pause: ::c_long = 29; +pub const SYS_utime: ::c_long = 30; +pub const SYS_stty: ::c_long = 31; +pub const SYS_gtty: ::c_long = 32; +pub const SYS_access: ::c_long = 33; +pub const SYS_nice: ::c_long = 34; +pub const SYS_ftime: ::c_long = 35; +pub const SYS_sync: ::c_long = 36; +pub const SYS_kill: ::c_long = 37; +pub const SYS_rename: ::c_long = 38; +pub const SYS_mkdir: ::c_long = 39; +pub const SYS_rmdir: ::c_long = 40; +pub const SYS_dup: ::c_long = 41; +pub const SYS_pipe: ::c_long = 42; +pub const SYS_times: ::c_long = 43; +pub const SYS_prof: ::c_long = 44; +pub const SYS_brk: ::c_long = 45; +pub const SYS_setgid: ::c_long = 46; +pub const SYS_getgid: ::c_long = 47; +pub const SYS_signal: ::c_long = 48; +pub const SYS_geteuid: ::c_long = 49; +pub const SYS_getegid: ::c_long = 50; +pub const SYS_acct: ::c_long = 51; +pub const SYS_umount2: ::c_long = 52; +pub const SYS_lock: ::c_long = 53; +pub const SYS_ioctl: ::c_long = 54; +pub const SYS_fcntl: ::c_long = 55; +pub const SYS_mpx: ::c_long = 56; +pub const SYS_setpgid: ::c_long = 57; +pub const SYS_ulimit: ::c_long = 58; +pub const SYS_oldolduname: ::c_long = 59; +pub const SYS_umask: ::c_long = 60; +pub const SYS_chroot: ::c_long = 61; +pub const SYS_ustat: ::c_long = 62; +pub const SYS_dup2: ::c_long = 63; +pub const SYS_getppid: ::c_long = 64; +pub const SYS_getpgrp: ::c_long = 65; +pub const SYS_setsid: ::c_long = 66; +pub const SYS_sigaction: ::c_long = 67; +pub const SYS_sgetmask: ::c_long = 68; +pub const SYS_ssetmask: ::c_long = 69; +pub const SYS_setreuid: ::c_long = 70; +pub const SYS_setregid: ::c_long = 71; +pub const SYS_sigsuspend: ::c_long = 72; +pub const SYS_sigpending: ::c_long = 73; +pub const 
SYS_sethostname: ::c_long = 74; +pub const SYS_setrlimit: ::c_long = 75; +pub const SYS_getrlimit: ::c_long = 76; +pub const SYS_getrusage: ::c_long = 77; +pub const SYS_gettimeofday: ::c_long = 78; +pub const SYS_settimeofday: ::c_long = 79; +pub const SYS_getgroups: ::c_long = 80; +pub const SYS_setgroups: ::c_long = 81; +pub const SYS_select: ::c_long = 82; +pub const SYS_symlink: ::c_long = 83; +pub const SYS_oldlstat: ::c_long = 84; +pub const SYS_readlink: ::c_long = 85; +pub const SYS_uselib: ::c_long = 86; +pub const SYS_swapon: ::c_long = 87; +pub const SYS_reboot: ::c_long = 88; +pub const SYS_readdir: ::c_long = 89; +pub const SYS_mmap: ::c_long = 90; +pub const SYS_munmap: ::c_long = 91; +pub const SYS_truncate: ::c_long = 92; +pub const SYS_ftruncate: ::c_long = 93; +pub const SYS_fchmod: ::c_long = 94; +pub const SYS_fchown: ::c_long = 95; +pub const SYS_getpriority: ::c_long = 96; +pub const SYS_setpriority: ::c_long = 97; +pub const SYS_profil: ::c_long = 98; +pub const SYS_statfs: ::c_long = 99; +pub const SYS_fstatfs: ::c_long = 100; +pub const SYS_ioperm: ::c_long = 101; +pub const SYS_socketcall: ::c_long = 102; +pub const SYS_syslog: ::c_long = 103; +pub const SYS_setitimer: ::c_long = 104; +pub const SYS_getitimer: ::c_long = 105; +pub const SYS_stat: ::c_long = 106; +pub const SYS_lstat: ::c_long = 107; +pub const SYS_fstat: ::c_long = 108; +pub const SYS_olduname: ::c_long = 109; +pub const SYS_iopl: ::c_long = 110; +pub const SYS_vhangup: ::c_long = 111; +pub const SYS_idle: ::c_long = 112; +pub const SYS_vm86: ::c_long = 113; +pub const SYS_wait4: ::c_long = 114; +pub const SYS_swapoff: ::c_long = 115; +pub const SYS_sysinfo: ::c_long = 116; +pub const SYS_ipc: ::c_long = 117; +pub const SYS_fsync: ::c_long = 118; +pub const SYS_sigreturn: ::c_long = 119; +pub const SYS_clone: ::c_long = 120; +pub const SYS_setdomainname: ::c_long = 121; +pub const SYS_uname: ::c_long = 122; +pub const SYS_modify_ldt: ::c_long = 123; +pub const 
SYS_adjtimex: ::c_long = 124; +pub const SYS_mprotect: ::c_long = 125; +pub const SYS_sigprocmask: ::c_long = 126; +pub const SYS_create_module: ::c_long = 127; +pub const SYS_init_module: ::c_long = 128; +pub const SYS_delete_module: ::c_long = 129; +pub const SYS_get_kernel_syms: ::c_long = 130; +pub const SYS_quotactl: ::c_long = 131; +pub const SYS_getpgid: ::c_long = 132; +pub const SYS_fchdir: ::c_long = 133; +pub const SYS_bdflush: ::c_long = 134; +pub const SYS_sysfs: ::c_long = 135; +pub const SYS_personality: ::c_long = 136; +pub const SYS_afs_syscall: ::c_long = 137; +pub const SYS_setfsuid: ::c_long = 138; +pub const SYS_setfsgid: ::c_long = 139; +pub const SYS__llseek: ::c_long = 140; +pub const SYS_getdents: ::c_long = 141; +pub const SYS__newselect: ::c_long = 142; +pub const SYS_flock: ::c_long = 143; +pub const SYS_msync: ::c_long = 144; +pub const SYS_readv: ::c_long = 145; +pub const SYS_writev: ::c_long = 146; +pub const SYS_getsid: ::c_long = 147; +pub const SYS_fdatasync: ::c_long = 148; +pub const SYS__sysctl: ::c_long = 149; +pub const SYS_mlock: ::c_long = 150; +pub const SYS_munlock: ::c_long = 151; +pub const SYS_mlockall: ::c_long = 152; +pub const SYS_munlockall: ::c_long = 153; +pub const SYS_sched_setparam: ::c_long = 154; +pub const SYS_sched_getparam: ::c_long = 155; +pub const SYS_sched_setscheduler: ::c_long = 156; +pub const SYS_sched_getscheduler: ::c_long = 157; +pub const SYS_sched_yield: ::c_long = 158; +pub const SYS_sched_get_priority_max: ::c_long = 159; +pub const SYS_sched_get_priority_min: ::c_long = 160; +pub const SYS_sched_rr_get_interval: ::c_long = 161; +pub const SYS_nanosleep: ::c_long = 162; +pub const SYS_mremap: ::c_long = 163; +pub const SYS_setresuid: ::c_long = 164; +pub const SYS_getresuid: ::c_long = 165; +pub const SYS_query_module: ::c_long = 166; +pub const SYS_poll: ::c_long = 167; +pub const SYS_nfsservctl: ::c_long = 168; +pub const SYS_setresgid: ::c_long = 169; +pub const SYS_getresgid: ::c_long = 
170; +pub const SYS_prctl: ::c_long = 171; +pub const SYS_rt_sigreturn: ::c_long = 172; +pub const SYS_rt_sigaction: ::c_long = 173; +pub const SYS_rt_sigprocmask: ::c_long = 174; +pub const SYS_rt_sigpending: ::c_long = 175; +pub const SYS_rt_sigtimedwait: ::c_long = 176; +pub const SYS_rt_sigqueueinfo: ::c_long = 177; +pub const SYS_rt_sigsuspend: ::c_long = 178; +pub const SYS_pread64: ::c_long = 179; +pub const SYS_pwrite64: ::c_long = 180; +pub const SYS_chown: ::c_long = 181; +pub const SYS_getcwd: ::c_long = 182; +pub const SYS_capget: ::c_long = 183; +pub const SYS_capset: ::c_long = 184; +pub const SYS_sigaltstack: ::c_long = 185; +pub const SYS_sendfile: ::c_long = 186; +pub const SYS_getpmsg: ::c_long = 187; +pub const SYS_putpmsg: ::c_long = 188; +pub const SYS_vfork: ::c_long = 189; +pub const SYS_ugetrlimit: ::c_long = 190; +pub const SYS_readahead: ::c_long = 191; +pub const SYS_mmap2: ::c_long = 192; +pub const SYS_truncate64: ::c_long = 193; +pub const SYS_ftruncate64: ::c_long = 194; +pub const SYS_stat64: ::c_long = 195; +pub const SYS_lstat64: ::c_long = 196; +pub const SYS_fstat64: ::c_long = 197; +pub const SYS_pciconfig_read: ::c_long = 198; +pub const SYS_pciconfig_write: ::c_long = 199; +pub const SYS_pciconfig_iobase: ::c_long = 200; +pub const SYS_multiplexer: ::c_long = 201; +pub const SYS_getdents64: ::c_long = 202; +pub const SYS_pivot_root: ::c_long = 203; +pub const SYS_fcntl64: ::c_long = 204; +pub const SYS_madvise: ::c_long = 205; +pub const SYS_mincore: ::c_long = 206; +pub const SYS_gettid: ::c_long = 207; +pub const SYS_tkill: ::c_long = 208; +pub const SYS_setxattr: ::c_long = 209; +pub const SYS_lsetxattr: ::c_long = 210; +pub const SYS_fsetxattr: ::c_long = 211; +pub const SYS_getxattr: ::c_long = 212; +pub const SYS_lgetxattr: ::c_long = 213; +pub const SYS_fgetxattr: ::c_long = 214; +pub const SYS_listxattr: ::c_long = 215; +pub const SYS_llistxattr: ::c_long = 216; +pub const SYS_flistxattr: ::c_long = 217; +pub const 
SYS_removexattr: ::c_long = 218; +pub const SYS_lremovexattr: ::c_long = 219; +pub const SYS_fremovexattr: ::c_long = 220; +pub const SYS_futex: ::c_long = 221; +pub const SYS_sched_setaffinity: ::c_long = 222; +pub const SYS_sched_getaffinity: ::c_long = 223; +pub const SYS_tuxcall: ::c_long = 225; +pub const SYS_sendfile64: ::c_long = 226; +pub const SYS_io_setup: ::c_long = 227; +pub const SYS_io_destroy: ::c_long = 228; +pub const SYS_io_getevents: ::c_long = 229; +pub const SYS_io_submit: ::c_long = 230; +pub const SYS_io_cancel: ::c_long = 231; +pub const SYS_set_tid_address: ::c_long = 232; +pub const SYS_fadvise64: ::c_long = 233; +pub const SYS_exit_group: ::c_long = 234; +pub const SYS_lookup_dcookie: ::c_long = 235; +pub const SYS_epoll_create: ::c_long = 236; +pub const SYS_epoll_ctl: ::c_long = 237; +pub const SYS_epoll_wait: ::c_long = 238; +pub const SYS_remap_file_pages: ::c_long = 239; +pub const SYS_timer_create: ::c_long = 240; +pub const SYS_timer_settime: ::c_long = 241; +pub const SYS_timer_gettime: ::c_long = 242; +pub const SYS_timer_getoverrun: ::c_long = 243; +pub const SYS_timer_delete: ::c_long = 244; +pub const SYS_clock_settime: ::c_long = 245; +pub const SYS_clock_gettime: ::c_long = 246; +pub const SYS_clock_getres: ::c_long = 247; +pub const SYS_clock_nanosleep: ::c_long = 248; +pub const SYS_swapcontext: ::c_long = 249; +pub const SYS_tgkill: ::c_long = 250; +pub const SYS_utimes: ::c_long = 251; +pub const SYS_statfs64: ::c_long = 252; +pub const SYS_fstatfs64: ::c_long = 253; +pub const SYS_fadvise64_64: ::c_long = 254; +pub const SYS_rtas: ::c_long = 255; +pub const SYS_sys_debug_setcontext: ::c_long = 256; +pub const SYS_migrate_pages: ::c_long = 258; +pub const SYS_mbind: ::c_long = 259; +pub const SYS_get_mempolicy: ::c_long = 260; +pub const SYS_set_mempolicy: ::c_long = 261; +pub const SYS_mq_open: ::c_long = 262; +pub const SYS_mq_unlink: ::c_long = 263; +pub const SYS_mq_timedsend: ::c_long = 264; +pub const 
SYS_mq_timedreceive: ::c_long = 265; +pub const SYS_mq_notify: ::c_long = 266; +pub const SYS_mq_getsetattr: ::c_long = 267; +pub const SYS_kexec_load: ::c_long = 268; +pub const SYS_add_key: ::c_long = 269; +pub const SYS_request_key: ::c_long = 270; +pub const SYS_keyctl: ::c_long = 271; +pub const SYS_waitid: ::c_long = 272; +pub const SYS_ioprio_set: ::c_long = 273; +pub const SYS_ioprio_get: ::c_long = 274; +pub const SYS_inotify_init: ::c_long = 275; +pub const SYS_inotify_add_watch: ::c_long = 276; +pub const SYS_inotify_rm_watch: ::c_long = 277; +pub const SYS_spu_run: ::c_long = 278; +pub const SYS_spu_create: ::c_long = 279; +pub const SYS_pselect6: ::c_long = 280; +pub const SYS_ppoll: ::c_long = 281; +pub const SYS_unshare: ::c_long = 282; +pub const SYS_splice: ::c_long = 283; +pub const SYS_tee: ::c_long = 284; +pub const SYS_vmsplice: ::c_long = 285; +pub const SYS_openat: ::c_long = 286; +pub const SYS_mkdirat: ::c_long = 287; +pub const SYS_mknodat: ::c_long = 288; +pub const SYS_fchownat: ::c_long = 289; +pub const SYS_futimesat: ::c_long = 290; +pub const SYS_fstatat64: ::c_long = 291; +pub const SYS_unlinkat: ::c_long = 292; +pub const SYS_renameat: ::c_long = 293; +pub const SYS_linkat: ::c_long = 294; +pub const SYS_symlinkat: ::c_long = 295; +pub const SYS_readlinkat: ::c_long = 296; +pub const SYS_fchmodat: ::c_long = 297; +pub const SYS_faccessat: ::c_long = 298; +pub const SYS_get_robust_list: ::c_long = 299; +pub const SYS_set_robust_list: ::c_long = 300; +pub const SYS_move_pages: ::c_long = 301; +pub const SYS_getcpu: ::c_long = 302; +pub const SYS_epoll_pwait: ::c_long = 303; +pub const SYS_utimensat: ::c_long = 304; +pub const SYS_signalfd: ::c_long = 305; +pub const SYS_timerfd_create: ::c_long = 306; +pub const SYS_eventfd: ::c_long = 307; +pub const SYS_sync_file_range2: ::c_long = 308; +pub const SYS_fallocate: ::c_long = 309; +pub const SYS_subpage_prot: ::c_long = 310; +pub const SYS_timerfd_settime: ::c_long = 311; +pub const 
SYS_timerfd_gettime: ::c_long = 312; +pub const SYS_signalfd4: ::c_long = 313; +pub const SYS_eventfd2: ::c_long = 314; +pub const SYS_epoll_create1: ::c_long = 315; +pub const SYS_dup3: ::c_long = 316; +pub const SYS_pipe2: ::c_long = 317; +pub const SYS_inotify_init1: ::c_long = 318; +pub const SYS_perf_event_open: ::c_long = 319; +pub const SYS_preadv: ::c_long = 320; +pub const SYS_pwritev: ::c_long = 321; +pub const SYS_rt_tgsigqueueinfo: ::c_long = 322; +pub const SYS_fanotify_init: ::c_long = 323; +pub const SYS_fanotify_mark: ::c_long = 324; +pub const SYS_prlimit64: ::c_long = 325; +pub const SYS_socket: ::c_long = 326; +pub const SYS_bind: ::c_long = 327; +pub const SYS_connect: ::c_long = 328; +pub const SYS_listen: ::c_long = 329; +pub const SYS_accept: ::c_long = 330; +pub const SYS_getsockname: ::c_long = 331; +pub const SYS_getpeername: ::c_long = 332; +pub const SYS_socketpair: ::c_long = 333; +pub const SYS_send: ::c_long = 334; +pub const SYS_sendto: ::c_long = 335; +pub const SYS_recv: ::c_long = 336; +pub const SYS_recvfrom: ::c_long = 337; +pub const SYS_shutdown: ::c_long = 338; +pub const SYS_setsockopt: ::c_long = 339; +pub const SYS_getsockopt: ::c_long = 340; +pub const SYS_sendmsg: ::c_long = 341; +pub const SYS_recvmsg: ::c_long = 342; +pub const SYS_recvmmsg: ::c_long = 343; +pub const SYS_accept4: ::c_long = 344; +pub const SYS_name_to_handle_at: ::c_long = 345; +pub const SYS_open_by_handle_at: ::c_long = 346; +pub const SYS_clock_adjtime: ::c_long = 347; +pub const SYS_syncfs: ::c_long = 348; +pub const SYS_sendmmsg: ::c_long = 349; +pub const SYS_setns: ::c_long = 350; +pub const SYS_process_vm_readv: ::c_long = 351; +pub const SYS_process_vm_writev: ::c_long = 352; +pub const SYS_finit_module: ::c_long = 353; +pub const SYS_kcmp: ::c_long = 354; +pub const SYS_sched_setattr: ::c_long = 355; +pub const SYS_sched_getattr: ::c_long = 356; +pub const SYS_renameat2: ::c_long = 357; +pub const SYS_seccomp: ::c_long = 358; +pub const 
SYS_getrandom: ::c_long = 359; +pub const SYS_memfd_create: ::c_long = 360; +pub const SYS_bpf: ::c_long = 361; +pub const SYS_execveat: ::c_long = 362; +pub const SYS_switch_endian: ::c_long = 363; +pub const SYS_userfaultfd: ::c_long = 364; +pub const SYS_membarrier: ::c_long = 365; +pub const SYS_mlock2: ::c_long = 378; +pub const SYS_copy_file_range: ::c_long = 379; +pub const SYS_preadv2: ::c_long = 380; +pub const SYS_pwritev2: ::c_long = 381; +pub const SYS_kexec_file_load: ::c_long = 382; +pub const SYS_statx: ::c_long = 383; +pub const SYS_pkey_alloc: ::c_long = 384; +pub const SYS_pkey_free: ::c_long = 385; +pub const SYS_pkey_mprotect: ::c_long = 386; + +#[doc(hidden)] +pub const AF_MAX: ::c_int = 43; +#[doc(hidden)] +pub const PF_MAX: ::c_int = AF_MAX; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/x86.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/x86.rs index fa570248c729..9f704c7fac9c 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/x86.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b32/x86.rs @@ -52,6 +52,18 @@ s! { pub ss_size: ::size_t } + pub struct ipc_perm { + pub __ipc_perm_key: ::key_t, + pub uid: ::uid_t, + pub gid: ::gid_t, + pub cuid: ::uid_t, + pub cgid: ::gid_t, + pub mode: ::mode_t, + pub __seq: ::c_int, + __unused1: ::c_long, + __unused2: ::c_long + } + pub struct shmid_ds { pub shm_perm: ::ipc_perm, pub shm_segsz: ::size_t, @@ -164,6 +176,9 @@ s! 
{ } } +pub const SIGSTKSZ: ::size_t = 8192; +pub const MINSIGSTKSZ: ::size_t = 2048; + pub const O_DIRECT: ::c_int = 0x4000; pub const O_DIRECTORY: ::c_int = 0x10000; pub const O_NOFOLLOW: ::c_int = 0x20000; @@ -178,6 +193,76 @@ pub const RLIMIT_NOFILE: ::c_int = 7; pub const RLIMIT_AS: ::c_int = 9; pub const RLIMIT_NPROC: ::c_int = 6; pub const RLIMIT_MEMLOCK: ::c_int = 8; +pub const RLIMIT_NLIMITS: ::c_int = 16; + +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 
0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; +pub const CIBAUD: ::tcflag_t = 0o02003600000; +pub const CBAUDEX: ::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: ::tcflag_t = 0o000002; +pub const NLDLY: ::tcflag_t = 0o000400; +pub const CRDLY: ::tcflag_t = 0o003000; +pub const TABDLY: ::tcflag_t = 0o014000; +pub const BSDLY: ::tcflag_t = 0o020000; +pub const FFDLY: ::tcflag_t = 0o100000; +pub const VTDLY: ::tcflag_t = 0o040000; +pub const XTABS: ::tcflag_t = 0o014000; +pub const B57600: ::speed_t = 0o010001; +pub const B115200: ::speed_t = 0o010002; +pub const B230400: ::speed_t = 0o010003; +pub const B460800: ::speed_t = 0o010004; +pub const B500000: ::speed_t = 0o010005; +pub const B576000: ::speed_t = 0o010006; +pub const B921600: ::speed_t = 0o010007; +pub const B1000000: ::speed_t = 0o010010; +pub const B1152000: ::speed_t = 0o010011; +pub const B1500000: ::speed_t = 0o010012; +pub const B2000000: ::speed_t = 0o010013; +pub const B2500000: ::speed_t = 0o010014; +pub const B3000000: ::speed_t = 0o010015; +pub const B3500000: ::speed_t = 0o010016; +pub const B4000000: ::speed_t = 0o010017; pub const O_APPEND: ::c_int = 1024; pub const O_CREAT: ::c_int = 64; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/aarch64.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/aarch64.rs index da0827a7750a..98c53dc4f2b1 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/aarch64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/aarch64.rs @@ -339,3 +339,138 @@ pub const SYS_pwritev2: ::c_long = 287; pub const SYS_pkey_mprotect: ::c_long = 288; pub const SYS_pkey_alloc: ::c_long = 289; pub const SYS_pkey_free: ::c_long = 290; + +pub const RLIMIT_NLIMITS: ::c_int = 16; +pub const TIOCINQ: ::c_int = ::FIONREAD; +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: 
::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: ::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; +pub const CIBAUD: ::tcflag_t = 0o02003600000; +pub const CBAUDEX: ::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: ::tcflag_t = 0o000002; +pub const NLDLY: ::tcflag_t = 0o000400; +pub const CRDLY: ::tcflag_t = 0o003000; +pub const TABDLY: ::tcflag_t = 0o014000; +pub const BSDLY: ::tcflag_t = 0o020000; +pub const FFDLY: ::tcflag_t = 0o100000; +pub const VTDLY: ::tcflag_t = 0o040000; +pub const XTABS: ::tcflag_t = 0o014000; +pub const B57600: 
::speed_t = 0o010001; +pub const B115200: ::speed_t = 0o010002; +pub const B230400: ::speed_t = 0o010003; +pub const B460800: ::speed_t = 0o010004; +pub const B500000: ::speed_t = 0o010005; +pub const B576000: ::speed_t = 0o010006; +pub const B921600: ::speed_t = 0o010007; +pub const B1000000: ::speed_t = 0o010010; +pub const B1152000: ::speed_t = 0o010011; +pub const B1500000: ::speed_t = 0o010012; +pub const B2000000: ::speed_t = 0o010013; +pub const B2500000: ::speed_t = 0o010014; +pub const B3000000: ::speed_t = 0o010015; +pub const B3500000: ::speed_t = 0o010016; +pub const B4000000: ::speed_t = 0o010017; + +pub const FIOCLEX: ::c_int = 0x5451; +pub const FIONBIO: ::c_int = 0x5421; +pub const EDEADLK: ::c_int = 35; +pub const EDEADLOCK: ::c_int = EDEADLK; +pub const SO_PASSCRED: ::c_int = 16; +pub const SO_PEERCRED: ::c_int = 17; +pub const SO_RCVLOWAT: ::c_int = 18; +pub const SO_SNDLOWAT: ::c_int = 19; +pub const SO_RCVTIMEO: ::c_int = 20; +pub const SO_SNDTIMEO: ::c_int = 21; +pub const EXTPROC: ::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const TCGETS: ::c_int = 0x5401; +pub const TCSETS: ::c_int = 0x5402; +pub const TCSETSW: ::c_int = 0x5403; +pub const TCSETSF: ::c_int = 0x5404; +pub const TCGETA: ::c_int = 0x5405; +pub const TCSETA: ::c_int = 0x5406; +pub const TCSETAW: ::c_int = 0x5407; +pub const TCSETAF: ::c_int = 0x5408; +pub const TCSBRK: ::c_int = 0x5409; +pub const TCXONC: ::c_int = 0x540A; +pub const TCFLSH: ::c_int = 0x540B; +pub const TIOCGSOFTCAR: ::c_int = 0x5419; +pub const TIOCSSOFTCAR: ::c_int = 0x541A; +pub const TIOCLINUX: ::c_int = 0x541C; +pub const TIOCGSERIAL: ::c_int = 0x541E; +pub const TIOCEXCL: ::c_int = 0x540C; +pub const TIOCNXCL: ::c_int = 0x540D; +pub const TIOCSCTTY: ::c_int = 0x540E; +pub const TIOCGPGRP: ::c_int = 0x540F; 
+pub const TIOCSPGRP: ::c_int = 0x5410; +pub const TIOCOUTQ: ::c_int = 0x5411; +pub const TIOCSTI: ::c_int = 0x5412; +pub const TIOCGWINSZ: ::c_int = 0x5413; +pub const TIOCSWINSZ: ::c_int = 0x5414; +pub const TIOCMGET: ::c_int = 0x5415; +pub const TIOCMBIS: ::c_int = 0x5416; +pub const TIOCMBIC: ::c_int = 0x5417; +pub const TIOCMSET: ::c_int = 0x5418; +pub const FIONREAD: ::c_int = 0x541B; +pub const TIOCCONS: ::c_int = 0x541D; + +pub const TIOCM_LE: ::c_int = 0x001; +pub const TIOCM_DTR: ::c_int = 0x002; +pub const TIOCM_RTS: ::c_int = 0x004; +pub const TIOCM_ST: ::c_int = 0x008; +pub const TIOCM_SR: ::c_int = 0x010; +pub const TIOCM_CTS: ::c_int = 0x020; +pub const TIOCM_CAR: ::c_int = 0x040; +pub const TIOCM_RNG: ::c_int = 0x080; +pub const TIOCM_DSR: ::c_int = 0x100; +pub const TIOCM_CD: ::c_int = TIOCM_CAR; +pub const TIOCM_RI: ::c_int = TIOCM_RNG; + +extern { + pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; +} diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/mod.rs index 70baf8277a9b..5c2e815fd608 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/mod.rs @@ -136,9 +136,6 @@ pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; pub const O_ASYNC: ::c_int = 0x2000; -pub const FIOCLEX: ::c_int = 0x5451; -pub const FIONBIO: ::c_int = 0x5421; - pub const RLIMIT_RSS: ::c_int = 5; pub const RLIMIT_NOFILE: ::c_int = 7; pub const RLIMIT_AS: ::c_int = 9; @@ -172,7 +169,6 @@ pub const SOCK_SEQPACKET: ::c_int = 5; pub const SOL_SOCKET: ::c_int = 1; -pub const EDEADLK: ::c_int = 35; pub const ENAMETOOLONG: ::c_int = 36; pub const ENOLCK: ::c_int = 37; pub const ENOSYS: ::c_int = 38; @@ -194,7 +190,6 @@ pub const EXFULL: ::c_int = 54; pub const ENOANO: ::c_int = 55; pub const EBADRQC: ::c_int = 56; pub const EBADSLT: ::c_int = 57; -pub const EDEADLOCK: ::c_int = EDEADLK; pub const EMULTIHOP: ::c_int = 72; 
pub const EBADMSG: ::c_int = 74; pub const EOVERFLOW: ::c_int = 75; @@ -272,12 +267,6 @@ pub const SO_PRIORITY: ::c_int = 12; pub const SO_LINGER: ::c_int = 13; pub const SO_BSDCOMPAT: ::c_int = 14; pub const SO_REUSEPORT: ::c_int = 15; -pub const SO_PASSCRED: ::c_int = 16; -pub const SO_PEERCRED: ::c_int = 17; -pub const SO_RCVLOWAT: ::c_int = 18; -pub const SO_SNDLOWAT: ::c_int = 19; -pub const SO_RCVTIMEO: ::c_int = 20; -pub const SO_SNDTIMEO: ::c_int = 21; pub const SO_ACCEPTCONN: ::c_int = 30; pub const SO_SNDBUFFORCE: ::c_int = 32; pub const SO_RCVBUFFORCE: ::c_int = 33; @@ -312,8 +301,6 @@ pub const SIG_SETMASK: ::c_int = 2; pub const SIG_BLOCK: ::c_int = 0x000000; pub const SIG_UNBLOCK: ::c_int = 0x01; -pub const EXTPROC: ::tcflag_t = 0x00010000; - pub const MAP_HUGETLB: ::c_int = 0x040000; pub const F_GETLK: ::c_int = 5; @@ -323,59 +310,10 @@ pub const F_SETLKW: ::c_int = 7; pub const F_SETOWN: ::c_int = 8; pub const VEOF: usize = 4; -pub const VEOL: usize = 11; -pub const VEOL2: usize = 16; -pub const VMIN: usize = 6; -pub const IEXTEN: ::tcflag_t = 0x00008000; -pub const TOSTOP: ::tcflag_t = 0x00000100; -pub const FLUSHO: ::tcflag_t = 0x00001000; - -pub const TCGETS: ::c_int = 0x5401; -pub const TCSETS: ::c_int = 0x5402; -pub const TCSETSW: ::c_int = 0x5403; -pub const TCSETSF: ::c_int = 0x5404; -pub const TCGETA: ::c_int = 0x5405; -pub const TCSETA: ::c_int = 0x5406; -pub const TCSETAW: ::c_int = 0x5407; -pub const TCSETAF: ::c_int = 0x5408; -pub const TCSBRK: ::c_int = 0x5409; -pub const TCXONC: ::c_int = 0x540A; -pub const TCFLSH: ::c_int = 0x540B; -pub const TIOCGSOFTCAR: ::c_int = 0x5419; -pub const TIOCSSOFTCAR: ::c_int = 0x541A; -pub const TIOCLINUX: ::c_int = 0x541C; -pub const TIOCGSERIAL: ::c_int = 0x541E; -pub const TIOCEXCL: ::c_int = 0x540C; -pub const TIOCNXCL: ::c_int = 0x540D; -pub const TIOCSCTTY: ::c_int = 0x540E; -pub const TIOCGPGRP: ::c_int = 0x540F; -pub const TIOCSPGRP: ::c_int = 0x5410; -pub const TIOCOUTQ: ::c_int = 0x5411; -pub 
const TIOCSTI: ::c_int = 0x5412; -pub const TIOCGWINSZ: ::c_int = 0x5413; -pub const TIOCSWINSZ: ::c_int = 0x5414; -pub const TIOCMGET: ::c_int = 0x5415; -pub const TIOCMBIS: ::c_int = 0x5416; -pub const TIOCMBIC: ::c_int = 0x5417; -pub const TIOCMSET: ::c_int = 0x5418; -pub const FIONREAD: ::c_int = 0x541B; -pub const TIOCCONS: ::c_int = 0x541D; pub const POLLWRNORM: ::c_short = 0x100; pub const POLLWRBAND: ::c_short = 0x200; -pub const TIOCM_LE: ::c_int = 0x001; -pub const TIOCM_DTR: ::c_int = 0x002; -pub const TIOCM_RTS: ::c_int = 0x004; -pub const TIOCM_ST: ::c_int = 0x008; -pub const TIOCM_SR: ::c_int = 0x010; -pub const TIOCM_CTS: ::c_int = 0x020; -pub const TIOCM_CAR: ::c_int = 0x040; -pub const TIOCM_RNG: ::c_int = 0x080; -pub const TIOCM_DSR: ::c_int = 0x100; -pub const TIOCM_CD: ::c_int = TIOCM_CAR; -pub const TIOCM_RI: ::c_int = TIOCM_RNG; - cfg_if! { if #[cfg(target_arch = "aarch64")] { mod aarch64; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/powerpc64.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/powerpc64.rs index 621f9f4ce798..2ac39bf0cd5d 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/powerpc64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/powerpc64.rs @@ -60,22 +60,17 @@ s! 
{ } } -pub const SYS_pivot_root: ::c_long = 203; -pub const SYS_gettid: ::c_long = 207; -pub const SYS_perf_event_open: ::c_long = 319; -pub const SYS_memfd_create: ::c_long = 360; - pub const MAP_32BIT: ::c_int = 0x0040; -pub const O_DIRECT: ::c_int = 0x4000; -pub const O_DIRECTORY: ::c_int = 0x10000; -pub const O_LARGEFILE: ::c_int = 0; -pub const O_NOFOLLOW: ::c_int = 0x20000; +pub const O_DIRECT: ::c_int = 0x20000; +pub const O_DIRECTORY: ::c_int = 0x4000; +pub const O_LARGEFILE: ::c_int = 0x10000; +pub const O_NOFOLLOW: ::c_int = 0x8000; -pub const SIGSTKSZ: ::size_t = 8192; -pub const MINSIGSTKSZ: ::size_t = 2048; +pub const SIGSTKSZ: ::size_t = 10240; +pub const MINSIGSTKSZ: ::size_t = 4096; #[doc(hidden)] -pub const AF_MAX: ::c_int = 42; +pub const AF_MAX: ::c_int = 43; #[doc(hidden)] pub const PF_MAX: ::c_int = AF_MAX; @@ -439,3 +434,139 @@ pub const SYS_copy_file_range: ::c_long = 379; pub const SYS_preadv2: ::c_long = 380; pub const SYS_pwritev2: ::c_long = 381; pub const SYS_kexec_file_load: ::c_long = 382; + +pub const FIOCLEX: ::c_ulong = 0x20006601; +pub const FIONBIO: ::c_ulong = 0x8004667e; +pub const EDEADLK: ::c_int = 58; +pub const EDEADLOCK: ::c_int = EDEADLK; +pub const SO_PASSCRED: ::c_int = 20; +pub const SO_PEERCRED: ::c_int = 21; +pub const SO_RCVLOWAT: ::c_int = 16; +pub const SO_SNDLOWAT: ::c_int = 17; +pub const SO_RCVTIMEO: ::c_int = 18; +pub const SO_SNDTIMEO: ::c_int = 19; +pub const EXTPROC: ::tcflag_t = 0x10000000; +pub const VEOL: usize = 6; +pub const VEOL2: usize = 8; +pub const VMIN: usize = 5; +pub const IEXTEN: ::tcflag_t = 0x00000400; +pub const TOSTOP: ::tcflag_t = 0x00400000; +pub const FLUSHO: ::tcflag_t = 0x00800000; +pub const TCGETS: ::c_ulong = 0x403c7413; +pub const TCSETS: ::c_ulong = 0x803c7414; +pub const TCSETSW: ::c_ulong = 0x803c7415; +pub const TCSETSF: ::c_ulong = 0x803c7416; +pub const TCGETA: ::c_ulong = 0x40147417; +pub const TCSETA: ::c_ulong = 0x80147418; +pub const TCSETAW: ::c_ulong = 0x80147419; +pub 
const TCSETAF: ::c_ulong = 0x8014741c; +pub const TCSBRK: ::c_ulong = 0x2000741d; +pub const TCXONC: ::c_ulong = 0x2000741e; +pub const TCFLSH: ::c_ulong = 0x2000741f; +pub const TIOCGSOFTCAR: ::c_ulong = 0x5419; +pub const TIOCSSOFTCAR: ::c_ulong = 0x541A; +pub const TIOCLINUX: ::c_ulong = 0x541C; +pub const TIOCGSERIAL: ::c_ulong = 0x541E; +pub const TIOCEXCL: ::c_ulong = 0x540C; +pub const TIOCNXCL: ::c_ulong = 0x540D; +pub const TIOCSCTTY: ::c_ulong = 0x540E; +pub const TIOCGPGRP: ::c_ulong = 0x40047477; +pub const TIOCSPGRP: ::c_ulong = 0x80047476; +pub const TIOCOUTQ: ::c_ulong = 0x40047473; +pub const TIOCGWINSZ: ::c_ulong = 0x40087468; +pub const TIOCSWINSZ: ::c_ulong = 0x80087467; +pub const TIOCMGET: ::c_ulong = 0x5415; +pub const TIOCMBIS: ::c_ulong = 0x5416; +pub const TIOCMBIC: ::c_ulong = 0x5417; +pub const TIOCMSET: ::c_ulong = 0x5418; +pub const FIONREAD: ::c_ulong = 0x4004667f; +pub const TIOCCONS: ::c_ulong = 0x541D; +pub const TIOCM_LE: ::c_ulong = 0x001; +pub const TIOCM_DTR: ::c_ulong = 0x002; +pub const TIOCM_RTS: ::c_ulong = 0x004; +pub const TIOCM_ST: ::c_ulong = 0x008; +pub const TIOCM_SR: ::c_ulong = 0x010; +pub const TIOCM_CTS: ::c_ulong = 0x020; +pub const TIOCM_CAR: ::c_ulong = 0x040; +pub const TIOCM_RNG: ::c_ulong = 0x080; +pub const TIOCM_DSR: ::c_ulong = 0x100; +pub const TIOCM_CD: ::c_ulong = TIOCM_CAR; +pub const TIOCM_RI: ::c_ulong = TIOCM_RNG; + +pub const RLIMIT_NLIMITS: ::c_int = 15; +pub const TIOCINQ: ::c_ulong = ::FIONREAD; +pub const MCL_CURRENT: ::c_int = 0x2000; +pub const MCL_FUTURE: ::c_int = 0x4000; +pub const CBAUD: ::tcflag_t = 0xff; +pub const TAB1: ::c_int = 0x400; +pub const TAB2: ::c_int = 0x800; +pub const TAB3: ::c_int = 0xc00; +pub const CR1: ::c_int = 0x1000; +pub const CR2: ::c_int = 0x2000; +pub const CR3: ::c_int = 0x3000; +pub const FF1: ::c_int = 0x4000; +pub const BS1: ::c_int = 0x8000; +pub const VT1: ::c_int = 0x10000; +pub const VWERASE: usize = 10; +pub const VREPRINT: usize = 11; +pub const VSUSP: 
usize = 12; +pub const VSTART: usize = 13; +pub const VSTOP: usize = 14; +pub const VDISCARD: usize = 16; +pub const VTIME: usize = 7; +pub const IXON: ::tcflag_t = 0x00000200; +pub const IXOFF: ::tcflag_t = 0x00000400; +pub const ONLCR: ::tcflag_t = 0x2; +pub const CSIZE: ::tcflag_t = 0x00000300; + +pub const CS6: ::tcflag_t = 0x00000100; +pub const CS7: ::tcflag_t = 0x00000200; +pub const CS8: ::tcflag_t = 0x00000300; +pub const CSTOPB: ::tcflag_t = 0x00000400; +pub const CREAD: ::tcflag_t = 0x00000800; +pub const PARENB: ::tcflag_t = 0x00001000; +pub const PARODD: ::tcflag_t = 0x00002000; +pub const HUPCL: ::tcflag_t = 0x00004000; +pub const CLOCAL: ::tcflag_t = 0x00008000; +pub const ECHOKE: ::tcflag_t = 0x00000001; +pub const ECHOE: ::tcflag_t = 0x00000002; +pub const ECHOK: ::tcflag_t = 0x00000004; +pub const ECHONL: ::tcflag_t = 0x00000010; +pub const ECHOPRT: ::tcflag_t = 0x00000020; +pub const ECHOCTL: ::tcflag_t = 0x00000040; +pub const ISIG: ::tcflag_t = 0x00000080; +pub const ICANON: ::tcflag_t = 0x00000100; +pub const PENDIN: ::tcflag_t = 0x20000000; +pub const NOFLSH: ::tcflag_t = 0x80000000; + +pub const CIBAUD: ::tcflag_t = 0o77600000; +pub const CBAUDEX: ::tcflag_t = 0o0000020; +pub const VSWTC: usize = 9; +pub const OLCUC: ::tcflag_t = 0o000004; +pub const NLDLY: ::tcflag_t = 0o0001400; +pub const CRDLY: ::tcflag_t = 0o0030000; +pub const TABDLY: ::tcflag_t = 0o0006000; +pub const BSDLY: ::tcflag_t = 0o0100000; +pub const FFDLY: ::tcflag_t = 0o0040000; +pub const VTDLY: ::tcflag_t = 0o0200000; +pub const XTABS: ::tcflag_t = 0o00006000; + +pub const B57600: ::speed_t = 0o00020; +pub const B115200: ::speed_t = 0o00021; +pub const B230400: ::speed_t = 0o00022; +pub const B460800: ::speed_t = 0o00023; +pub const B500000: ::speed_t = 0o00024; +pub const B576000: ::speed_t = 0o00025; +pub const B921600: ::speed_t = 0o00026; +pub const B1000000: ::speed_t = 0o00027; +pub const B1152000: ::speed_t = 0o00030; +pub const B1500000: ::speed_t = 0o00031; +pub 
const B2000000: ::speed_t = 0o00032; +pub const B2500000: ::speed_t = 0o00033; +pub const B3000000: ::speed_t = 0o00034; +pub const B3500000: ::speed_t = 0o00035; +pub const B4000000: ::speed_t = 0o00036; + +extern { + pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int; +} diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/x86_64.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/x86_64.rs index 78d38e49e8f9..0e0fcec4d2e7 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/x86_64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/b64/x86_64.rs @@ -449,3 +449,139 @@ pub const MINSIGSTKSZ: ::size_t = 2048; pub const AF_MAX: ::c_int = 42; #[doc(hidden)] pub const PF_MAX: ::c_int = AF_MAX; + +pub const RLIMIT_NLIMITS: ::c_int = 16; +pub const TIOCINQ: ::c_int = ::FIONREAD; +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; +pub const CBAUD: ::tcflag_t = 0o0010017; +pub const TAB1: ::c_int = 0x00000800; +pub const TAB2: ::c_int = 0x00001000; +pub const TAB3: ::c_int = 0x00001800; +pub const CR1: ::c_int = 0x00000200; +pub const CR2: ::c_int = 0x00000400; +pub const CR3: ::c_int = 0x00000600; +pub const FF1: ::c_int = 0x00008000; +pub const BS1: ::c_int = 0x00002000; +pub const VT1: ::c_int = 0x00004000; +pub const VWERASE: usize = 14; +pub const VREPRINT: usize = 12; +pub const VSUSP: usize = 10; +pub const VSTART: usize = 8; +pub const VSTOP: usize = 9; +pub const VDISCARD: usize = 13; +pub const VTIME: usize = 5; +pub const IXON: ::tcflag_t = 0x00000400; +pub const IXOFF: ::tcflag_t = 0x00001000; +pub const ONLCR: ::tcflag_t = 0x4; +pub const CSIZE: ::tcflag_t = 0x00000030; +pub const CS6: ::tcflag_t = 0x00000010; +pub const CS7: ::tcflag_t = 0x00000020; +pub const CS8: ::tcflag_t = 0x00000030; +pub const CSTOPB: ::tcflag_t = 0x00000040; +pub const CREAD: ::tcflag_t = 0x00000080; +pub const PARENB: ::tcflag_t = 0x00000100; +pub const PARODD: ::tcflag_t = 0x00000200; +pub const HUPCL: 
::tcflag_t = 0x00000400; +pub const CLOCAL: ::tcflag_t = 0x00000800; +pub const ECHOKE: ::tcflag_t = 0x00000800; +pub const ECHOE: ::tcflag_t = 0x00000010; +pub const ECHOK: ::tcflag_t = 0x00000020; +pub const ECHONL: ::tcflag_t = 0x00000040; +pub const ECHOPRT: ::tcflag_t = 0x00000400; +pub const ECHOCTL: ::tcflag_t = 0x00000200; +pub const ISIG: ::tcflag_t = 0x00000001; +pub const ICANON: ::tcflag_t = 0x00000002; +pub const PENDIN: ::tcflag_t = 0x00004000; +pub const NOFLSH: ::tcflag_t = 0x00000080; +pub const CIBAUD: ::tcflag_t = 0o02003600000; +pub const CBAUDEX: ::tcflag_t = 0o010000; +pub const VSWTC: usize = 7; +pub const OLCUC: ::tcflag_t = 0o000002; +pub const NLDLY: ::tcflag_t = 0o000400; +pub const CRDLY: ::tcflag_t = 0o003000; +pub const TABDLY: ::tcflag_t = 0o014000; +pub const BSDLY: ::tcflag_t = 0o020000; +pub const FFDLY: ::tcflag_t = 0o100000; +pub const VTDLY: ::tcflag_t = 0o040000; +pub const XTABS: ::tcflag_t = 0o014000; +pub const B57600: ::speed_t = 0o010001; +pub const B115200: ::speed_t = 0o010002; +pub const B230400: ::speed_t = 0o010003; +pub const B460800: ::speed_t = 0o010004; +pub const B500000: ::speed_t = 0o010005; +pub const B576000: ::speed_t = 0o010006; +pub const B921600: ::speed_t = 0o010007; +pub const B1000000: ::speed_t = 0o010010; +pub const B1152000: ::speed_t = 0o010011; +pub const B1500000: ::speed_t = 0o010012; +pub const B2000000: ::speed_t = 0o010013; +pub const B2500000: ::speed_t = 0o010014; +pub const B3000000: ::speed_t = 0o010015; +pub const B3500000: ::speed_t = 0o010016; +pub const B4000000: ::speed_t = 0o010017; + +pub const FIOCLEX: ::c_int = 0x5451; +pub const FIONBIO: ::c_int = 0x5421; +pub const EDEADLK: ::c_int = 35; +pub const EDEADLOCK: ::c_int = EDEADLK; +pub const SO_PASSCRED: ::c_int = 16; +pub const SO_PEERCRED: ::c_int = 17; +pub const SO_RCVLOWAT: ::c_int = 18; +pub const SO_SNDLOWAT: ::c_int = 19; +pub const SO_RCVTIMEO: ::c_int = 20; +pub const SO_SNDTIMEO: ::c_int = 21; +pub const EXTPROC: 
::tcflag_t = 0x00010000; +pub const VEOL: usize = 11; +pub const VEOL2: usize = 16; +pub const VMIN: usize = 6; +pub const IEXTEN: ::tcflag_t = 0x00008000; +pub const TOSTOP: ::tcflag_t = 0x00000100; +pub const FLUSHO: ::tcflag_t = 0x00001000; +pub const TCGETS: ::c_int = 0x5401; +pub const TCSETS: ::c_int = 0x5402; +pub const TCSETSW: ::c_int = 0x5403; +pub const TCSETSF: ::c_int = 0x5404; +pub const TCGETA: ::c_int = 0x5405; +pub const TCSETA: ::c_int = 0x5406; +pub const TCSETAW: ::c_int = 0x5407; +pub const TCSETAF: ::c_int = 0x5408; +pub const TCSBRK: ::c_int = 0x5409; +pub const TCXONC: ::c_int = 0x540A; +pub const TCFLSH: ::c_int = 0x540B; +pub const TIOCGSOFTCAR: ::c_int = 0x5419; +pub const TIOCSSOFTCAR: ::c_int = 0x541A; +pub const TIOCLINUX: ::c_int = 0x541C; +pub const TIOCGSERIAL: ::c_int = 0x541E; +pub const TIOCEXCL: ::c_int = 0x540C; +pub const TIOCNXCL: ::c_int = 0x540D; +pub const TIOCSCTTY: ::c_int = 0x540E; +pub const TIOCGPGRP: ::c_int = 0x540F; +pub const TIOCSPGRP: ::c_int = 0x5410; +pub const TIOCOUTQ: ::c_int = 0x5411; +pub const TIOCSTI: ::c_int = 0x5412; +pub const TIOCGWINSZ: ::c_int = 0x5413; +pub const TIOCSWINSZ: ::c_int = 0x5414; +pub const TIOCMGET: ::c_int = 0x5415; +pub const TIOCMBIS: ::c_int = 0x5416; +pub const TIOCMBIC: ::c_int = 0x5417; +pub const TIOCMSET: ::c_int = 0x5418; +pub const FIONREAD: ::c_int = 0x541B; +pub const TIOCCONS: ::c_int = 0x541D; + +pub const TIOCM_LE: ::c_int = 0x001; +pub const TIOCM_DTR: ::c_int = 0x002; +pub const TIOCM_RTS: ::c_int = 0x004; +pub const TIOCM_ST: ::c_int = 0x008; +pub const TIOCM_SR: ::c_int = 0x010; +pub const TIOCM_CTS: ::c_int = 0x020; +pub const TIOCM_CAR: ::c_int = 0x040; +pub const TIOCM_RNG: ::c_int = 0x080; +pub const TIOCM_DSR: ::c_int = 0x100; +pub const TIOCM_CD: ::c_int = TIOCM_CAR; +pub const TIOCM_RI: ::c_int = TIOCM_RNG; + +extern { + pub fn ioctl(fd: ::c_int, request: ::c_int, ...) 
-> ::c_int; +} + diff --git a/third_party/rust/libc/src/unix/notbsd/linux/musl/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/musl/mod.rs index 7513aaed6724..10d61ebebefa 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/musl/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/musl/mod.rs @@ -40,6 +40,26 @@ s! { pub sa_restorer: ::dox::Option, } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + #[cfg(target_endian = "little")] + pub f_fsid: ::c_ulong, + #[cfg(target_pointer_width = "32")] + __f_unused: ::c_int, + #[cfg(target_endian = "big")] + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct termios { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, @@ -127,7 +147,6 @@ pub const POSIX_MADV_DONTNEED: ::c_int = 4; pub const RLIM_INFINITY: ::rlim_t = !0; pub const RLIMIT_RTTIME: ::c_int = 15; -pub const RLIMIT_NLIMITS: ::c_int = 16; pub const MAP_ANONYMOUS: ::c_int = MAP_ANON; @@ -193,8 +212,6 @@ pub const TCSANOW: ::c_int = 0; pub const TCSADRAIN: ::c_int = 1; pub const TCSAFLUSH: ::c_int = 2; -pub const TIOCINQ: ::c_int = ::FIONREAD; - pub const RTLD_GLOBAL: ::c_int = 0x100; pub const RTLD_NOLOAD: ::c_int = 0x4; @@ -203,61 +220,6 @@ pub const RTLD_NOLOAD: ::c_int = 0x4; pub const CLOCK_SGI_CYCLE: ::clockid_t = 10; pub const CLOCK_TAI: ::clockid_t = 11; -pub const MCL_CURRENT: ::c_int = 0x0001; -pub const MCL_FUTURE: ::c_int = 0x0002; - -pub const CBAUD: ::tcflag_t = 0o0010017; -pub const TAB1: ::c_int = 0x00000800; -pub const TAB2: ::c_int = 0x00001000; -pub const TAB3: ::c_int = 0x00001800; -pub const CR1: ::c_int = 0x00000200; -pub const CR2: ::c_int = 0x00000400; -pub const CR3: ::c_int = 0x00000600; -pub const FF1: ::c_int = 0x00008000; -pub const BS1: ::c_int = 
0x00002000; -pub const VT1: ::c_int = 0x00004000; -pub const VWERASE: usize = 14; -pub const VREPRINT: usize = 12; -pub const VSUSP: usize = 10; -pub const VSTART: usize = 8; -pub const VSTOP: usize = 9; -pub const VDISCARD: usize = 13; -pub const VTIME: usize = 5; -pub const IXON: ::tcflag_t = 0x00000400; -pub const IXOFF: ::tcflag_t = 0x00001000; -pub const ONLCR: ::tcflag_t = 0x4; -pub const CSIZE: ::tcflag_t = 0x00000030; -pub const CS6: ::tcflag_t = 0x00000010; -pub const CS7: ::tcflag_t = 0x00000020; -pub const CS8: ::tcflag_t = 0x00000030; -pub const CSTOPB: ::tcflag_t = 0x00000040; -pub const CREAD: ::tcflag_t = 0x00000080; -pub const PARENB: ::tcflag_t = 0x00000100; -pub const PARODD: ::tcflag_t = 0x00000200; -pub const HUPCL: ::tcflag_t = 0x00000400; -pub const CLOCAL: ::tcflag_t = 0x00000800; -pub const ECHOKE: ::tcflag_t = 0x00000800; -pub const ECHOE: ::tcflag_t = 0x00000010; -pub const ECHOK: ::tcflag_t = 0x00000020; -pub const ECHONL: ::tcflag_t = 0x00000040; -pub const ECHOPRT: ::tcflag_t = 0x00000400; -pub const ECHOCTL: ::tcflag_t = 0x00000200; -pub const ISIG: ::tcflag_t = 0x00000001; -pub const ICANON: ::tcflag_t = 0x00000002; -pub const PENDIN: ::tcflag_t = 0x00004000; -pub const NOFLSH: ::tcflag_t = 0x00000080; -pub const CIBAUD: ::tcflag_t = 0o02003600000; -pub const CBAUDEX: ::tcflag_t = 0o010000; -pub const VSWTC: usize = 7; -pub const OLCUC: ::tcflag_t = 0o000002; -pub const NLDLY: ::tcflag_t = 0o000400; -pub const CRDLY: ::tcflag_t = 0o003000; -pub const TABDLY: ::tcflag_t = 0o014000; -pub const BSDLY: ::tcflag_t = 0o020000; -pub const FFDLY: ::tcflag_t = 0o100000; -pub const VTDLY: ::tcflag_t = 0o040000; -pub const XTABS: ::tcflag_t = 0o014000; - pub const B0: ::speed_t = 0o000000; pub const B50: ::speed_t = 0o000001; pub const B75: ::speed_t = 0o000002; @@ -276,21 +238,6 @@ pub const B19200: ::speed_t = 0o000016; pub const B38400: ::speed_t = 0o000017; pub const EXTA: ::speed_t = B19200; pub const EXTB: ::speed_t = B38400; -pub const 
B57600: ::speed_t = 0o010001; -pub const B115200: ::speed_t = 0o010002; -pub const B230400: ::speed_t = 0o010003; -pub const B460800: ::speed_t = 0o010004; -pub const B500000: ::speed_t = 0o010005; -pub const B576000: ::speed_t = 0o010006; -pub const B921600: ::speed_t = 0o010007; -pub const B1000000: ::speed_t = 0o010010; -pub const B1152000: ::speed_t = 0o010011; -pub const B1500000: ::speed_t = 0o010012; -pub const B2000000: ::speed_t = 0o010013; -pub const B2500000: ::speed_t = 0o010014; -pub const B3000000: ::speed_t = 0o010015; -pub const B3500000: ::speed_t = 0o010016; -pub const B4000000: ::speed_t = 0o010017; pub const SO_BINDTODEVICE: ::c_int = 25; pub const SO_TIMESTAMP: ::c_int = 29; @@ -300,7 +247,6 @@ pub const SO_PEEK_OFF: ::c_int = 42; pub const SO_BUSY_POLL: ::c_int = 46; extern { - pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int; pub fn ptrace(request: ::c_int, ...) -> ::c_long; pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int; pub fn setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int; @@ -314,7 +260,8 @@ cfg_if! { pub use self::b64::*; } else if #[cfg(any(target_arch = "x86", target_arch = "mips", - target_arch = "arm"))] { + target_arch = "arm", + target_arch = "powerpc"))] { mod b32; pub use self::b32::*; } else { } diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b32/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b32/mod.rs index 8536353fb49e..5b0142ab89a8 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b32/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/b32/mod.rs @@ -1,5 +1,7 @@ //! 32-bit specific definitions for linux-like values +use pthread_mutex_t; + pub type c_long = i32; pub type c_ulong = u32; pub type clock_t = i32; @@ -44,6 +46,22 @@ s! 
{ __unused5: ::c_long, } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + __f_unused: ::c_int, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct pthread_attr_t { __size: [u32; 9] } @@ -283,6 +301,57 @@ pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 32; pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +align_const! { + #[cfg(target_endian = "little")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 0, + ], + }; +} + pub const 
PTRACE_GETFPREGS: ::c_uint = 14; pub const PTRACE_SETFPREGS: ::c_uint = 15; pub const PTRACE_GETREGS: ::c_uint = 12; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/aarch64.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/aarch64.rs index 80ebe7e5cfc2..2ba27a72b895 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/aarch64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/aarch64.rs @@ -1,5 +1,7 @@ //! AArch64-specific definitions for 64-bit linux-like values +use pthread_mutex_t; + pub type c_long = i64; pub type c_ulong = u64; pub type c_char = u8; @@ -69,6 +71,21 @@ s! { pub f_spare: [::__fsword_t; 4], } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct statvfs64 { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, @@ -371,6 +388,33 @@ pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 8; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 48; pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 8; +align_const! 
{ + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }; +} + pub const O_DIRECT: ::c_int = 0x10000; pub const O_DIRECTORY: ::c_int = 0x4000; pub const O_NOFOLLOW: ::c_int = 0x8000; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/not_x32.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/not_x32.rs index fb30a3149065..e3e449807f89 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/not_x32.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/not_x32.rs @@ -1,9 +1,79 @@ +use pthread_mutex_t; + pub type c_long = i64; pub type c_ulong = u64; +s! { + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } +} + pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; +align_const! 
{ + #[cfg(target_endian = "little")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; +} + // Syscall table pub const SYS_read: ::c_long = 0; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/powerpc64.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/powerpc64.rs index 65f2fa200dc4..9dd91f0fdd2c 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/powerpc64.rs +++ 
b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/powerpc64.rs @@ -1,5 +1,7 @@ //! PowerPC64-specific definitions for 64-bit linux-like values +use pthread_mutex_t; + pub type c_long = i64; pub type c_ulong = u64; pub type c_char = u8; @@ -67,6 +69,21 @@ s! { pub f_spare: [::__fsword_t; 4], } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct statvfs64 { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, @@ -358,6 +375,57 @@ pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +align_const! { + #[cfg(target_endian = "little")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "little")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + 
}; + #[cfg(target_endian = "big")] + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + #[cfg(target_endian = "big")] + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; +} + pub const O_DIRECTORY: ::c_int = 0x4000; pub const O_NOFOLLOW: ::c_int = 0x8000; pub const O_DIRECT: ::c_int = 0x20000; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/sparc64.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/sparc64.rs index 206a0ce602cd..34438a73537e 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/sparc64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/sparc64.rs @@ -1,5 +1,7 @@ //! SPARC64-specific definitions for 64-bit linux-like values +use pthread_mutex_t; + pub type c_long = i64; pub type c_ulong = u64; pub type c_char = i8; @@ -69,6 +71,21 @@ s! 
{ pub f_spare: [::__fsword_t; 4], } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct statvfs64 { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, @@ -243,6 +260,8 @@ pub const SO_DONTROUTE: ::c_int = 16; pub const SO_BROADCAST: ::c_int = 32; pub const SO_SNDBUF: ::c_int = 0x1001; pub const SO_RCVBUF: ::c_int = 0x1002; +pub const SO_SNDBUFFORCE: ::c_int = 0x100a; +pub const SO_RCVBUFFORCE: ::c_int = 0x100b; pub const SO_DOMAIN: ::c_int = 0x1029; pub const SO_KEEPALIVE: ::c_int = 8; pub const SO_OOBINLINE: ::c_int = 0x100; @@ -332,6 +351,30 @@ pub const __SIZEOF_PTHREAD_CONDATTR_T: usize = 4; pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; +align_const! 
{ + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; +} + pub const O_DIRECTORY: ::c_int = 0o200000; pub const O_NOFOLLOW: ::c_int = 0o400000; pub const O_DIRECT: ::c_int = 0x100000; @@ -407,6 +450,41 @@ pub const FFDLY: ::tcflag_t = 0o100000; pub const VTDLY: ::tcflag_t = 0o040000; pub const XTABS: ::tcflag_t = 0o014000; +pub const B0: ::speed_t = 0o000000; +pub const B50: ::speed_t = 0o000001; +pub const B75: ::speed_t = 0o000002; +pub const B110: ::speed_t = 0o000003; +pub const B134: ::speed_t = 0o000004; +pub const B150: ::speed_t = 0o000005; +pub const B200: ::speed_t = 0o000006; +pub const B300: ::speed_t = 0o000007; +pub const B600: ::speed_t = 0o000010; +pub const B1200: ::speed_t = 0o000011; +pub const B1800: ::speed_t = 0o000012; +pub const B2400: ::speed_t = 0o000013; +pub const B4800: ::speed_t = 0o000014; +pub const B9600: ::speed_t = 0o000015; +pub const B19200: ::speed_t = 0o000016; +pub const B38400: ::speed_t = 0o000017; +pub const EXTA: ::speed_t = B19200; +pub const EXTB: ::speed_t = B38400; +pub const BOTHER: ::speed_t = 0x1000; +pub const B57600: ::speed_t = 0x1001; +pub const B115200: ::speed_t = 0x1002; +pub const B230400: ::speed_t = 0x1003; +pub const B460800: ::speed_t = 0x1004; +pub const B76800: ::speed_t = 0x1005; +pub const B153600: ::speed_t = 0x1006; +pub const B307200: ::speed_t = 
0x1007; +pub const B614400: ::speed_t = 0x1008; +pub const B921600: ::speed_t = 0x1009; +pub const B500000: ::speed_t = 0x100a; +pub const B576000: ::speed_t = 0x100b; +pub const B1000000: ::speed_t = 0x100c; +pub const B1152000: ::speed_t = 0x100d; +pub const B1500000: ::speed_t = 0x100e; +pub const B2000000: ::speed_t = 0x100f; + pub const VEOL: usize = 5; pub const VEOL2: usize = 6; pub const VMIN: usize = 4; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x32.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x32.rs index 2e97061fba7d..d88dbafed83c 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x32.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x32.rs @@ -1,9 +1,52 @@ +use pthread_mutex_t; + pub type c_long = i32; pub type c_ulong = u32; +s! { + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } +} + pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 32; pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 44; +align_const! 
{ + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; +} + // Syscall table pub const __X32_SYSCALL_BIT: ::c_long = 0x40000000; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x86_64.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x86_64.rs index 5689aaa20ffc..7596eba492e7 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x86_64.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/b64/x86_64.rs @@ -427,6 +427,10 @@ pub const F_SETOWN: ::c_int = 8; pub const F_SETLK: ::c_int = 6; pub const F_SETLKW: ::c_int = 7; +pub const F_RDLCK: ::c_int = 0; +pub const F_WRLCK: ::c_int = 1; +pub const F_UNLCK: ::c_int = 2; + pub const SFD_NONBLOCK: ::c_int = 0x0800; pub const TIOCEXCL: ::c_ulong = 0x540C; diff --git a/third_party/rust/libc/src/unix/notbsd/linux/other/mod.rs b/third_party/rust/libc/src/unix/notbsd/linux/other/mod.rs index b566a95577bd..93b710b8df09 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/other/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/other/mod.rs @@ -158,11 +158,16 @@ s! 
{ } // FIXME this is actually a union + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = "64"), + repr(align(8)))] pub struct sem_t { #[cfg(target_pointer_width = "32")] __size: [::c_char; 16], #[cfg(target_pointer_width = "64")] __size: [::c_char; 32], + #[cfg(not(feature = "align"))] __align: [::c_long; 0], } @@ -180,41 +185,62 @@ s! { } pub struct nlmsghdr { - nlmsg_len: u32, - nlmsg_type: u16, - nlmsg_flags: u16, - nlmsg_seq: u32, - nlmsg_pid: u32, + pub nlmsg_len: u32, + pub nlmsg_type: u16, + pub nlmsg_flags: u16, + pub nlmsg_seq: u32, + pub nlmsg_pid: u32, } pub struct nlmsgerr { - error: ::c_int, - msg: nlmsghdr, + pub error: ::c_int, + pub msg: nlmsghdr, } pub struct nl_pktinfo { - group: u32, + pub group: u32, } pub struct nl_mmap_req { - nm_block_size: ::c_uint, - nm_block_nr: ::c_uint, - nm_frame_size: ::c_uint, - nm_frame_nr: ::c_uint, + pub nm_block_size: ::c_uint, + pub nm_block_nr: ::c_uint, + pub nm_frame_size: ::c_uint, + pub nm_frame_nr: ::c_uint, } pub struct nl_mmap_hdr { - nm_status: ::c_uint, - nm_len: ::c_uint, - nm_group: u32, - nm_pid: u32, - nm_uid: u32, - nm_gid: u32, + pub nm_status: ::c_uint, + pub nm_len: ::c_uint, + pub nm_group: u32, + pub nm_pid: u32, + pub nm_uid: u32, + pub nm_gid: u32, } pub struct nlattr { - nla_len: u16, - nla_type: u16, + pub nla_len: u16, + pub nla_type: u16, + } + + pub struct rtentry { + pub rt_pad1: ::c_ulong, + pub rt_dst: ::sockaddr, + pub rt_gateway: ::sockaddr, + pub rt_genmask: ::sockaddr, + pub rt_flags: ::c_ushort, + pub rt_pad2: ::c_short, + pub rt_pad3: ::c_ulong, + pub rt_tos: ::c_uchar, + pub rt_class: ::c_uchar, + #[cfg(target_pointer_width = "64")] + pub rt_pad4: [::c_short; 3usize], + #[cfg(not(target_pointer_width = "64"))] + pub rt_pad4: ::c_short, + pub rt_metric: ::c_short, + pub rt_dev: *mut ::c_char, + pub rt_mtu: ::c_ulong, + pub rt_window: ::c_ulong, + pub rt_irtt: ::c_ushort, } } @@ -813,6 
+839,7 @@ cfg_if! { pub const PTHREAD_STACK_MIN: ::size_t = 131072; } } +pub const PTHREAD_MUTEX_ADAPTIVE_NP: ::c_int = 3; f! { pub fn NLA_ALIGN(len: ::c_int) -> ::c_int { diff --git a/third_party/rust/libc/src/unix/notbsd/linux/s390x.rs b/third_party/rust/libc/src/unix/notbsd/linux/s390x.rs index d7ed38368c97..9196f88b453a 100644 --- a/third_party/rust/libc/src/unix/notbsd/linux/s390x.rs +++ b/third_party/rust/libc/src/unix/notbsd/linux/s390x.rs @@ -1,3 +1,5 @@ +use pthread_mutex_t; + pub type blkcnt_t = i64; pub type blksize_t = i64; pub type c_char = u8; @@ -153,6 +155,21 @@ s! { f_spare: [::c_uint; 4], } + pub struct statvfs { + pub f_bsize: ::c_ulong, + pub f_frsize: ::c_ulong, + pub f_blocks: ::fsblkcnt_t, + pub f_bfree: ::fsblkcnt_t, + pub f_bavail: ::fsblkcnt_t, + pub f_files: ::fsfilcnt_t, + pub f_ffree: ::fsfilcnt_t, + pub f_favail: ::fsfilcnt_t, + pub f_fsid: ::c_ulong, + pub f_flag: ::c_ulong, + pub f_namemax: ::c_ulong, + __f_spare: [::c_int; 6], + } + pub struct msghdr { pub msg_name: *mut ::c_void, pub msg_namelen: ::socklen_t, @@ -230,8 +247,13 @@ s! { } // FIXME this is actually a union + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = "64"), + repr(align(8)))] pub struct sem_t { __size: [::c_char; 32], + #[cfg(not(feature = "align"))] __align: [::c_long; 0], } @@ -356,6 +378,30 @@ pub const __SIZEOF_PTHREAD_MUTEX_T: usize = 40; pub const __SIZEOF_PTHREAD_RWLOCK_T: usize = 56; pub const __SIZEOF_PTHREAD_RWLOCKATTR_T: usize = 8; +align_const! 
{ + pub const PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; + pub const PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP: ::pthread_mutex_t = + pthread_mutex_t { + size: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }; +} + pub const EADDRINUSE: ::c_int = 98; pub const EADDRNOTAVAIL: ::c_int = 99; pub const ECONNABORTED: ::c_int = 103; @@ -374,6 +420,7 @@ pub const O_CREAT: ::c_int = 64; pub const O_EXCL: ::c_int = 128; pub const O_NONBLOCK: ::c_int = 2048; pub const PTHREAD_STACK_MIN: ::size_t = 16384; +pub const PTHREAD_MUTEX_ADAPTIVE_NP: ::c_int = 3; pub const RLIM_INFINITY: ::rlim_t = 0xffffffffffffffff; pub const SA_NOCLDWAIT: ::c_int = 2; pub const SA_ONSTACK: ::c_int = 0x08000000; @@ -735,6 +782,9 @@ pub const PTRACE_INTERRUPT: ::c_uint = 0x4207; pub const PTRACE_LISTEN: ::c_uint = 0x4208; pub const PTRACE_PEEKSIGINFO: ::c_uint = 0x4209; +pub const MCL_CURRENT: ::c_int = 0x0001; +pub const MCL_FUTURE: ::c_int = 0x0002; + pub const EPOLLWAKEUP: ::c_int = 0x20000000; pub const MAP_HUGETLB: ::c_int = 0x040000; diff --git a/third_party/rust/libc/src/unix/notbsd/mod.rs b/third_party/rust/libc/src/unix/notbsd/mod.rs index 92dfad6b9f2b..eead3fd84724 100644 --- a/third_party/rust/libc/src/unix/notbsd/mod.rs +++ b/third_party/rust/libc/src/unix/notbsd/mod.rs @@ -177,6 +177,58 @@ s! 
{ #[cfg(target_pointer_width = "32")] __unused1: [::c_int; 12] } + + pub struct in_pktinfo { + pub ipi_ifindex: ::c_int, + pub ipi_spec_dst: ::in_addr, + pub ipi_addr: ::in_addr, + } + + pub struct ifaddrs { + pub ifa_next: *mut ifaddrs, + pub ifa_name: *mut c_char, + pub ifa_flags: ::c_uint, + pub ifa_addr: *mut ::sockaddr, + pub ifa_netmask: *mut ::sockaddr, + pub ifa_ifu: *mut ::sockaddr, // FIXME This should be a union + pub ifa_data: *mut ::c_void + } + + pub struct in6_rtmsg { + rtmsg_dst: ::in6_addr, + rtmsg_src: ::in6_addr, + rtmsg_gateway: ::in6_addr, + rtmsg_type: u32, + rtmsg_dst_len: u16, + rtmsg_src_len: u16, + rtmsg_metric: u32, + rtmsg_info: ::c_ulong, + rtmsg_flags: u32, + rtmsg_ifindex: ::c_int, + } + + pub struct arpreq { + pub arp_pa: ::sockaddr, + pub arp_ha: ::sockaddr, + pub arp_flags: ::c_int, + pub arp_netmask: ::sockaddr, + pub arp_dev: [::c_char; 16], + } + + pub struct arpreq_old { + pub arp_pa: ::sockaddr, + pub arp_ha: ::sockaddr, + pub arp_flags: ::c_int, + pub arp_netmask: ::sockaddr, + } + + pub struct arphdr { + pub ar_hrd: u16, + pub ar_pro: u16, + pub ar_hln: u8, + pub ar_pln: u8, + pub ar_op: u16, + } } // intentionally not public, only used for fd_set @@ -573,6 +625,7 @@ pub const IP_MULTICAST_TTL: ::c_int = 33; pub const IP_MULTICAST_LOOP: ::c_int = 34; pub const IP_TTL: ::c_int = 2; pub const IP_HDRINCL: ::c_int = 3; +pub const IP_PKTINFO: ::c_int = 8; pub const IP_ADD_MEMBERSHIP: ::c_int = 35; pub const IP_DROP_MEMBERSHIP: ::c_int = 36; pub const IP_TRANSPARENT: ::c_int = 19; @@ -583,6 +636,8 @@ pub const IPV6_MULTICAST_LOOP: ::c_int = 19; pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20; pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21; pub const IPV6_V6ONLY: ::c_int = 26; +pub const IPV6_RECVPKTINFO: ::c_int = 49; +pub const IPV6_PKTINFO: ::c_int = 50; pub const TCP_NODELAY: ::c_int = 1; pub const TCP_MAXSEG: ::c_int = 2; @@ -805,6 +860,122 @@ pub const POLLNVAL: ::c_short = 0x20; pub const POLLRDNORM: ::c_short = 0x040; pub const 
POLLRDBAND: ::c_short = 0x080; +pub const IPTOS_LOWDELAY: u8 = 0x10; +pub const IPTOS_THROUGHPUT: u8 = 0x08; +pub const IPTOS_RELIABILITY: u8 = 0x04; +pub const IPTOS_MINCOST: u8 = 0x02; + +pub const IPTOS_PREC_NETCONTROL: u8 = 0xe0; +pub const IPTOS_PREC_INTERNETCONTROL: u8 = 0xc0; +pub const IPTOS_PREC_CRITIC_ECP: u8 = 0xa0; +pub const IPTOS_PREC_FLASHOVERRIDE: u8 = 0x80; +pub const IPTOS_PREC_FLASH: u8 = 0x60; +pub const IPTOS_PREC_IMMEDIATE: u8 = 0x40; +pub const IPTOS_PREC_PRIORITY: u8 = 0x20; +pub const IPTOS_PREC_ROUTINE: u8 = 0x00; + +pub const IPOPT_COPY: u8 = 0x80; +pub const IPOPT_CLASS_MASK: u8 = 0x60; +pub const IPOPT_NUMBER_MASK: u8 = 0x1f; + +pub const IPOPT_CONTROL: u8 = 0x00; +pub const IPOPT_RESERVED1: u8 = 0x20; +pub const IPOPT_MEASUREMENT: u8 = 0x40; +pub const IPOPT_RESERVED2: u8 = 0x60; +pub const IPOPT_END: u8 = (0 |IPOPT_CONTROL); +pub const IPOPT_NOOP: u8 = (1 |IPOPT_CONTROL); +pub const IPOPT_SEC: u8 = (2 |IPOPT_CONTROL|IPOPT_COPY); +pub const IPOPT_LSRR: u8 = (3 |IPOPT_CONTROL|IPOPT_COPY); +pub const IPOPT_TIMESTAMP: u8 = (4 |IPOPT_MEASUREMENT); +pub const IPOPT_RR: u8 = (7 |IPOPT_CONTROL); +pub const IPOPT_SID: u8 = (8 |IPOPT_CONTROL|IPOPT_COPY); +pub const IPOPT_SSRR: u8 = (9 |IPOPT_CONTROL|IPOPT_COPY); +pub const IPOPT_RA: u8 = (20|IPOPT_CONTROL|IPOPT_COPY); +pub const IPVERSION: u8 = 4; +pub const MAXTTL: u8 = 255; +pub const IPDEFTTL: u8 = 64; +pub const IPOPT_OPTVAL: u8 = 0; +pub const IPOPT_OLEN: u8 = 1; +pub const IPOPT_OFFSET: u8 = 2; +pub const IPOPT_MINOFF: u8 = 4; +pub const MAX_IPOPTLEN: u8 = 40; +pub const IPOPT_NOP: u8 = IPOPT_NOOP; +pub const IPOPT_EOL: u8 = IPOPT_END; +pub const IPOPT_TS: u8 = IPOPT_TIMESTAMP; +pub const IPOPT_TS_TSONLY: u8 = 0; +pub const IPOPT_TS_TSANDADDR: u8 = 1; +pub const IPOPT_TS_PRESPEC: u8 = 3; + +pub const ARPOP_RREQUEST: u16 = 3; +pub const ARPOP_RREPLY: u16 = 4; +pub const ARPOP_InREQUEST: u16 = 8; +pub const ARPOP_InREPLY: u16 = 9; +pub const ARPOP_NAK: u16 = 10; + +pub const ATF_NETMASK: 
::c_int = 0x20; +pub const ATF_DONTPUB: ::c_int = 0x40; + +pub const ARPHRD_NETROM: u16 = 0; +pub const ARPHRD_ETHER: u16 = 1; +pub const ARPHRD_EETHER: u16 = 2; +pub const ARPHRD_AX25: u16 = 3; +pub const ARPHRD_PRONET: u16 = 4; +pub const ARPHRD_CHAOS: u16 = 5; +pub const ARPHRD_IEEE802: u16 = 6; +pub const ARPHRD_ARCNET: u16 = 7; +pub const ARPHRD_APPLETLK: u16 = 8; +pub const ARPHRD_DLCI: u16 = 15; +pub const ARPHRD_ATM: u16 = 19; +pub const ARPHRD_METRICOM: u16 = 23; +pub const ARPHRD_IEEE1394: u16 = 24; +pub const ARPHRD_EUI64: u16 = 27; +pub const ARPHRD_INFINIBAND: u16 = 32; + +pub const ARPHRD_SLIP: u16 = 256; +pub const ARPHRD_CSLIP: u16 = 257; +pub const ARPHRD_SLIP6: u16 = 258; +pub const ARPHRD_CSLIP6: u16 = 259; +pub const ARPHRD_RSRVD: u16 = 260; +pub const ARPHRD_ADAPT: u16 = 264; +pub const ARPHRD_ROSE: u16 = 270; +pub const ARPHRD_X25: u16 = 271; +pub const ARPHRD_HWX25: u16 = 272; +pub const ARPHRD_PPP: u16 = 512; +pub const ARPHRD_CISCO: u16 = 513; +pub const ARPHRD_HDLC: u16 = ARPHRD_CISCO; +pub const ARPHRD_LAPB: u16 = 516; +pub const ARPHRD_DDCMP: u16 = 517; +pub const ARPHRD_RAWHDLC: u16 = 518; + +pub const ARPHRD_TUNNEL: u16 = 768; +pub const ARPHRD_TUNNEL6: u16 = 769; +pub const ARPHRD_FRAD: u16 = 770; +pub const ARPHRD_SKIP: u16 = 771; +pub const ARPHRD_LOOPBACK: u16 = 772; +pub const ARPHRD_LOCALTLK: u16 = 773; +pub const ARPHRD_FDDI: u16 = 774; +pub const ARPHRD_BIF: u16 = 775; +pub const ARPHRD_SIT: u16 = 776; +pub const ARPHRD_IPDDP: u16 = 777; +pub const ARPHRD_IPGRE: u16 = 778; +pub const ARPHRD_PIMREG: u16 = 779; +pub const ARPHRD_HIPPI: u16 = 780; +pub const ARPHRD_ASH: u16 = 781; +pub const ARPHRD_ECONET: u16 = 782; +pub const ARPHRD_IRDA: u16 = 783; +pub const ARPHRD_FCPP: u16 = 784; +pub const ARPHRD_FCAL: u16 = 785; +pub const ARPHRD_FCPL: u16 = 786; +pub const ARPHRD_FCFABRIC: u16 = 787; +pub const ARPHRD_IEEE802_TR: u16 = 800; +pub const ARPHRD_IEEE80211: u16 = 801; +pub const ARPHRD_IEEE80211_PRISM: u16 = 802; +pub const 
ARPHRD_IEEE80211_RADIOTAP: u16 = 803; +pub const ARPHRD_IEEE802154: u16 = 804; + +pub const ARPHRD_VOID: u16 = 0xFFFF; +pub const ARPHRD_NONE: u16 = 0xFFFE; + f! { pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () { let fd = fd as usize; @@ -867,6 +1038,18 @@ f! { pub fn QCMD(cmd: ::c_int, type_: ::c_int) -> ::c_int { (cmd << 8) | (type_ & 0x00ff) } + + pub fn IPOPT_COPIED(o: u8) -> u8 { + o & IPOPT_COPY + } + + pub fn IPOPT_CLASS(o: u8) -> u8 { + o & IPOPT_CLASS_MASK + } + + pub fn IPOPT_NUMBER(o: u8) -> u8 { + o & IPOPT_NUMBER_MASK + } } extern { @@ -946,8 +1129,6 @@ extern { pub fn stat64(path: *const c_char, buf: *mut stat64) -> ::c_int; pub fn truncate64(path: *const c_char, length: off64_t) -> ::c_int; - pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, @@ -994,6 +1175,23 @@ extern { pub fn fexecve(fd: ::c_int, argv: *const *const ::c_char, envp: *const *const ::c_char) -> ::c_int; + pub fn getifaddrs(ifap: *mut *mut ::ifaddrs) -> ::c_int; + pub fn freeifaddrs(ifa: *mut ::ifaddrs); + pub fn bind(socket: ::c_int, address: *const ::sockaddr, + address_len: ::socklen_t) -> ::c_int; + + pub fn writev(fd: ::c_int, + iov: *const ::iovec, + iovcnt: ::c_int) -> ::ssize_t; + pub fn readv(fd: ::c_int, + iov: *const ::iovec, + iovcnt: ::c_int) -> ::ssize_t; + + pub fn sendmsg(fd: ::c_int, + msg: *const ::msghdr, + flags: ::c_int) -> ::ssize_t; + pub fn recvmsg(fd: ::c_int, msg: *mut ::msghdr, flags: ::c_int) + -> ::ssize_t; } cfg_if! { diff --git a/third_party/rust/libc/src/unix/solaris/mod.rs b/third_party/rust/libc/src/unix/solaris/mod.rs index 024b8ec37264..f285191677cf 100644 --- a/third_party/rust/libc/src/unix/solaris/mod.rs +++ b/third_party/rust/libc/src/unix/solaris/mod.rs @@ -354,6 +354,7 @@ s! 
{ pub portev_user: *mut ::c_void, } + #[cfg_attr(any(target_arch = "x86", target_arch = "x86_64"), repr(packed))] pub struct epoll_event { pub events: ::uint32_t, pub u64: ::uint64_t, @@ -1208,7 +1209,7 @@ pub const EPOLLET: ::c_int = 0x80000000; pub const EPOLLRDHUP: ::c_int = 0x2000; pub const EPOLLEXCLUSIVE: ::c_int = 0x10000000; pub const EPOLLONESHOT: ::c_int = 0x40000000; -pub const EPOLL_CLOEXEC: ::c_int = 0x02000000; +pub const EPOLL_CLOEXEC: ::c_int = 0x80000; pub const EPOLL_CTL_ADD: ::c_int = 1; pub const EPOLL_CTL_MOD: ::c_int = 3; pub const EPOLL_CTL_DEL: ::c_int = 2; @@ -1265,6 +1266,7 @@ extern { pub fn ioctl(fildes: ::c_int, request: ::c_int, ...) -> ::c_int; pub fn mprotect(addr: *const ::c_void, len: ::size_t, prot: ::c_int) -> ::c_int; + pub fn ___errno() -> *mut ::c_int; pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int; pub fn clock_nanosleep(clk_id: ::clockid_t, @@ -1296,8 +1298,6 @@ extern { pub fn getpriority(which: ::c_int, who: ::c_int) -> ::c_int; pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; - pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, @@ -1315,6 +1315,8 @@ extern { clock_id: ::clockid_t) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, abstime: *const ::timespec) -> ::c_int; pub fn waitid(idtype: idtype_t, id: id_t, infop: *mut ::siginfo_t, diff --git a/third_party/rust/libc/src/unix/uclibc/mips/mips32.rs b/third_party/rust/libc/src/unix/uclibc/mips/mips32.rs index 70d26e78d939..dcbfcf8ff2bd 100644 --- a/third_party/rust/libc/src/unix/uclibc/mips/mips32.rs +++ 
b/third_party/rust/libc/src/unix/uclibc/mips/mips32.rs @@ -66,7 +66,7 @@ s! { } pub struct sigaction { - pub sa_flags: ::c_int, + pub sa_flags: ::c_uint, pub sa_sigaction: ::sighandler_t, pub sa_mask: sigset_t, _restorer: *mut ::c_void, @@ -222,11 +222,16 @@ s! { } // FIXME this is actually a union + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = "64"), + repr(align(8)))] pub struct sem_t { #[cfg(target_pointer_width = "32")] __size: [::c_char; 16], #[cfg(target_pointer_width = "64")] __size: [::c_char; 32], + #[cfg(not(feature = "align"))] __align: [::c_long; 0], } } @@ -238,7 +243,372 @@ pub const __SIZEOF_PTHREAD_MUTEXATTR_T: usize = 4; pub const RLIM_INFINITY: ::rlim_t = 0x7fffffff; -pub const SYS_gettid: ::c_long = 4222; // Valid for O32 +pub const SYS_syscall: ::c_long = 4000 + 0; +pub const SYS_exit: ::c_long = 4000 + 1; +pub const SYS_fork: ::c_long = 4000 + 2; +pub const SYS_read: ::c_long = 4000 + 3; +pub const SYS_write: ::c_long = 4000 + 4; +pub const SYS_open: ::c_long = 4000 + 5; +pub const SYS_close: ::c_long = 4000 + 6; +pub const SYS_waitpid: ::c_long = 4000 + 7; +pub const SYS_creat: ::c_long = 4000 + 8; +pub const SYS_link: ::c_long = 4000 + 9; +pub const SYS_unlink: ::c_long = 4000 + 10; +pub const SYS_execve: ::c_long = 4000 + 11; +pub const SYS_chdir: ::c_long = 4000 + 12; +pub const SYS_time: ::c_long = 4000 + 13; +pub const SYS_mknod: ::c_long = 4000 + 14; +pub const SYS_chmod: ::c_long = 4000 + 15; +pub const SYS_lchown: ::c_long = 4000 + 16; +pub const SYS_break: ::c_long = 4000 + 17; +pub const SYS_unused18: ::c_long = 4000 + 18; +pub const SYS_lseek: ::c_long = 4000 + 19; +pub const SYS_getpid: ::c_long = 4000 + 20; +pub const SYS_mount: ::c_long = 4000 + 21; +pub const SYS_umount: ::c_long = 4000 + 22; +pub const SYS_setuid: ::c_long = 4000 + 23; +pub const SYS_getuid: ::c_long = 4000 + 24; +pub const SYS_stime: ::c_long = 4000 + 25; +pub 
const SYS_ptrace: ::c_long = 4000 + 26; +pub const SYS_alarm: ::c_long = 4000 + 27; +pub const SYS_unused28: ::c_long = 4000 + 28; +pub const SYS_pause: ::c_long = 4000 + 29; +pub const SYS_utime: ::c_long = 4000 + 30; +pub const SYS_stty: ::c_long = 4000 + 31; +pub const SYS_gtty: ::c_long = 4000 + 32; +pub const SYS_access: ::c_long = 4000 + 33; +pub const SYS_nice: ::c_long = 4000 + 34; +pub const SYS_ftime: ::c_long = 4000 + 35; +pub const SYS_sync: ::c_long = 4000 + 36; +pub const SYS_kill: ::c_long = 4000 + 37; +pub const SYS_rename: ::c_long = 4000 + 38; +pub const SYS_mkdir: ::c_long = 4000 + 39; +pub const SYS_rmdir: ::c_long = 4000 + 40; +pub const SYS_dup: ::c_long = 4000 + 41; +pub const SYS_pipe: ::c_long = 4000 + 42; +pub const SYS_times: ::c_long = 4000 + 43; +pub const SYS_prof: ::c_long = 4000 + 44; +pub const SYS_brk: ::c_long = 4000 + 45; +pub const SYS_setgid: ::c_long = 4000 + 46; +pub const SYS_getgid: ::c_long = 4000 + 47; +pub const SYS_signal: ::c_long = 4000 + 48; +pub const SYS_geteuid: ::c_long = 4000 + 49; +pub const SYS_getegid: ::c_long = 4000 + 50; +pub const SYS_acct: ::c_long = 4000 + 51; +pub const SYS_umount2: ::c_long = 4000 + 52; +pub const SYS_lock: ::c_long = 4000 + 53; +pub const SYS_ioctl: ::c_long = 4000 + 54; +pub const SYS_fcntl: ::c_long = 4000 + 55; +pub const SYS_mpx: ::c_long = 4000 + 56; +pub const SYS_setpgid: ::c_long = 4000 + 57; +pub const SYS_ulimit: ::c_long = 4000 + 58; +pub const SYS_unused59: ::c_long = 4000 + 59; +pub const SYS_umask: ::c_long = 4000 + 60; +pub const SYS_chroot: ::c_long = 4000 + 61; +pub const SYS_ustat: ::c_long = 4000 + 62; +pub const SYS_dup2: ::c_long = 4000 + 63; +pub const SYS_getppid: ::c_long = 4000 + 64; +pub const SYS_getpgrp: ::c_long = 4000 + 65; +pub const SYS_setsid: ::c_long = 4000 + 66; +pub const SYS_sigaction: ::c_long = 4000 + 67; +pub const SYS_sgetmask: ::c_long = 4000 + 68; +pub const SYS_ssetmask: ::c_long = 4000 + 69; +pub const SYS_setreuid: ::c_long = 4000 + 70; 
+pub const SYS_setregid: ::c_long = 4000 + 71; +pub const SYS_sigsuspend: ::c_long = 4000 + 72; +pub const SYS_sigpending: ::c_long = 4000 + 73; +pub const SYS_sethostname: ::c_long = 4000 + 74; +pub const SYS_setrlimit: ::c_long = 4000 + 75; +pub const SYS_getrlimit: ::c_long = 4000 + 76; +pub const SYS_getrusage: ::c_long = 4000 + 77; +pub const SYS_gettimeofday: ::c_long = 4000 + 78; +pub const SYS_settimeofday: ::c_long = 4000 + 79; +pub const SYS_getgroups: ::c_long = 4000 + 80; +pub const SYS_setgroups: ::c_long = 4000 + 81; +pub const SYS_reserved82: ::c_long = 4000 + 82; +pub const SYS_symlink: ::c_long = 4000 + 83; +pub const SYS_unused84: ::c_long = 4000 + 84; +pub const SYS_readlink: ::c_long = 4000 + 85; +pub const SYS_uselib: ::c_long = 4000 + 86; +pub const SYS_swapon: ::c_long = 4000 + 87; +pub const SYS_reboot: ::c_long = 4000 + 88; +pub const SYS_readdir: ::c_long = 4000 + 89; +pub const SYS_mmap: ::c_long = 4000 + 90; +pub const SYS_munmap: ::c_long = 4000 + 91; +pub const SYS_truncate: ::c_long = 4000 + 92; +pub const SYS_ftruncate: ::c_long = 4000 + 93; +pub const SYS_fchmod: ::c_long = 4000 + 94; +pub const SYS_fchown: ::c_long = 4000 + 95; +pub const SYS_getpriority: ::c_long = 4000 + 96; +pub const SYS_setpriority: ::c_long = 4000 + 97; +pub const SYS_profil: ::c_long = 4000 + 98; +pub const SYS_statfs: ::c_long = 4000 + 99; +pub const SYS_fstatfs: ::c_long = 4000 + 100; +pub const SYS_ioperm: ::c_long = 4000 + 101; +pub const SYS_socketcall: ::c_long = 4000 + 102; +pub const SYS_syslog: ::c_long = 4000 + 103; +pub const SYS_setitimer: ::c_long = 4000 + 104; +pub const SYS_getitimer: ::c_long = 4000 + 105; +pub const SYS_stat: ::c_long = 4000 + 106; +pub const SYS_lstat: ::c_long = 4000 + 107; +pub const SYS_fstat: ::c_long = 4000 + 108; +pub const SYS_unused109: ::c_long = 4000 + 109; +pub const SYS_iopl: ::c_long = 4000 + 110; +pub const SYS_vhangup: ::c_long = 4000 + 111; +pub const SYS_idle: ::c_long = 4000 + 112; +pub const SYS_vm86: 
::c_long = 4000 + 113; +pub const SYS_wait4: ::c_long = 4000 + 114; +pub const SYS_swapoff: ::c_long = 4000 + 115; +pub const SYS_sysinfo: ::c_long = 4000 + 116; +pub const SYS_ipc: ::c_long = 4000 + 117; +pub const SYS_fsync: ::c_long = 4000 + 118; +pub const SYS_sigreturn: ::c_long = 4000 + 119; +pub const SYS_clone: ::c_long = 4000 + 120; +pub const SYS_setdomainname: ::c_long = 4000 + 121; +pub const SYS_uname: ::c_long = 4000 + 122; +pub const SYS_modify_ldt: ::c_long = 4000 + 123; +pub const SYS_adjtimex: ::c_long = 4000 + 124; +pub const SYS_mprotect: ::c_long = 4000 + 125; +pub const SYS_sigprocmask: ::c_long = 4000 + 126; +pub const SYS_create_module: ::c_long = 4000 + 127; +pub const SYS_init_module: ::c_long = 4000 + 128; +pub const SYS_delete_module: ::c_long = 4000 + 129; +pub const SYS_get_kernel_syms: ::c_long = 4000 + 130; +pub const SYS_quotactl: ::c_long = 4000 + 131; +pub const SYS_getpgid: ::c_long = 4000 + 132; +pub const SYS_fchdir: ::c_long = 4000 + 133; +pub const SYS_bdflush: ::c_long = 4000 + 134; +pub const SYS_sysfs: ::c_long = 4000 + 135; +pub const SYS_personality: ::c_long = 4000 + 136; +pub const SYS_afs_syscall: ::c_long = 4000 + 137; +pub const SYS_setfsuid: ::c_long = 4000 + 138; +pub const SYS_setfsgid: ::c_long = 4000 + 139; +pub const SYS__llseek: ::c_long = 4000 + 140; +pub const SYS_getdents: ::c_long = 4000 + 141; +pub const SYS__newselect: ::c_long = 4000 + 142; +pub const SYS_flock: ::c_long = 4000 + 143; +pub const SYS_msync: ::c_long = 4000 + 144; +pub const SYS_readv: ::c_long = 4000 + 145; +pub const SYS_writev: ::c_long = 4000 + 146; +pub const SYS_cacheflush: ::c_long = 4000 + 147; +pub const SYS_cachectl: ::c_long = 4000 + 148; +pub const SYS_sysmips: ::c_long = 4000 + 149; +pub const SYS_unused150: ::c_long = 4000 + 150; +pub const SYS_getsid: ::c_long = 4000 + 151; +pub const SYS_fdatasync: ::c_long = 4000 + 152; +pub const SYS__sysctl: ::c_long = 4000 + 153; +pub const SYS_mlock: ::c_long = 4000 + 154; +pub const 
SYS_munlock: ::c_long = 4000 + 155; +pub const SYS_mlockall: ::c_long = 4000 + 156; +pub const SYS_munlockall: ::c_long = 4000 + 157; +pub const SYS_sched_setparam: ::c_long = 4000 + 158; +pub const SYS_sched_getparam: ::c_long = 4000 + 159; +pub const SYS_sched_setscheduler: ::c_long = 4000 + 160; +pub const SYS_sched_getscheduler: ::c_long = 4000 + 161; +pub const SYS_sched_yield: ::c_long = 4000 + 162; +pub const SYS_sched_get_priority_max: ::c_long = 4000 + 163; +pub const SYS_sched_get_priority_min: ::c_long = 4000 + 164; +pub const SYS_sched_rr_get_interval: ::c_long = 4000 + 165; +pub const SYS_nanosleep: ::c_long = 4000 + 166; +pub const SYS_mremap: ::c_long = 4000 + 167; +pub const SYS_accept: ::c_long = 4000 + 168; +pub const SYS_bind: ::c_long = 4000 + 169; +pub const SYS_connect: ::c_long = 4000 + 170; +pub const SYS_getpeername: ::c_long = 4000 + 171; +pub const SYS_getsockname: ::c_long = 4000 + 172; +pub const SYS_getsockopt: ::c_long = 4000 + 173; +pub const SYS_listen: ::c_long = 4000 + 174; +pub const SYS_recv: ::c_long = 4000 + 175; +pub const SYS_recvfrom: ::c_long = 4000 + 176; +pub const SYS_recvmsg: ::c_long = 4000 + 177; +pub const SYS_send: ::c_long = 4000 + 178; +pub const SYS_sendmsg: ::c_long = 4000 + 179; +pub const SYS_sendto: ::c_long = 4000 + 180; +pub const SYS_setsockopt: ::c_long = 4000 + 181; +pub const SYS_shutdown: ::c_long = 4000 + 182; +pub const SYS_socket: ::c_long = 4000 + 183; +pub const SYS_socketpair: ::c_long = 4000 + 184; +pub const SYS_setresuid: ::c_long = 4000 + 185; +pub const SYS_getresuid: ::c_long = 4000 + 186; +pub const SYS_query_module: ::c_long = 4000 + 187; +pub const SYS_poll: ::c_long = 4000 + 188; +pub const SYS_nfsservctl: ::c_long = 4000 + 189; +pub const SYS_setresgid: ::c_long = 4000 + 190; +pub const SYS_getresgid: ::c_long = 4000 + 191; +pub const SYS_prctl: ::c_long = 4000 + 192; +pub const SYS_rt_sigreturn: ::c_long = 4000 + 193; +pub const SYS_rt_sigaction: ::c_long = 4000 + 194; +pub const 
SYS_rt_sigprocmask: ::c_long = 4000 + 195; +pub const SYS_rt_sigpending: ::c_long = 4000 + 196; +pub const SYS_rt_sigtimedwait: ::c_long = 4000 + 197; +pub const SYS_rt_sigqueueinfo: ::c_long = 4000 + 198; +pub const SYS_rt_sigsuspend: ::c_long = 4000 + 199; +pub const SYS_pread64: ::c_long = 4000 + 200; +pub const SYS_pwrite64: ::c_long = 4000 + 201; +pub const SYS_chown: ::c_long = 4000 + 202; +pub const SYS_getcwd: ::c_long = 4000 + 203; +pub const SYS_capget: ::c_long = 4000 + 204; +pub const SYS_capset: ::c_long = 4000 + 205; +pub const SYS_sigaltstack: ::c_long = 4000 + 206; +pub const SYS_sendfile: ::c_long = 4000 + 207; +pub const SYS_getpmsg: ::c_long = 4000 + 208; +pub const SYS_putpmsg: ::c_long = 4000 + 209; +pub const SYS_mmap2: ::c_long = 4000 + 210; +pub const SYS_truncate64: ::c_long = 4000 + 211; +pub const SYS_ftruncate64: ::c_long = 4000 + 212; +pub const SYS_stat64: ::c_long = 4000 + 213; +pub const SYS_lstat64: ::c_long = 4000 + 214; +pub const SYS_fstat64: ::c_long = 4000 + 215; +pub const SYS_pivot_root: ::c_long = 4000 + 216; +pub const SYS_mincore: ::c_long = 4000 + 217; +pub const SYS_madvise: ::c_long = 4000 + 218; +pub const SYS_getdents64: ::c_long = 4000 + 219; +pub const SYS_fcntl64: ::c_long = 4000 + 220; +pub const SYS_reserved221: ::c_long = 4000 + 221; +pub const SYS_gettid: ::c_long = 4000 + 222; +pub const SYS_readahead: ::c_long = 4000 + 223; +pub const SYS_setxattr: ::c_long = 4000 + 224; +pub const SYS_lsetxattr: ::c_long = 4000 + 225; +pub const SYS_fsetxattr: ::c_long = 4000 + 226; +pub const SYS_getxattr: ::c_long = 4000 + 227; +pub const SYS_lgetxattr: ::c_long = 4000 + 228; +pub const SYS_fgetxattr: ::c_long = 4000 + 229; +pub const SYS_listxattr: ::c_long = 4000 + 230; +pub const SYS_llistxattr: ::c_long = 4000 + 231; +pub const SYS_flistxattr: ::c_long = 4000 + 232; +pub const SYS_removexattr: ::c_long = 4000 + 233; +pub const SYS_lremovexattr: ::c_long = 4000 + 234; +pub const SYS_fremovexattr: ::c_long = 4000 + 235; 
+pub const SYS_tkill: ::c_long = 4000 + 236; +pub const SYS_sendfile64: ::c_long = 4000 + 237; +pub const SYS_futex: ::c_long = 4000 + 238; +pub const SYS_sched_setaffinity: ::c_long = 4000 + 239; +pub const SYS_sched_getaffinity: ::c_long = 4000 + 240; +pub const SYS_io_setup: ::c_long = 4000 + 241; +pub const SYS_io_destroy: ::c_long = 4000 + 242; +pub const SYS_io_getevents: ::c_long = 4000 + 243; +pub const SYS_io_submit: ::c_long = 4000 + 244; +pub const SYS_io_cancel: ::c_long = 4000 + 245; +pub const SYS_exit_group: ::c_long = 4000 + 246; +pub const SYS_lookup_dcookie: ::c_long = 4000 + 247; +pub const SYS_epoll_create: ::c_long = 4000 + 248; +pub const SYS_epoll_ctl: ::c_long = 4000 + 249; +pub const SYS_epoll_wait: ::c_long = 4000 + 250; +pub const SYS_remap_file_pages: ::c_long = 4000 + 251; +pub const SYS_set_tid_address: ::c_long = 4000 + 252; +pub const SYS_restart_syscall: ::c_long = 4000 + 253; +pub const SYS_fadvise64: ::c_long = 4000 + 254; +pub const SYS_statfs64: ::c_long = 4000 + 255; +pub const SYS_fstatfs64: ::c_long = 4000 + 256; +pub const SYS_timer_create: ::c_long = 4000 + 257; +pub const SYS_timer_settime: ::c_long = 4000 + 258; +pub const SYS_timer_gettime: ::c_long = 4000 + 259; +pub const SYS_timer_getoverrun: ::c_long = 4000 + 260; +pub const SYS_timer_delete: ::c_long = 4000 + 261; +pub const SYS_clock_settime: ::c_long = 4000 + 262; +pub const SYS_clock_gettime: ::c_long = 4000 + 263; +pub const SYS_clock_getres: ::c_long = 4000 + 264; +pub const SYS_clock_nanosleep: ::c_long = 4000 + 265; +pub const SYS_tgkill: ::c_long = 4000 + 266; +pub const SYS_utimes: ::c_long = 4000 + 267; +pub const SYS_mbind: ::c_long = 4000 + 268; +pub const SYS_get_mempolicy: ::c_long = 4000 + 269; +pub const SYS_set_mempolicy: ::c_long = 4000 + 270; +pub const SYS_mq_open: ::c_long = 4000 + 271; +pub const SYS_mq_unlink: ::c_long = 4000 + 272; +pub const SYS_mq_timedsend: ::c_long = 4000 + 273; +pub const SYS_mq_timedreceive: ::c_long = 4000 + 274; +pub 
const SYS_mq_notify: ::c_long = 4000 + 275; +pub const SYS_mq_getsetattr: ::c_long = 4000 + 276; +pub const SYS_vserver: ::c_long = 4000 + 277; +pub const SYS_waitid: ::c_long = 4000 + 278; +/* pub const SYS_sys_setaltroot: ::c_long = 4000 + 279; */ +pub const SYS_add_key: ::c_long = 4000 + 280; +pub const SYS_request_key: ::c_long = 4000 + 281; +pub const SYS_keyctl: ::c_long = 4000 + 282; +pub const SYS_set_thread_area: ::c_long = 4000 + 283; +pub const SYS_inotify_init: ::c_long = 4000 + 284; +pub const SYS_inotify_add_watch: ::c_long = 4000 + 285; +pub const SYS_inotify_rm_watch: ::c_long = 4000 + 286; +pub const SYS_migrate_pages: ::c_long = 4000 + 287; +pub const SYS_openat: ::c_long = 4000 + 288; +pub const SYS_mkdirat: ::c_long = 4000 + 289; +pub const SYS_mknodat: ::c_long = 4000 + 290; +pub const SYS_fchownat: ::c_long = 4000 + 291; +pub const SYS_futimesat: ::c_long = 4000 + 292; +pub const SYS_fstatat64: ::c_long = 4000 + 293; +pub const SYS_unlinkat: ::c_long = 4000 + 294; +pub const SYS_renameat: ::c_long = 4000 + 295; +pub const SYS_linkat: ::c_long = 4000 + 296; +pub const SYS_symlinkat: ::c_long = 4000 + 297; +pub const SYS_readlinkat: ::c_long = 4000 + 298; +pub const SYS_fchmodat: ::c_long = 4000 + 299; +pub const SYS_faccessat: ::c_long = 4000 + 300; +pub const SYS_pselect6: ::c_long = 4000 + 301; +pub const SYS_ppoll: ::c_long = 4000 + 302; +pub const SYS_unshare: ::c_long = 4000 + 303; +pub const SYS_splice: ::c_long = 4000 + 304; +pub const SYS_sync_file_range: ::c_long = 4000 + 305; +pub const SYS_tee: ::c_long = 4000 + 306; +pub const SYS_vmsplice: ::c_long = 4000 + 307; +pub const SYS_move_pages: ::c_long = 4000 + 308; +pub const SYS_set_robust_list: ::c_long = 4000 + 309; +pub const SYS_get_robust_list: ::c_long = 4000 + 310; +pub const SYS_kexec_load: ::c_long = 4000 + 311; +pub const SYS_getcpu: ::c_long = 4000 + 312; +pub const SYS_epoll_pwait: ::c_long = 4000 + 313; +pub const SYS_ioprio_set: ::c_long = 4000 + 314; +pub const 
SYS_ioprio_get: ::c_long = 4000 + 315; +pub const SYS_utimensat: ::c_long = 4000 + 316; +pub const SYS_signalfd: ::c_long = 4000 + 317; +pub const SYS_timerfd: ::c_long = 4000 + 318; +pub const SYS_eventfd: ::c_long = 4000 + 319; +pub const SYS_fallocate: ::c_long = 4000 + 320; +pub const SYS_timerfd_create: ::c_long = 4000 + 321; +pub const SYS_timerfd_gettime: ::c_long = 4000 + 322; +pub const SYS_timerfd_settime: ::c_long = 4000 + 323; +pub const SYS_signalfd4: ::c_long = 4000 + 324; +pub const SYS_eventfd2: ::c_long = 4000 + 325; +pub const SYS_epoll_create1: ::c_long = 4000 + 326; +pub const SYS_dup3: ::c_long = 4000 + 327; +pub const SYS_pipe2: ::c_long = 4000 + 328; +pub const SYS_inotify_init1: ::c_long = 4000 + 329; +pub const SYS_preadv: ::c_long = 4000 + 330; +pub const SYS_pwritev: ::c_long = 4000 + 331; +pub const SYS_rt_tgsigqueueinfo: ::c_long = 4000 + 332; +pub const SYS_perf_event_open: ::c_long = 4000 + 333; +pub const SYS_accept4: ::c_long = 4000 + 334; +pub const SYS_recvmmsg: ::c_long = 4000 + 335; +pub const SYS_fanotify_init: ::c_long = 4000 + 336; +pub const SYS_fanotify_mark: ::c_long = 4000 + 337; +pub const SYS_prlimit64: ::c_long = 4000 + 338; +pub const SYS_name_to_handle_at: ::c_long = 4000 + 339; +pub const SYS_open_by_handle_at: ::c_long = 4000 + 340; +pub const SYS_clock_adjtime: ::c_long = 4000 + 341; +pub const SYS_syncfs: ::c_long = 4000 + 342; +pub const SYS_sendmmsg: ::c_long = 4000 + 343; +pub const SYS_setns: ::c_long = 4000 + 344; +pub const SYS_process_vm_readv: ::c_long = 4000 + 345; +pub const SYS_process_vm_writev: ::c_long = 4000 + 346; +pub const SYS_kcmp: ::c_long = 4000 + 347; +pub const SYS_finit_module: ::c_long = 4000 + 348; +pub const SYS_sched_setattr: ::c_long = 4000 + 349; +pub const SYS_sched_getattr: ::c_long = 4000 + 350; +pub const SYS_renameat2: ::c_long = 4000 + 351; +pub const SYS_seccomp: ::c_long = 4000 + 352; +pub const SYS_getrandom: ::c_long = 4000 + 353; +pub const SYS_memfd_create: ::c_long = 
4000 + 354; +pub const SYS_bpf: ::c_long = 4000 + 355; +pub const SYS_execveat: ::c_long = 4000 + 356; +pub const SYS_userfaultfd: ::c_long = 4000 + 357; +pub const SYS_membarrier: ::c_long = 4000 + 358; +pub const SYS_mlock2: ::c_long = 4000 + 359; +pub const SYS_copy_file_range: ::c_long = 4000 + 360; +pub const SYS_preadv2: ::c_long = 4000 + 361; +pub const SYS_pwritev2: ::c_long = 4000 + 362; +pub const SYS_pkey_mprotect: ::c_long = 4000 + 363; +pub const SYS_pkey_alloc: ::c_long = 4000 + 364; +pub const SYS_pkey_free: ::c_long = 4000 + 365; #[link(name = "util")] extern { diff --git a/third_party/rust/libc/src/unix/uclibc/mips/mips64.rs b/third_party/rust/libc/src/unix/uclibc/mips/mips64.rs index 79bac1fa8a47..e35938b1fc8d 100644 --- a/third_party/rust/libc/src/unix/uclibc/mips/mips64.rs +++ b/third_party/rust/libc/src/unix/uclibc/mips/mips64.rs @@ -188,8 +188,13 @@ s! { } // FIXME this is actually a union + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = "64"), + repr(align(8)))] pub struct sem_t { __size: [::c_char; 32], + #[cfg(not(feature = "align"))] __align: [::c_long; 0], } } diff --git a/third_party/rust/libc/src/unix/uclibc/mips/mod.rs b/third_party/rust/libc/src/unix/uclibc/mips/mod.rs index cc3ddf254d73..d197249d0579 100644 --- a/third_party/rust/libc/src/unix/uclibc/mips/mod.rs +++ b/third_party/rust/libc/src/unix/uclibc/mips/mod.rs @@ -1,3 +1,5 @@ +pub type pthread_t = ::c_ulong; + pub const SFD_CLOEXEC: ::c_int = 0x080000; pub const NCCS: usize = 32; diff --git a/third_party/rust/libc/src/unix/uclibc/mod.rs b/third_party/rust/libc/src/unix/uclibc/mod.rs index 3f3a2bab8021..e3606c226640 100644 --- a/third_party/rust/libc/src/unix/uclibc/mod.rs +++ b/third_party/rust/libc/src/unix/uclibc/mod.rs @@ -11,7 +11,6 @@ pub type id_t = ::c_uint; pub type useconds_t = u32; pub type dev_t = u64; pub type socklen_t = u32; -pub type pthread_t = ::c_ulong; pub type 
mode_t = u32; pub type ino64_t = u64; pub type off64_t = i64; @@ -225,34 +224,80 @@ s! { pub ifa_data: *mut ::c_void } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))), + repr(align(8)))] pub struct pthread_mutex_t { - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", + #[cfg(not(any(feature = "align", + target_arch = "mips", + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))), + repr(align(8)))] pub struct pthread_rwlock_t { - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", + #[cfg(not(any(feature = "align", + target_arch = "mips", + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], } + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64")), + 
repr(align(4)))] + #[cfg_attr(all(feature = "align", + not(any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64"))), + repr(align(8)))] pub struct pthread_mutexattr_t { - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64"))] - __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", + #[cfg(all(not(feature = "align"), + any(target_arch = "x86_64", target_arch = "powerpc64", target_arch = "mips64", target_arch = "s390x", target_arch = "sparc64")))] + __align: [::c_int; 0], + #[cfg(all(not(feature = "align"), + not(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64"))))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } @@ -262,12 +307,16 @@ s! { __pshared: ::c_int, } + #[cfg_attr(feature = "align", repr(align(8)))] pub struct pthread_cond_t { + #[cfg(not(feature = "align"))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_COND_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_condattr_t { + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], } @@ -1117,18 +1166,17 @@ pub const RTLD_NOW: ::c_int = 0x2; pub const TCP_MD5SIG: ::c_int = 14; -pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_MUTEX_T], -}; -pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_COND_T], -}; -pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { - __align: [], - size: [0; __SIZEOF_PTHREAD_RWLOCK_T], -}; +align_const! 
{ + pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + size: [0; __SIZEOF_PTHREAD_MUTEX_T], + }; + pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + size: [0; __SIZEOF_PTHREAD_COND_T], + }; + pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = pthread_rwlock_t { + size: [0; __SIZEOF_PTHREAD_RWLOCK_T], + }; +} pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; @@ -1329,6 +1377,9 @@ pub const PR_GET_TID_ADDRESS: ::c_int = 40; pub const PR_SET_THP_DISABLE: ::c_int = 41; pub const PR_GET_THP_DISABLE: ::c_int = 42; +pub const GRND_NONBLOCK: ::c_uint = 0x0001; +pub const GRND_RANDOM: ::c_uint = 0x0002; + pub const ABDAY_1: ::nl_item = 0x300; pub const ABDAY_2: ::nl_item = 0x301; pub const ABDAY_3: ::nl_item = 0x302; @@ -1610,8 +1661,6 @@ extern { pub fn truncate64(path: *const c_char, length: off64_t) -> ::c_int; pub fn eventfd(init: ::c_uint, flags: ::c_int) -> ::c_int; - pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; - pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; pub fn ppoll(fds: *mut ::pollfd, @@ -1635,6 +1684,8 @@ extern { pub fn unshare(flags: ::c_int) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; + pub fn sem_getvalue(sem: *mut sem_t, + sval: *mut ::c_int) -> ::c_int; pub fn accept4(fd: ::c_int, addr: *mut ::sockaddr, len: *mut ::socklen_t, flg: ::c_int) -> ::c_int; pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, diff --git a/third_party/rust/libc/src/unix/uclibc/x86_64/l4re.rs b/third_party/rust/libc/src/unix/uclibc/x86_64/l4re.rs index f047a82e004e..4f5811d17dfe 100644 --- a/third_party/rust/libc/src/unix/uclibc/x86_64/l4re.rs +++ b/third_party/rust/libc/src/unix/uclibc/x86_64/l4re.rs @@ -4,6 +4,7 @@ /// libc and hence we should provide them here. pub type l4_umword_t = ::c_ulong; // Unsigned machine word. 
+pub type pthread_t = *mut ::c_void; s! { /// CPU sets. diff --git a/third_party/rust/libc/src/unix/uclibc/x86_64/mod.rs b/third_party/rust/libc/src/unix/uclibc/x86_64/mod.rs index 7d082589d04a..bc6571aff98c 100644 --- a/third_party/rust/libc/src/unix/uclibc/x86_64/mod.rs +++ b/third_party/rust/libc/src/unix/uclibc/x86_64/mod.rs @@ -133,6 +133,7 @@ s! { // // pub struct in6_addr { // pub s6_addr: [u8; 16], +// #[cfg(not(feature = "align"))] // __align: [u32; 0], // } @@ -204,51 +205,106 @@ s! { pub c_cc: [::cc_t; ::NCCS], } + #[cfg_attr(all(feature = "align", target_pointer_width = "32"), + repr(align(4)))] + #[cfg_attr(all(feature = "align", target_pointer_width = "64"), + repr(align(8)))] pub struct sem_t { // ToDo #[cfg(target_pointer_width = "32")] __size: [::c_char; 16], #[cfg(target_pointer_width = "64")] __size: [::c_char; 32], + #[cfg(not(feature = "align"))] __align: [::c_long; 0], } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))), + repr(align(8)))] pub struct pthread_mutex_t { // ToDo - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", + #[cfg(not(any(feature = "align", + target_arch = "mips", + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_MUTEX_T], } + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64")), + repr(align(4)))] + #[cfg_attr(all(feature = 
"align", + not(any(target_pointer_width = "32", + target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64"))), + repr(align(8)))] pub struct pthread_mutexattr_t { // ToDo - #[cfg(any(target_arch = "x86_64", target_arch = "powerpc64", - target_arch = "mips64", target_arch = "s390x", - target_arch = "sparc64"))] - __align: [::c_int; 0], - #[cfg(not(any(target_arch = "x86_64", target_arch = "powerpc64", + #[cfg(all(not(feature = "align"), + any(target_arch = "x86_64", target_arch = "powerpc64", target_arch = "mips64", target_arch = "s390x", target_arch = "sparc64")))] + __align: [::c_int; 0], + #[cfg(all(not(feature = "align"), + not(any(target_arch = "x86_64", target_arch = "powerpc64", + target_arch = "mips64", target_arch = "s390x", + target_arch = "sparc64"))))] __align: [::c_long; 0], size: [u8; __SIZEOF_PTHREAD_MUTEXATTR_T], } + #[cfg_attr(feature = "align", repr(align(8)))] pub struct pthread_cond_t { // ToDo + #[cfg(not(feature = "align"))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_COND_T], } + #[cfg_attr(feature = "align", repr(align(4)))] pub struct pthread_condattr_t { // ToDo + #[cfg(not(feature = "align"))] __align: [::c_int; 0], size: [u8; __SIZEOF_PTHREAD_CONDATTR_T], } + #[cfg_attr(all(feature = "align", + target_pointer_width = "32", + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")), + repr(align(4)))] + #[cfg_attr(all(feature = "align", + any(target_pointer_width = "64", + not(any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))), + repr(align(8)))] pub struct pthread_rwlock_t { // ToDo - #[cfg(any(target_arch = "mips", target_arch = "arm", - target_arch = "powerpc"))] + #[cfg(all(not(feature = "align"), + any(target_arch = "mips", + target_arch = "arm", + target_arch = "powerpc")))] __align: [::c_long; 0], - #[cfg(not(any(target_arch = "mips", target_arch = "arm", + #[cfg(not(any(feature = "align", + target_arch = 
"mips", + target_arch = "arm", target_arch = "powerpc")))] __align: [::c_longlong; 0], size: [u8; __SIZEOF_PTHREAD_RWLOCK_T], @@ -352,6 +408,9 @@ cfg_if! { if #[cfg(target_os = "l4re")] { mod l4re; pub use self::l4re::*; - } else { } + } else { + mod other; + pub use other::*; + } } diff --git a/third_party/rust/libc/src/unix/uclibc/x86_64/other.rs b/third_party/rust/libc/src/unix/uclibc/x86_64/other.rs new file mode 100644 index 000000000000..1cc521df992a --- /dev/null +++ b/third_party/rust/libc/src/unix/uclibc/x86_64/other.rs @@ -0,0 +1,4 @@ +// Thestyle checker discourages the use of #[cfg], so this has to go into a +// separate module +pub type pthread_t = ::c_ulong; + diff --git a/third_party/rust/mime/.cargo-checksum.json b/third_party/rust/mime/.cargo-checksum.json deleted file mode 100644 index 919e1cb2b13d..000000000000 --- a/third_party/rust/mime/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".travis.yml":"713aa1cac091a99e295194ca0dfd6d74028edbb02a0f5f627f3d3dfdf6437ee0","Cargo.toml":"c2757b0a3b45e977f1f872603ad3cee9a029ffb9a37af5eb639a7c16c96fa3d9","LICENSE":"df9cfd06d8a44d9a671eadd39ffd97f166481da015a30f45dfd27886209c5922","README.md":"706ee5777dbfea1255359f2dd3f905f7bb89282a528c9c14e38751e5e874fd5e","src/lib.rs":"0f534df49767cdf07ee2f59db9b8aeb8ca9f552531504eef0b1bbe55f6fbd7f7"},"package":"ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0"} \ No newline at end of file diff --git a/third_party/rust/mime/.travis.yml b/third_party/rust/mime/.travis.yml deleted file mode 100644 index 87326907bb8b..000000000000 --- a/third_party/rust/mime/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: rust -rust: beta - diff --git a/third_party/rust/mime/README.md b/third_party/rust/mime/README.md deleted file mode 100644 index 0e6d119a3705..000000000000 --- a/third_party/rust/mime/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# mime.rs - -[![Build 
Status](https://travis-ci.org/hyperium/mime.rs.svg?branch=master)](https://travis-ci.org/hyperium/mime.rs) - -Support MIME (Media Types) as strong types in Rust. - -[Documentation](http://hyperium.github.io/mime.rs) - -## License - -[MIT](./LICENSE) diff --git a/third_party/rust/mime/src/lib.rs b/third_party/rust/mime/src/lib.rs deleted file mode 100644 index 7eabbd9bc64b..000000000000 --- a/third_party/rust/mime/src/lib.rs +++ /dev/null @@ -1,655 +0,0 @@ -//! # Mime -//! -//! Mime is now Media Type, technically, but `Mime` is more immediately -//! understandable, so the main type here is `Mime`. -//! -//! ## What is Mime? -//! -//! Example mime string: `text/plain;charset=utf-8` -//! -//! ```rust -//! # #[macro_use] extern crate mime; -//! # fn main() { -//! let plain_text: mime::Mime = "text/plain;charset=utf-8".parse().unwrap(); -//! assert_eq!(plain_text, mime!(Text/Plain; Charset=Utf8)); -//! # } -//! ``` - -#![doc(html_root_url = "https://hyperium.github.io/mime.rs")] -#![cfg_attr(test, deny(warnings))] -#![cfg_attr(all(feature = "nightly", test), feature(test))] - -#[macro_use] -extern crate log; - -#[cfg(feature = "nightly")] -#[cfg(test)] -extern crate test; - -#[cfg(feature = "serde")] -extern crate serde; - -#[cfg(feature = "serde")] -#[cfg(test)] -extern crate serde_json; - -#[cfg(feature = "heapsize")] -extern crate heapsize; - -use std::ascii::AsciiExt; -use std::fmt; -use std::iter::Enumerate; -use std::str::{FromStr, Chars}; - -/// Mime, or Media Type. Encapsulates common registers types. -/// -/// Consider that a traditional mime type contains a "top level type", -/// a "sub level type", and 0-N "parameters". And they're all strings. -/// Strings everywhere. Strings mean typos. Rust has type safety. We should -/// use types! -/// -/// So, Mime bundles together this data into types so the compiler can catch -/// your typos. 
-/// -/// This improves things so you use match without Strings: -/// -/// ```rust -/// use mime::{Mime, TopLevel, SubLevel}; -/// -/// let mime: Mime = "application/json".parse().unwrap(); -/// -/// match mime { -/// Mime(TopLevel::Application, SubLevel::Json, _) => println!("matched json!"), -/// _ => () -/// } -/// ``` -#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] -pub struct Mime = Vec>(pub TopLevel, pub SubLevel, pub T); - -#[cfg(feature = "heapsize")] -impl + heapsize::HeapSizeOf> heapsize::HeapSizeOf for Mime { - fn heap_size_of_children(&self) -> usize { - self.0.heap_size_of_children() + - self.1.heap_size_of_children() + - self.2.heap_size_of_children() - } -} - -impl, RHS: AsRef<[Param]>> PartialEq> for Mime { - #[inline] - fn eq(&self, other: &Mime) -> bool { - self.0 == other.0 && self.1 == other.1 && self.2.as_ref() == other.2.as_ref() - } -} - -/// Easily create a Mime without having to import so many enums. -/// -/// # Example -/// -/// ``` -/// # #[macro_use] extern crate mime; -/// -/// # fn main() { -/// let json = mime!(Application/Json); -/// let plain = mime!(Text/Plain; Charset=Utf8); -/// let text = mime!(Text/Html; Charset=("bar"), ("baz")=("quux")); -/// let img = mime!(Image/_); -/// # } -/// ``` -#[macro_export] -macro_rules! mime { - ($top:tt / $sub:tt) => ( - mime!($top / $sub;) - ); - - ($top:tt / $sub:tt ; $($attr:tt = $val:tt),*) => ( - $crate::Mime( - __mime__ident_or_ext!(TopLevel::$top), - __mime__ident_or_ext!(SubLevel::$sub), - vec![ $((__mime__ident_or_ext!(Attr::$attr), __mime__ident_or_ext!(Value::$val))),* ] - ) - ); -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __mime__ident_or_ext { - ($enoom:ident::_) => ( - $crate::$enoom::Star - ); - ($enoom:ident::($inner:expr)) => ( - $crate::$enoom::Ext($inner.to_string()) - ); - ($enoom:ident::$var:ident) => ( - $crate::$enoom::$var - ) -} - -macro_rules! 
enoom { - (pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => ( - - #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] - pub enum $en { - $($ty),*, - $ext(String) - } - - impl $en { - pub fn as_str(&self) -> &str { - match *self { - $($en::$ty => $text),*, - $en::$ext(ref s) => &s - } - } - } - - impl ::std::ops::Deref for $en { - type Target = str; - fn deref(&self) -> &str { - self.as_str() - } - } - - impl PartialEq for $en { - #[inline] - fn eq(&self, other: &$en) -> bool { - match (self, other) { - $( (&$en::$ty, &$en::$ty) => true ),*, - (&$en::$ext(ref a), &$en::$ext(ref b)) => a == b, - (_, _) => self.as_str() == other.as_str(), - } - } - } - - impl PartialEq for $en { - fn eq(&self, other: &String) -> bool { - self.as_str() == other - } - } - - impl PartialEq for $en { - fn eq(&self, other: &str) -> bool { - self.as_str() == other - } - } - - impl<'a> PartialEq<&'a str> for $en { - fn eq(&self, other: &&'a str) -> bool { - self.as_str() == *other - } - } - - impl PartialEq<$en> for String { - fn eq(&self, other: &$en) -> bool { - self == other.as_str() - } - } - - impl PartialEq<$en> for str { - fn eq(&self, other: &$en) -> bool { - self == other.as_str() - } - } - - impl<'a> PartialEq<$en> for &'a str { - fn eq(&self, other: &$en) -> bool { - *self == other.as_str() - } - } - - impl fmt::Display for $en { - #[inline] - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(match *self { - $($en::$ty => $text),*, - $en::$ext(ref s) => s - }) - } - } - - impl FromStr for $en { - type Err = (); - fn from_str(s: &str) -> Result<$en, ()> { - Ok(match s { - $(_s if _s == $text => $en::$ty),*, - s => $en::$ext(s.to_string()) - }) - } - } - - #[cfg(feature = "heapsize")] - impl heapsize::HeapSizeOf for $en { - fn heap_size_of_children(&self) -> usize { - match *self { - $en::$ext(ref ext) => ext.heap_size_of_children(), - _ => 0, - } - } - } - ) -} - -enoom! 
{ - pub enum TopLevel; - Ext; - Star, "*"; - Text, "text"; - Image, "image"; - Audio, "audio"; - Video, "video"; - Application, "application"; - Multipart, "multipart"; - Message, "message"; - Model, "model"; -} - -enoom! { - pub enum SubLevel; - Ext; - Star, "*"; - - // common text/* - Plain, "plain"; - Html, "html"; - Xml, "xml"; - Javascript, "javascript"; - Css, "css"; - EventStream, "event-stream"; - - // common application/* - Json, "json"; - WwwFormUrlEncoded, "x-www-form-urlencoded"; - Msgpack, "msgpack"; - OctetStream, "octet-stream"; - - // multipart/* - FormData, "form-data"; - - // common image/* - Png, "png"; - Gif, "gif"; - Bmp, "bmp"; - Jpeg, "jpeg"; - - // audio/* - Mpeg, "mpeg"; - Mp4, "mp4"; - Ogg, "ogg"; -} - -enoom! { - pub enum Attr; - Ext; - Charset, "charset"; - Boundary, "boundary"; - Q, "q"; -} - -enoom! { - pub enum Value; - Ext; - Utf8, "utf-8"; -} - -pub type Param = (Attr, Value); - -impl> fmt::Display for Mime { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // It's much faster to write a single string, as opposed to push - // several parts through f.write_str(). So, check for the most common - // mime types, and fast track them. 
- if let TopLevel::Text = self.0 { - if let SubLevel::Plain = self.1 { - let attrs = self.2.as_ref(); - if attrs.len() == 0 { - return f.write_str("text/plain"); - } else if &[(Attr::Charset, Value::Utf8)] == attrs { - return f.write_str("text/plain; charset=utf-8"); - } - } - } else if let TopLevel::Application = self.0 { - if let SubLevel::Json = self.1 { - let attrs = self.2.as_ref(); - if attrs.len() == 0 { - return f.write_str("application/json"); - } - } - } else if let TopLevel::Star = self.0 { - if let SubLevel::Star = self.1 { - let attrs = self.2.as_ref(); - if attrs.len() == 0 { - return f.write_str("*/*"); - } - } - } - - // slower general purpose fmt - try!(fmt::Display::fmt(&self.0, f)); - try!(f.write_str("/")); - try!(fmt::Display::fmt(&self.1, f)); - for param in self.2.as_ref() { - try!(f.write_str("; ")); - try!(fmt::Display::fmt(¶m.0, f)); - try!(f.write_str("=")); - try!(fmt::Display::fmt(¶m.1, f)); - } - Ok(()) - } -} - -impl> Mime

    { - pub fn get_param>(&self, attr: A) -> Option<&Value> { - self.2.as_ref().iter().find(|&&(ref name, _)| attr == *name).map(|&(_, ref value)| value) - } -} - -impl FromStr for Mime { - type Err = (); - fn from_str(raw: &str) -> Result { - if raw == "*/*" { - return Ok(mime!(Star/Star)); - } - - let ascii = raw.to_ascii_lowercase(); // lifetimes :( - let len = ascii.len(); - let mut iter = ascii.chars().enumerate(); - let mut params = vec![]; - // toplevel - let mut start; - let top; - loop { - match iter.next() { - Some((0, c)) if is_restricted_name_first_char(c) => (), - Some((i, c)) if i > 0 && is_restricted_name_char(c) => (), - Some((i, '/')) if i > 0 => match FromStr::from_str(&ascii[..i]) { - Ok(t) => { - top = t; - start = i + 1; - break; - } - Err(_) => return Err(()) - }, - _ => return Err(()) // EOF and no toplevel is no Mime - }; - - } - - // sublevel - let sub; - let mut sub_star = false; - loop { - match iter.next() { - Some((i, '*')) if i == start => { - sub_star = true; - }, - Some((i, c)) if i == start && is_restricted_name_first_char(c) => (), - Some((i, c)) if !sub_star && i > start && is_restricted_name_char(c) => (), - Some((i, ';')) if i > start => match FromStr::from_str(&ascii[start..i]) { - Ok(s) => { - sub = s; - start = i + 1; - break; - } - Err(_) => return Err(()) - }, - None => match FromStr::from_str(&ascii[start..]) { - Ok(s) => return Ok(Mime(top, s, params)), - Err(_) => return Err(()) - }, - _ => return Err(()) - }; - } - - // params - debug!("starting params, len={}", len); - loop { - match param_from_str(raw, &ascii, &mut iter, start) { - Some((p, end)) => { - params.push(p); - start = end; - if start >= len { - break; - } - } - None => break - } - } - - Ok(Mime(top, sub, params)) - } -} - -#[cfg(feature = "serde")] -impl serde::ser::Serialize for Mime { - fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> - where S: serde::ser::Serializer - { - serializer.serialize_str(&*format!("{}",self)) - } -} - 
-#[cfg(feature = "serde")] -impl serde::de::Deserialize for Mime { - fn deserialize(deserializer: &mut D) -> Result - where D: serde::de::Deserializer - { - let string: String = try!(serde::Deserialize::deserialize(deserializer)); - let mime: Mime = match FromStr::from_str(&*string) { - Ok(mime) => mime, - Err(_) => return Err(serde::de::Error::custom("Invalid serialized mime")), - }; - Ok(mime) - } -} - -fn param_from_str(raw: &str, ascii: &str, iter: &mut Enumerate, mut start: usize) -> Option<(Param, usize)> { - let attr; - debug!("param_from_str, start={}", start); - loop { - match iter.next() { - Some((i, ' ')) if i == start => start = i + 1, - Some((i, c)) if i == start && is_restricted_name_first_char(c) => (), - Some((i, c)) if i > start && is_restricted_name_char(c) => (), - Some((i, '=')) if i > start => match FromStr::from_str(&ascii[start..i]) { - Ok(a) => { - attr = a; - start = i + 1; - break; - }, - Err(_) => return None - }, - _ => return None - } - } - - let value; - // values must be restrict-name-char or "anything goes" - let mut is_quoted = false; - - { - let substr = |a,b| { if attr==Attr::Charset { &ascii[a..b] } else { &raw[a..b] } }; - let endstr = |a| { if attr==Attr::Charset { &ascii[a..] } else { &raw[a..] 
} }; - loop { - match iter.next() { - Some((i, '"')) if i == start => { - debug!("quoted"); - is_quoted = true; - start = i + 1; - }, - Some((i, c)) if i == start && is_restricted_name_first_char(c) => (), - Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(substr(start,i)) { - Ok(v) => { - value = v; - start = i + 1; - break; - }, - Err(_) => return None - }, - Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (), - Some((i, ';')) if i > start => match FromStr::from_str(substr(start,i)) { - Ok(v) => { - value = v; - start = i + 1; - break; - }, - Err(_) => return None - }, - None => match FromStr::from_str(endstr(start)) { - Ok(v) => { - value = v; - start = raw.len(); - break; - }, - Err(_) => return None - }, - - _ => return None - } - } - } - - Some(((attr, value), start)) -} - -// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2): -// -// > All registered media types MUST be assigned top-level type and -// > subtype names. The combination of these names serves to uniquely -// > identify the media type, and the subtype name facet (or the absence -// > of one) identifies the registration tree. Both top-level type and -// > subtype names are case-insensitive. -// > -// > Type and subtype names MUST conform to the following ABNF: -// > -// > type-name = restricted-name -// > subtype-name = restricted-name -// > -// > restricted-name = restricted-name-first *126restricted-name-chars -// > restricted-name-first = ALPHA / DIGIT -// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" / -// > "$" / "&" / "-" / "^" / "_" -// > restricted-name-chars =/ "." 
; Characters before first dot always -// > ; specify a facet name -// > restricted-name-chars =/ "+" ; Characters after last plus always -// > ; specify a structured syntax suffix -// -fn is_restricted_name_first_char(c: char) -> bool { - match c { - 'a'...'z' | - '0'...'9' => true, - _ => false - } -} - -fn is_restricted_name_char(c: char) -> bool { - if is_restricted_name_first_char(c) { - true - } else { - match c { - '!' | - '#' | - '$' | - '&' | - '-' | - '^' | - '.' | - '+' | - '_' => true, - _ => false - } - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - #[cfg(feature = "nightly")] - use test::Bencher; - use super::{Mime, Value, Attr}; - - #[test] - fn test_mime_show() { - let mime = mime!(Text/Plain); - assert_eq!(mime.to_string(), "text/plain".to_string()); - let mime = mime!(Text/Plain; Charset=Utf8); - assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string()); - } - - #[test] - fn test_mime_from_str() { - assert_eq!(Mime::from_str("text/plain").unwrap(), mime!(Text/Plain)); - assert_eq!(Mime::from_str("TEXT/PLAIN").unwrap(), mime!(Text/Plain)); - assert_eq!(Mime::from_str("text/plain; charset=utf-8").unwrap(), mime!(Text/Plain; Charset=Utf8)); - assert_eq!(Mime::from_str("text/plain;charset=\"utf-8\"").unwrap(), mime!(Text/Plain; Charset=Utf8)); - assert_eq!(Mime::from_str("text/plain; charset=utf-8; foo=bar").unwrap(), - mime!(Text/Plain; Charset=Utf8, ("foo")=("bar"))); - assert_eq!("*/*".parse::().unwrap(), mime!(Star/Star)); - assert_eq!("image/*".parse::().unwrap(), mime!(Image/Star)); - assert_eq!("text/*; charset=utf-8".parse::().unwrap(), mime!(Text/Star; Charset=Utf8)); - assert!("*/png".parse::().is_err()); - assert!("*image/png".parse::().is_err()); - assert!("text/*plain".parse::().is_err()); - } - - #[test] - fn test_case_sensitive_values() { - assert_eq!(Mime::from_str("multipart/form-data; boundary=ABCDEFG").unwrap(), - mime!(Multipart/FormData; Boundary=("ABCDEFG"))); - 
assert_eq!(Mime::from_str("multipart/form-data; charset=BASE64; boundary=ABCDEFG").unwrap(), - mime!(Multipart/FormData; Charset=("base64"), Boundary=("ABCDEFG"))); - } - - #[test] - fn test_get_param() { - let mime = Mime::from_str("text/plain; charset=utf-8; foo=bar").unwrap(); - assert_eq!(mime.get_param(Attr::Charset), Some(&Value::Utf8)); - assert_eq!(mime.get_param("charset"), Some(&Value::Utf8)); - assert_eq!(mime.get_param("foo").unwrap(), "bar"); - assert_eq!(mime.get_param("baz"), None); - } - - #[test] - fn test_value_as_str() { - assert_eq!(Value::Utf8.as_str(), "utf-8"); - } - - #[test] - fn test_value_eq_str() { - assert_eq!(Value::Utf8, "utf-8"); - assert_eq!("utf-8", Value::Utf8); - } - - #[cfg(feature = "serde")] - #[test] - fn test_serialize_deserialize() { - use serde_json; - - let mime = Mime::from_str("text/plain; charset=utf-8; foo=bar").unwrap(); - let serialized = serde_json::to_string(&mime).unwrap(); - let deserialized: Mime = serde_json::from_str(&serialized).unwrap(); - assert_eq!(mime, deserialized); - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_fmt(b: &mut Bencher) { - use std::fmt::Write; - let mime = mime!(Text/Plain; Charset=Utf8); - b.bytes = mime.to_string().as_bytes().len() as u64; - let mut s = String::with_capacity(64); - b.iter(|| { - let _ = write!(s, "{}", mime); - ::test::black_box(&s); - unsafe { s.as_mut_vec().set_len(0); } - }) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_from_str(b: &mut Bencher) { - let s = "text/plain; charset=utf-8; foo=bar"; - b.bytes = s.as_bytes().len() as u64; - b.iter(|| s.parse::()) - } -} diff --git a/third_party/rust/mio/.cargo-checksum.json b/third_party/rust/mio/.cargo-checksum.json index cf1982efc921..47aa1fd3988b 100644 --- a/third_party/rust/mio/.cargo-checksum.json +++ b/third_party/rust/mio/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"859b996b1d16ad6025c86d6d723f9db687dbe69fc15f85bf5a5dbeb1cdf89145","Cargo.toml":"1e24366fa1469647457574af270b42bfc3eccf92f92c04c81f8cf54682fceadf","LICENSE":"07919255c7e04793d8ea760d6c2ce32d19f9ff02bdbdde3ce90b1e1880929a9b","README.md":"18c1685d784a5ce3a02f9019b1e9f85035e8061bd78fdbeded57f53cc5860171","appveyor.yml":"d270d3d60d368e4dc62bb87c17fc407b524d441fdf69eff27090a6043e50d342","benches/bench_poll.rs":"ab1e38ad309d58f4c1924fdef1868fd6b899536959f08d2f33247f057c8962d0","ci/docker/aarch64-linux-android/Dockerfile":"350e18f37e45f8332e802d32e7c05c824fdaf441174c8bc5fb5602865f85b516","ci/docker/aarch64-linux-android/accept-licenses.sh":"b425a5561694c3bf065ef10a00f904c2536e63d6b11782e35a567f2808118ef2","ci/docker/aarch64-linux-android/cargo_config":"ef57da8986b41bbca9ca77d2dbceb857185d341dbb3d97a3f95cc680467e4985","ci/docker/aarch64-linux-android/install-ndk.sh":"e9e58a151ba4f71e7c23fca256bebd9b8a5ed1b3161c6154a634870174f33484","ci/docker/aarch64-linux-android/install-sdk.sh":"e068d8f16bc6297613f3de7bfab72c803594f44edaab7e546d6d8f393389b72b","ci/docker/arm-linux-androideabi/Dockerfile":"c6f153172df91bc5c7d3a859f8970ce54e1cc5c92ab295c1c9cf91c81a49fd51","ci/docker/arm-linux-androideabi/accept-licenses.sh":"84ad00815f628005ed22c5d6cd14990ebc97812a7163bd275b2877904eddab53","ci/docker/arm-linux-androideabi/cargo_config":"ec54caa043c093c699cfb3a1cc3dc35651039b50e29fb416b43f3f4dbd778397","ci/docker/arm-linux-androideabi/install-ndk.sh":"eef063bb01a16c0f90471dbce1b5a395b53141d7704e15a3c9a1c4fc5e06d4b1","ci/docker/arm-linux-androideabi/install-sdk.sh":"42c04b17c4a35bef58757332e960a6e4aba1b5e41f8fc0182265163ff93f6182","ci/ios/deploy_and_run_on_ios_simulator.rs":"977ed5ee02864e85b5583d61549865c72e70c16e62d5b4d3ea1fe486609c193e","ci/run-docker.sh":"7f6c68dbca93788111170ac4678608957a179e76cfe8c5a51d11dfea1742d7f2","ci/run-ios.sh":"2a3069d5378a8e1fdae3aeb79fda490da5b0e5c0e6c775edf37d93986305463a","ci/run.sh":"f496e9d3506aee0c26e00407a2658ada83b736ca50f4b159a
f219e53c3fe3acf","ci/trust/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/trust/script.sh":"d5d581703524b4b770ad99d5beed4d68733e16497a50b4d0f416115526dae319","src/channel.rs":"e76baed95be4ab4af050ba731916d2a3a03057d145e68f053571e7b8ae277c28","src/deprecated/event_loop.rs":"8432e097e29a0237e6187dfaff2782f077d8c44da4fffcdb83c7991958fbd920","src/deprecated/handler.rs":"13cbc0c193f43a331e125e05d5eddf3712fe86e41a8721186d3672518ef8a9cc","src/deprecated/io.rs":"4948217ffeeba4f508cc89744da5d6af858b4ad7b4be23f927a00df93bdf2984","src/deprecated/mod.rs":"504d718e7c786c69619d27d5b6302ffca3f174b7ffba1bcd72b58e7bac8d83c6","src/deprecated/notify.rs":"8cb108387ebcfb75764e4dd2868d80eb00d793c4b7c867c08cd86ef10b91b023","src/deprecated/unix.rs":"771cf2f475f7655b8b0918d907f78acce6f729f4eb608f657f1af0e5d50ab641","src/event_imp.rs":"374ddc5ccf439bf3c8c02b3796af5bc39bbb4ee8156534752cbe63743d659802","src/io.rs":"26d42aee058dc7a94e1b6fc6f29b34d6f70029fcbf6809574b5758b0c1afd905","src/lib.rs":"979fc24e9013a2672d75b097781ac699f5f88e06455cbec65f8ef1d088e358fa","src/net/mod.rs":"340c63a8efe9ee774b7bf8ed8c0f72fc7563e5c4b35f6a8b243f92d366e145a2","src/net/tcp.rs":"73b8e98f9e474fadf8193a2b3b9cb50743d97d8e27749c063918908d28012d54","src/net/udp.rs":"2d9e4a7eb3c30e36c3ca0b38d572c0eb9006c7602a339e72c19de823f5452a95","src/poll.rs":"08129340bd814e1d2b7fad64641d0dd2a2b6862640309ae68f489a4471b046ec","src/sys/mod.rs":"e6d068be2ed80e56d768aac8ae59886db0f2c8e53f1319b03b02ff35aa031ba7","src/sys/unix/awakener.rs":"7efbec6de6562cc3dcd641f224d466403e1435334e5ad2a82c967ed5a9c30345","src/sys/unix/dlsym.rs":"559337d1f6c10e6c1172bd3908a9dcfa5a0828b53b04f7ca3a0d926afa85cd63","src/sys/unix/epoll.rs":"558cd0dd23afd43a655a778b8b7e876a2372d05a9a132b8a6547c3091c7c00c6","src/sys/unix/eventedfd.rs":"c12f916ed98930ccacb1c15d0d3ea49eb85f872e4127e1bddabb3875ac16ab38","src/sys/unix/io.rs":"5e3c48ef5cd8e3aa2bd6831eea5f9578e82a84c092add7b042675c1c0f01cad1","src/sys/unix/kqueue.rs":"567399be003ac70
07e0f068856ebb402382f52c7ffef5b5044edd0290f890a80","src/sys/unix/mod.rs":"dc8bece30046f5cdb25ace072a836b6b3a085eb91c64017b3242c368aa221ef1","src/sys/unix/ready.rs":"152561f335330c8cdce65f9bcc1018e76def6180e45b26e76fa71c41625067bf","src/sys/unix/tcp.rs":"08d2617e3bb172b258d9ee56f46304fa5abd6937f9fa29289a0d9af809a25750","src/sys/unix/udp.rs":"3bedb61c9ee9966f64a6a1ba715bf64e906fc5b118bf1903d7f8aeb2931cfccd","src/sys/unix/uds.rs":"5126490860d2c980e94abc9326dd67473411b20b380aadaeeabc450571e21427","src/sys/windows/awakener.rs":"ea7dcd0370a673421f58a2d804c8da9a7f159dca1a0af87e006ac0df3d3ffa74","src/sys/windows/buffer_pool.rs":"636f4b7510a507f9985987699ce8ec140b2ed852abb231443ee1486c80759eed","src/sys/windows/from_raw_arc.rs":"c2cee14a0355256beb55a1feb54ccdcc50c8ab2d9abb3b7f114be00ed8a5583f","src/sys/windows/mod.rs":"d3b2bf7dd9011e3930d2712aa990b9599bf4060572dc4296f123eb3908ad58ab","src/sys/windows/selector.rs":"b13c9bcba172887f6fef1dffd6e5511489329f7640dd0c9a4e34bb2973bda883","src/sys/windows/tcp.rs":"b0591fc3c89d163129e9c6dbfb1b04a19edc759036e1e66bac54416944ede785","src/sys/windows/udp.rs":"d4cfefee1091d5712cc133eec9c21aafdea9f9959f1384453d1b61c6ea5e8fb5","src/timer.rs":"13c386666720e3e85cfdd5807e098396958e94980e22a21a1b706e69093fab76","src/token.rs":"9fea166777a6fd70b902ee96e395da71f41156f82f923087e61a56b3d1af641e","src/udp.rs":"a02e64c8bb585e6b8637666260809bd9ed708bef0f84bab2d46ca8655e76a03b"},"package":"9e965267d4d58496fc4f740e9861118367f13570cadf66316ed2c3f2f14d87c7"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"77d8144016a20f81879755c8b517e1f8d608e84d41d676adfb061aa4a8e6e88a","Cargo.toml":"129c2be1244092de4987999ff9524717da7a0a1e3c6d6de080a48088ec81c3c1","LICENSE":"07919255c7e04793d8ea760d6c2ce32d19f9ff02bdbdde3ce90b1e1880929a9b","README.md":"8095f9f50964ab6f4539d6b6ffb035c8e0601824920946b9f00037844f2fe161","appveyor.yml":"9cf83d26a0bbf81a8349c7f80ca805d2d57a9c5dff1ee7630db504c4aa759c48","benches/bench_poll.rs":"ab1e38ad309d58f4c1924fdef1868fd6b899536959f08d2f33247f057c8962d0","ci/docker/aarch64-linux-android/Dockerfile":"736b09aaeffc0f8b78b5a1026a9a9e28614d1203ec3767f93f739ce835ed3d6c","ci/docker/aarch64-linux-android/accept-licenses.sh":"b425a5561694c3bf065ef10a00f904c2536e63d6b11782e35a567f2808118ef2","ci/docker/aarch64-linux-android/cargo_config":"ef57da8986b41bbca9ca77d2dbceb857185d341dbb3d97a3f95cc680467e4985","ci/docker/aarch64-linux-android/install-ndk.sh":"e9e58a151ba4f71e7c23fca256bebd9b8a5ed1b3161c6154a634870174f33484","ci/docker/aarch64-linux-android/install-sdk.sh":"0a20433259127e6e3f5115b8bf0aaf3b10c9ae758d56ecb2a33006b614283770","ci/docker/arm-linux-androideabi/Dockerfile":"c6f153172df91bc5c7d3a859f8970ce54e1cc5c92ab295c1c9cf91c81a49fd51","ci/docker/arm-linux-androideabi/accept-licenses.sh":"84ad00815f628005ed22c5d6cd14990ebc97812a7163bd275b2877904eddab53","ci/docker/arm-linux-androideabi/cargo_config":"ec54caa043c093c699cfb3a1cc3dc35651039b50e29fb416b43f3f4dbd778397","ci/docker/arm-linux-androideabi/install-ndk.sh":"eef063bb01a16c0f90471dbce1b5a395b53141d7704e15a3c9a1c4fc5e06d4b1","ci/docker/arm-linux-androideabi/install-sdk.sh":"cb6b0155f0d146515a3ea4bdaa50b90936529f843814cba1665cf52cfda8668c","ci/ios/deploy_and_run_on_ios_simulator.rs":"977ed5ee02864e85b5583d61549865c72e70c16e62d5b4d3ea1fe486609c193e","ci/run-docker.sh":"7f6c68dbca93788111170ac4678608957a179e76cfe8c5a51d11dfea1742d7f2","ci/run-ios.sh":"39de59f369ba52a5ca4a343b496396d3797f7e776f615612401254fb385020b3","ci/run.sh":"f496e9d3506aee0c26e00407a2658ada83b736ca50f4b159a
f219e53c3fe3acf","ci/trust/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/trust/script.sh":"d5d581703524b4b770ad99d5beed4d68733e16497a50b4d0f416115526dae319","src/channel.rs":"92a0dcf00174fface8a50ddfee2f45189730f8c172d874aab49587bd160c4658","src/deprecated/event_loop.rs":"5fccdcba1b3d3012eb6778f8f487279f30f9b9076acb7d41512838b2ba02c5e7","src/deprecated/handler.rs":"13cbc0c193f43a331e125e05d5eddf3712fe86e41a8721186d3672518ef8a9cc","src/deprecated/io.rs":"4948217ffeeba4f508cc89744da5d6af858b4ad7b4be23f927a00df93bdf2984","src/deprecated/mod.rs":"4310471b5a1313dbf53b492769a3031b15353eb269333b7c1a890bc2709def7c","src/deprecated/notify.rs":"8cb108387ebcfb75764e4dd2868d80eb00d793c4b7c867c08cd86ef10b91b023","src/deprecated/unix.rs":"e63eab07061ff9a0798cf0ad05fe276e62c68e685fb188be5872c2921290beb1","src/event_imp.rs":"35e55783c630ecf6e75be30e35b838d57dab0defbd2963429c0bb3fdf96a5686","src/io.rs":"9207ffc93ea744b09bc6872fa4d378d7c75640f9ac38b1fa67b940c7cb5d0ade","src/lib.rs":"45ce57ba07598a256d9c0af03751420ed6f3aa294e33e262a39e13cbde763368","src/net/mod.rs":"340c63a8efe9ee774b7bf8ed8c0f72fc7563e5c4b35f6a8b243f92d366e145a2","src/net/tcp.rs":"034010d098d1fb2e5385fa6fe40e012ee4876338f6e970fb00282472fff44963","src/net/udp.rs":"4400029c4544f9d6c58c5e7fd4699a70e1a6822f4ef4690d33b0573cfb66e17c","src/poll.rs":"55f2c0f685e8f1c4a5fd47f047ddd9a0e47da8bd9dbc8bd40d56781d92ce5c73","src/sys/fuchsia/awakener.rs":"71a4a0083242457b0a96326c69c0f98b23dfdb97be96deb26ee02fa9d1dfb212","src/sys/fuchsia/eventedfd.rs":"bd8f43d2b61cdd6a5d0df9c0dc1cb43e1708140d01a05611055277ed55a33b63","src/sys/fuchsia/handles.rs":"161a69e8a8d7f71326a9c53bcb7685d0a81d184aba8e20da27c64aa27dfd56b2","src/sys/fuchsia/mod.rs":"9d80f1214abc93f48b6b6c12ce5b6cfaddbda592c8f3410564d0237228cae2d0","src/sys/fuchsia/net.rs":"50340191dd9cbe317bd6e6ae0170c03daa9141f15c96782b96484e3d8b8301b1","src/sys/fuchsia/ready.rs":"7e6bb7235c52ab4a2134d36cf982f6a4fd6e18050e737b40ee84c89a10a9faac","src/sys/fuchsi
a/selector.rs":"f3be7f8d683d43e4a8313246e4cacb9444549bf66ecb2234f0d0f53172043bf5","src/sys/mod.rs":"64bea046e4a9feb1f2e2fb8326452c1be8b9d56cf8794df0af4fbdf565204255","src/sys/unix/awakener.rs":"20a61d8f39b2f2abf4f166a3ed46fa0d79907ddf92758eaddb880c67321db56c","src/sys/unix/dlsym.rs":"559337d1f6c10e6c1172bd3908a9dcfa5a0828b53b04f7ca3a0d926afa85cd63","src/sys/unix/epoll.rs":"e385de7d7716affbaf69ddacedf91c5670f828fb31e826e4bcf0616c80503877","src/sys/unix/eventedfd.rs":"c9c4aefc218e48b90a9f8703923afb0fe60d5316824501e49365e2c64804a1d0","src/sys/unix/io.rs":"a518f09020f821e87bcf9c2ecb4bf501d428619ddfd7b35a26629b614919b14c","src/sys/unix/kqueue.rs":"c852e7358d8f2c247f5ab9c77c1b76631ad4ab0c582af0954c72c7380ba8cc97","src/sys/unix/mod.rs":"62331824b1f5022e372b875faad41a970abb13e63c15e3fae9af948a2b06e4e1","src/sys/unix/ready.rs":"a01240079528ae5443b643ab03d4955501bee17d87b28caf4defb4284dbe38bc","src/sys/unix/tcp.rs":"811f76bf5ee53363b2f957cdf7db4dbcc995b0c3263eab8b9f240b7f03ba5bbe","src/sys/unix/udp.rs":"c51220d49455bee0072a87f1434e03bf51947c80a7f2bae94402dc3475de795f","src/sys/unix/uds.rs":"fabaf91c64a51de45c2e3142932b1da604206efe888d4aa365d795cfe19e5beb","src/sys/windows/awakener.rs":"2d3cdaf8b523675e2f64c5fd81e1197d5255384517b9753b718c5c47acf0cabd","src/sys/windows/buffer_pool.rs":"636f4b7510a507f9985987699ce8ec140b2ed852abb231443ee1486c80759eed","src/sys/windows/from_raw_arc.rs":"659dabdf5037546e3783aacc422992b4248b0ba2ddcce52b149d35bc723550e5","src/sys/windows/mod.rs":"afeec8cd4e3adeaf6ffe68b134ad1b4ba07faa3abae96f6f9a00bbc20ff5f2c5","src/sys/windows/selector.rs":"e971a04e56104dee3271512497e31634eae06ed04029e4637041609efeb9f7d0","src/sys/windows/tcp.rs":"dbba36ef0459b4331f8f3199d8eef8fe16e0ab5b196aadfc3cc0a80097c9011c","src/sys/windows/udp.rs":"1ef869b660bcf89ea6498552474abf8f540946631e14d5b610ca31014cd9045f","src/timer.rs":"aaebc4729dcddd60fe90a46e6ac053a4dcaba705355ea974730fb9d76f5e429f","src/token.rs":"4a56f851709470df2eed803c57c68b0a4b12ea86fa1b8d2c999bec7a85d58ec0","
src/udp.rs":"442e620f3ea0cf010497d3ad775debd585f28e79a025993d40471c8e6839dc98","test/benchmark.rs":"d655cd4a79b11555df4d46e929134d73f2e49e174a59f66b98904a6b9a2779e3","test/mod.rs":"556340593d7376ce67f5645064bce48925f5fe8c52e2a89e709220ccac9503be","test/test_battery.rs":"cea7470a896a5bb09cb45c0658bc80f48ab92f07662e95eceb0cfd70065cd84f","test/test_broken_pipe.rs":"1e8352a22a8b3fe170aa8bb942dc4a2ddd0fb5e69374733e6bcfa324481da648","test/test_close_on_drop.rs":"403b5e8477d3f4309c8d1568ed05a57af50393baa228e91cc7a0d29bb77bcbaa","test/test_custom_evented.rs":"02f113cbbbb4c0d09767bc17434b12acf6de1dd6b6ee20106b5f40ce2f0b46ed","test/test_double_register.rs":"f48b1b5aee68f0c3fef8895c8b9f2cf2446e670f3d0e7249ec300067df783617","test/test_echo_server.rs":"c4fadaf9f226e4ac2066369200792bf890bdec09ccd699370c573f960a4eb52b","test/test_fuchsia_handles.rs":"1e4fc0920b4067a4ece7487067af4f6e3f18e8e55b0fd636ccc2d333f8ade70f","test/test_local_addr_ready.rs":"0a9cd42cb43b5afcc139b1def8278f2a24834e40f96c4ea69f05c4f3263f7c65","test/test_multicast.rs":"cbd85c94971c07bd269d72eb80f53a3bcd57075f7acb7475c2d73efb28e816af","test/test_notify.rs":"ee98f24cb72690756b4bff5983c642a251cd253851f7c1c8f1adf29bf2b97287","test/test_oneshot.rs":"3f618c167734b1ce4695691955fb01224c6abf0a1cc1e1db9e01248fb9edbd3c","test/test_poll.rs":"ad450ff65e9ab8108d424b3b8cf4c82847fb9927f12e749f0aee1228bd5db7da","test/test_poll_channel.rs":"8414e7d27e32bec5c8ea0341f4f2708eb64853cfb6d28689deb58d90b1619120","test/test_register_deregister.rs":"c10612421b097d4a8b0a73a0440f11fe1f8c4f34c701255af6da62bf4fa5a08e","test/test_register_multiple_event_loops.rs":"e0f3115ef938ceed2eae47ea1630c7f440a860b69ab57b70507cf8062fc21eb9","test/test_reregister_without_poll.rs":"30ebba86b936bc283575f8f15237bd1f616ea09516bf34912a27ce6c7b5a5559","test/test_smoke.rs":"f1c4c52509537a76975354dbd9a91e16b14e53f8b30171c57ebf59ff4d935bcc","test/test_subprocess_pipe.rs":"324b68e1cb2d43bec204d41064f6873640cf61970718da3703506f0e5ebad31d","test/test_tcp.rs":"dc7e8684
e53099b13e0828cddbc0985755d041c8b20408d57fe12d4bb7c28c3f","test/test_tcp_level.rs":"4f7b9615c6b47ccce5787f1e3b0b4f26e56af9b310c9ca687ec2d08126937478","test/test_tick.rs":"e5bbbcad3106da6419147bb0775d0ad5203d90adb1a9ee61de2128d95600bd5c","test/test_timer.rs":"3e1d249f3c1fc57b3c44adb54221eb1b7434ce41f985b8ae46c75abc026701cd","test/test_udp_level.rs":"c47b99cd614d5115fe59ed3afdbe56e0a4bf16884da7f03bab54ba4a513a3893","test/test_udp_socket.rs":"4bccb59ff803ed9acfc61c58b2d6f19fbdf1cf78ec12eba59902582b0a1acaa8","test/test_uds_shutdown.rs":"ea3bfdca353116fc3a55a7fb5156a1c67229b3ff2057cc06d83ed73768436894","test/test_unix_echo_server.rs":"5f39c69f846a6d60d7f0018a62c212be68032211cf653748fba312edb12206b7","test/test_unix_pass_fd.rs":"a8a4f5b9e0acfae22a53192b037cd3c398c43d8514c60e25468bafe1cb9ccd72","test/test_write_then_drop.rs":"2e58df792cd794306d0b9a068a69ff084352545d295aceedb92b67472be7f1b0"},"package":"4fcfcb32d63961fb6f367bfd5d21e4600b92cd310f71f9dca25acae196eb1560"} \ No newline at end of file diff --git a/third_party/rust/mio/CHANGELOG.md b/third_party/rust/mio/CHANGELOG.md index a1c5afb280e7..31a6f4bbd037 100644 --- a/third_party/rust/mio/CHANGELOG.md +++ b/third_party/rust/mio/CHANGELOG.md @@ -1,3 +1,46 @@ +# 0.6.15 (July 3, 2018) + +* Implement `Evented` for containers (#840). +* Fix android-aarch64 build (#850). + +# 0.6.14 (March 8, 2018) + +* Add `Poll::poll_interruptible` (#811) +* Add `Ready::all` and `usize` conversions (#825) + +# 0.6.13 (February 5, 2018) + +* Fix build on DragonFlyBSD. +* Add `TcpListener::from_std` that does not require the socket addr. +* Deprecate `TcpListener::from_listener` in favor of from_std. + +# 0.6.12 (January 5, 2018) + +* Add `TcpStream::peek` function (#773). +* Raise minimum Rust version to 1.18.0. +* `Poll`: retry select() when interrupted by a signal (#742). +* Deprecate `Events` index access (#713). +* Add `Events::clear` (#782). +* Add support for `lio_listio` (#780). 
+ +# 0.6.11 (October 25, 2017) + +* Allow register to take empty interest (#640). +* Fix bug with TCP errors on windows (#725). +* Add TcpListener::accept_std (#733). +* Update IoVec to fix soundness bug -- includes behavior change. (#747). +* Minimum Rust version is now 1.14.0. +* Fix Android x86_64 build. +* Misc API & doc polish. + +# 0.6.10 (July 27, 2017) + +* Experimental support for Fuchsia +* Add `only_v6` option for UDP sockets +* Fix build on NetBSD +* Minimum Rust version is now 1.13.0 +* Assignment operators (e.g. `|=`) are now implemented for `Ready` + # 0.6.9 (June 7, 2017) * More socket options are exposed through the TCP types, brought in through the @@ -39,7 +82,7 @@ # 0.6.4 (January 24, 2017) * Fix compilation on musl -* Add `TcpStream::from_stream` which conversts a std TCP stream to Mio. +* Add `TcpStream::from_stream` which converts a std TCP stream to Mio. # 0.6.3 (January 22, 2017) @@ -122,7 +165,7 @@ * [FEATURE] Expose TCP shutdown * [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184) * [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std. -* [IMPROVEMENT] Provide TCP and UDP types in mio (path to windows #155) +* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155) * [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155) * [IMPROVEMENT] Move unix specific features into mio::unix module * [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default diff --git a/third_party/rust/mio/Cargo.toml b/third_party/rust/mio/Cargo.toml index c7fb8dda90e5..0b80bfb55723 100644 --- a/third_party/rust/mio/Cargo.toml +++ b/third_party/rust/mio/Cargo.toml @@ -1,47 +1,71 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. 
crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] - -name = "mio" -version = "0.6.9" -license = "MIT" -authors = ["Carl Lerche "] -description = "Lightweight non-blocking IO" +name = "mio" +version = "0.6.15" +authors = ["Carl Lerche "] +exclude = [".gitignore", ".travis.yml", "deploy.sh"] +description = "Lightweight non-blocking IO" +homepage = "https://github.com/carllerche/mio" documentation = "https://docs.rs/mio" -homepage = "https://github.com/carllerche/mio" -repository = "https://github.com/carllerche/mio" -readme = "README.md" -keywords = ["io", "async", "non-blocking"] -categories = ["asynchronous"] -exclude = [ - ".gitignore", - ".travis.yml", - "deploy.sh", - "test/**/*", -] - -[features] -with-deprecated = [] -default = ["with-deprecated"] - -[dependencies] -lazycell = "0.4.0" -log = "0.3.1" -slab = "0.3.0" -net2 = "0.2.29" -iovec = "0.1.0" - -[target.'cfg(unix)'.dependencies] -libc = "0.2.19" - -[target.'cfg(windows)'.dependencies] -winapi = "0.2.1" -miow = "0.2.1" -kernel32-sys = "0.2" - -[dev-dependencies] -env_logger = { version = "0.3.0", default-features = false } -tempdir = "0.3.4" -bytes = "0.3.0" +readme = "README.md" +keywords = ["io", "async", "non-blocking"] +categories = ["asynchronous"] +license = "MIT" +repository = "https://github.com/carllerche/mio" [[test]] name = "test" path = "test/mod.rs" +[dependencies.iovec] +version = "0.1.1" + +[dependencies.lazycell] +version = "0.6.0" + +[dependencies.log] +version = "0.4" + +[dependencies.net2] +version = "0.2.29" + +[dependencies.slab] +version = "0.4.0" +[dev-dependencies.bytes] +version = "0.3.0" + +[dev-dependencies.env_logger] +version = "0.4.0" +default-features = false + +[dev-dependencies.tempdir] +version = "0.3.4" + +[features] +default = 
["with-deprecated"] +with-deprecated = [] +[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon] +version = "0.3.2" + +[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon-sys] +version = "0.3.2" +[target."cfg(unix)".dependencies.libc] +version = "0.2.42" +[target."cfg(windows)".dependencies.kernel32-sys] +version = "0.2" + +[target."cfg(windows)".dependencies.miow] +version = "0.2.1" + +[target."cfg(windows)".dependencies.winapi] +version = "0.2.6" diff --git a/third_party/rust/mio/README.md b/third_party/rust/mio/README.md index b3ae83fbd170..b890b02567e9 100644 --- a/third_party/rust/mio/README.md +++ b/third_party/rust/mio/README.md @@ -1,4 +1,4 @@ -# Mio - Metal IO +# Mio – Metal IO Mio is a lightweight I/O library for Rust with a focus on adding as little overhead as possible over the OS abstractions. @@ -9,7 +9,7 @@ overhead as possible over the OS abstractions. **API documentation** -* [master](http://carllerche.github.io/mio) +* [master](https://carllerche.github.io/mio) * [v0.6](https://docs.rs/mio/^0.6) * [v0.5](https://docs.rs/mio/^0.5) @@ -33,19 +33,19 @@ extern crate mio; ## Features -* Event loop backed by epoll, kqueue. +* Non-blocking TCP, UDP. +* I/O event notification queue backed by epoll, kqueue, and IOCP. * Zero allocations at runtime -* Non-blocking TCP, UDP and Unix domain sockets -* High performance timer system -* Thread safe message channel for cross thread communication +* Platform specific extensions. -## Non goals +## Non-goals -The following are specifically omitted from MIO and are left to the user -or higher level libraries. +The following are specifically omitted from Mio and are left to the user +or higher-level libraries. * File operations * Thread pools / multi-threaded event loop +* Timers ## Platforms @@ -54,26 +54,27 @@ Currently supported platforms: * Linux * OS X * Windows +* FreeBSD * NetBSD +* Solaris * Android * iOS +* Fuchsia (experimental). There are potentially others. 
If you find that Mio works on another platform, submit a PR to update the list! ### Libraries -* [tokio-core](//github.com/tokio-rs/tokio-core) - Underlying event loop - for the [Tokio project](//github.com/tokio-rs/tokio). -* [mioco](//github.com/dpc/mioco) - Mio COroutines -* [simplesched](//github.com/zonyitoo/simplesched) - Coroutine I/O with a simple scheduler -* [coio-rs](//github.com/zonyitoo/coio-rs) - Coroutine I/O with work-stealing scheduler -* [rotor](//github.com/tailhook/rotor) - A wrapper that allows to create composable I/O libraries on top of mio -* [ws-rs](//github.com/housleyjk/ws-rs) - WebSockets based on Mio +* [tokio-core](https://github.com/tokio-rs/tokio-core) – Underlying event loop + for the [Tokio project](https://github.com/tokio-rs/tokio). +* [mioco](https://github.com/dpc/mioco) – Mio COroutines +* [coio-rs](https://github.com/zonyitoo/coio-rs) – Coroutine I/O with work-stealing scheduler +* [ws-rs](https://github.com/housleyjk/ws-rs) – WebSockets based on Mio ## Community -A group of mio users hang out in the #mio channel on the Mozilla IRC +A group of Mio users hang out in the #mio channel on the Mozilla IRC server (irc.mozilla.org). This can be a good place to go for questions. 
## Contributing diff --git a/third_party/rust/mio/appveyor.yml b/third_party/rust/mio/appveyor.yml index e2907383cad2..be5c87e2e5e9 100644 --- a/third_party/rust/mio/appveyor.yml +++ b/third_party/rust/mio/appveyor.yml @@ -18,3 +18,4 @@ test_script: # Ensure that the build works without default features - cargo build --no-default-features - cargo test + - cargo test --no-default-features diff --git a/third_party/rust/mio/ci/docker/aarch64-linux-android/Dockerfile b/third_party/rust/mio/ci/docker/aarch64-linux-android/Dockerfile index 0be22819dec9..cc9128a69166 100644 --- a/third_party/rust/mio/ci/docker/aarch64-linux-android/Dockerfile +++ b/third_party/rust/mio/ci/docker/aarch64-linux-android/Dockerfile @@ -16,6 +16,7 @@ RUN dpkg --add-architecture i386 && \ expect \ openjdk-9-jre \ libstdc++6:i386 \ + libc++1 \ gcc \ libc6-dev \ qt5-default zlib1g:i386 libx11-6:i386 \ diff --git a/third_party/rust/mio/ci/docker/aarch64-linux-android/install-sdk.sh b/third_party/rust/mio/ci/docker/aarch64-linux-android/install-sdk.sh index 5c1a6ad4d9f4..df2e2393c8ae 100644 --- a/third_party/rust/mio/ci/docker/aarch64-linux-android/install-sdk.sh +++ b/third_party/rust/mio/ci/docker/aarch64-linux-android/install-sdk.sh @@ -26,8 +26,8 @@ curl -o sdk-tools-linux-3859397.zip https://dl.google.com/android/repository/sdk -yes | sdkmanager --licenses -sdkmanager tools platform-tools "build-tools;25.0.2" "platforms;android-24" "system-images;android-24;default;arm64-v8a" +yes | sdkmanager --licenses --no_https +sdkmanager tools platform-tools "build-tools;25.0.2" "platforms;android-24" "system-images;android-24;default;arm64-v8a" --no_https echo "no" | avdmanager create avd \ --force \ diff --git a/third_party/rust/mio/ci/docker/arm-linux-androideabi/install-sdk.sh b/third_party/rust/mio/ci/docker/arm-linux-androideabi/install-sdk.sh index 3f20837fe061..58f34845d210 100644 --- a/third_party/rust/mio/ci/docker/arm-linux-androideabi/install-sdk.sh +++ 
b/third_party/rust/mio/ci/docker/arm-linux-androideabi/install-sdk.sh @@ -25,7 +25,7 @@ curl https://dl.google.com/android/android-sdk_r24.4.1-linux.tgz | \ filter="platform-tools,android-21" filter="$filter,sys-img-armeabi-v7a-android-21" -./accept-licenses.sh "android - update sdk -a --no-ui --filter $filter" +./accept-licenses.sh "android - update sdk -a --no-ui --filter $filter --no-https" echo "no" | android create avd \ --name arm-21 \ diff --git a/third_party/rust/mio/ci/run-ios.sh b/third_party/rust/mio/ci/run-ios.sh index 7f6c2cd29570..027c54e126af 100755 --- a/third_party/rust/mio/ci/run-ios.sh +++ b/third_party/rust/mio/ci/run-ios.sh @@ -20,7 +20,7 @@ case "$TARGET" in # Find the file to run - TEST_FILE="$(find target/$TARGET/debug -maxdepth 1 -type f -name test-* | head -1)"; + TEST_FILE="$(find $TARGET/debug -maxdepth 1 -type f -name test-* | head -1)"; rustc -O ./ci/ios/deploy_and_run_on_ios_simulator.rs; ./deploy_and_run_on_ios_simulator $TEST_FILE; diff --git a/third_party/rust/mio/src/channel.rs b/third_party/rust/mio/src/channel.rs index b088dde9772a..851378fff78f 100644 --- a/third_party/rust/mio/src/channel.rs +++ b/third_party/rust/mio/src/channel.rs @@ -2,7 +2,8 @@ #![allow(unused_imports, deprecated, missing_debug_implementations)] -use {io, Evented, Ready, Poll, PollOpt, Registration, SetReadiness, Token}; +use {io, Ready, Poll, PollOpt, Registration, SetReadiness, Token}; +use event::Evented; use lazycell::{LazyCell, AtomicLazyCell}; use std::any::Any; use std::fmt; @@ -118,7 +119,7 @@ impl Sender { self.tx.send(t) .map_err(SendError::from) .and_then(|_| { - try!(self.ctl.inc()); + self.ctl.inc()?; Ok(()) }) } @@ -138,7 +139,7 @@ impl SyncSender { self.tx.send(t) .map_err(From::from) .and_then(|_| { - try!(self.ctl.inc()); + self.ctl.inc()?; Ok(()) }) } @@ -147,7 +148,7 @@ impl SyncSender { self.tx.try_send(t) .map_err(From::from) .and_then(|_| { - try!(self.ctl.inc()); + self.ctl.inc()?; Ok(()) }) } @@ -199,7 +200,7 @@ impl SenderCtl { if 
0 == cnt { // Toggle readiness to readable if let Some(set_readiness) = self.inner.set_readiness.borrow() { - try!(set_readiness.set_readiness(Ready::readable())); + set_readiness.set_readiness(Ready::readable())?; } } @@ -229,7 +230,7 @@ impl ReceiverCtl { if first == 1 { // Unset readiness if let Some(set_readiness) = self.inner.set_readiness.borrow() { - try!(set_readiness.set_readiness(Ready::empty())); + set_readiness.set_readiness(Ready::empty())?; } } @@ -240,7 +241,7 @@ impl ReceiverCtl { // There are still pending messages. Since readiness was // previously unset, it must be reset here if let Some(set_readiness) = self.inner.set_readiness.borrow() { - try!(set_readiness.set_readiness(Ready::readable())); + set_readiness.set_readiness(Ready::readable())?; } } diff --git a/third_party/rust/mio/src/deprecated/event_loop.rs b/third_party/rust/mio/src/deprecated/event_loop.rs index e4c72187320d..a4beab13968c 100644 --- a/third_party/rust/mio/src/deprecated/event_loop.rs +++ b/third_party/rust/mio/src/deprecated/event_loop.rs @@ -1,4 +1,5 @@ -use {channel, Evented, Poll, Events, Token}; +use {channel, Poll, Events, Token}; +use event::Evented; use deprecated::{Handler, NotifyError}; use event_imp::{Event, Ready, PollOpt}; use timer::{self, Timer, Timeout}; @@ -109,7 +110,7 @@ impl EventLoop { fn configured(config: Config) -> io::Result> { // Create the IO poller - let poll = try!(Poll::new()); + let poll = Poll::new()?; let timer = timer::Builder::default() .tick_duration(config.timer_tick) @@ -121,8 +122,8 @@ impl EventLoop { let (tx, rx) = channel::sync_channel(config.notify_capacity); // Register the notification wakeup FD with the IO poller - try!(poll.register(&rx, NOTIFY, Ready::readable(), PollOpt::edge() | PollOpt::oneshot())); - try!(poll.register(&timer, TIMER, Ready::readable(), PollOpt::edge())); + poll.register(&rx, NOTIFY, Ready::readable(), PollOpt::edge() | PollOpt::oneshot())?; + poll.register(&timer, TIMER, Ready::readable(), PollOpt::edge())?; 
Ok(EventLoop { run: true, @@ -260,7 +261,7 @@ impl EventLoop { while self.run { // Execute ticks as long as the event loop is running - try!(self.run_once(handler, None)); + self.run_once(handler, None)?; } Ok(()) @@ -335,7 +336,7 @@ impl EventLoop { } fn io_event(&mut self, handler: &mut H, evt: Event) { - handler.ready(self, evt.token(), evt.kind()); + handler.ready(self, evt.token(), evt.readiness()); } fn notify(&mut self, handler: &mut H) { @@ -390,7 +391,7 @@ impl Sender { } pub fn send(&self, msg: M) -> Result<(), NotifyError> { - try!(self.tx.try_send(msg)); + self.tx.try_send(msg)?; Ok(()) } } diff --git a/third_party/rust/mio/src/deprecated/mod.rs b/third_party/rust/mio/src/deprecated/mod.rs index a6002e4d4134..124a2eee3db4 100644 --- a/third_party/rust/mio/src/deprecated/mod.rs +++ b/third_party/rust/mio/src/deprecated/mod.rs @@ -5,7 +5,7 @@ mod io; mod handler; mod notify; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] pub mod unix; pub use self::event_loop::{ @@ -24,7 +24,7 @@ pub use self::handler::{ pub use self::notify::{ NotifyError, }; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] pub use self::unix::{ pipe, PipeReader, diff --git a/third_party/rust/mio/src/deprecated/unix.rs b/third_party/rust/mio/src/deprecated/unix.rs index ee23b4495226..9f93ae931e07 100644 --- a/third_party/rust/mio/src/deprecated/unix.rs +++ b/third_party/rust/mio/src/deprecated/unix.rs @@ -1,4 +1,5 @@ -use {io, sys, Evented, Ready, Poll, PollOpt, Token}; +use {io, sys, Ready, Poll, PollOpt, Token}; +use event::Evented; use deprecated::TryAccept; use io::MapNonBlock; use std::io::{Read, Write}; @@ -37,7 +38,7 @@ impl UnixSocket { /// Listen for incoming requests pub fn listen(self, backlog: usize) -> io::Result { - try!(self.sys.listen(backlog)); + self.sys.listen(backlog)?; Ok(From::from(self.sys)) } @@ -161,7 +162,7 @@ pub struct UnixListener { impl UnixListener { pub fn bind + ?Sized>(addr: &P) -> io::Result { 
UnixSocket::stream().and_then(|sock| { - try!(sock.bind(addr)); + sock.bind(addr)?; sock.listen(256) }) } @@ -210,7 +211,7 @@ impl From for UnixListener { */ pub fn pipe() -> io::Result<(PipeReader, PipeWriter)> { - let (rd, wr) = try!(sys::pipe()); + let (rd, wr) = sys::pipe()?; Ok((From::from(rd), From::from(wr))) } diff --git a/third_party/rust/mio/src/event_imp.rs b/third_party/rust/mio/src/event_imp.rs index 556a37304f59..6c60aad0fe83 100644 --- a/third_party/rust/mio/src/event_imp.rs +++ b/third_party/rust/mio/src/event_imp.rs @@ -21,9 +21,9 @@ use std::{fmt, io, ops}; /// [`Registration`] and [`SetReadiness`]. In this case, the implementer takes /// responsibility for driving the readiness state changes. /// -/// [`Poll`]: struct.Poll.html -/// [`Registration`]: struct.Registration.html -/// [`SetReadiness`]: struct.SetReadiness.html +/// [`Poll`]: ../struct.Poll.html +/// [`Registration`]: ../struct.Registration.html +/// [`SetReadiness`]: ../struct.SetReadiness.html /// /// # Examples /// @@ -32,7 +32,7 @@ use std::{fmt, io, ops}; /// ``` /// use mio::{Ready, Poll, PollOpt, Token}; /// use mio::event::Evented; -/// use mio::tcp::TcpStream; +/// use mio::net::TcpStream; /// /// use std::io; /// @@ -127,41 +127,75 @@ pub trait Evented { /// instead. Implementors should handle registration by either delegating /// the call to another `Evented` type or creating a [`Registration`]. /// - /// See [struct] documentation for more details. - /// - /// [`Poll::register`]: struct.Poll.html#method.register - /// [`Registration`]: struct.Registration.html - /// [struct]: # + /// [`Poll::register`]: ../struct.Poll.html#method.register + /// [`Registration`]: ../struct.Registration.html fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>; /// Re-register `self` with the given `Poll` instance. /// /// This function should not be called directly. Use [`Poll::reregister`] /// instead. 
Implementors should handle re-registration by either delegating - /// the call to another `Evented` type or calling [`Registration::update`]. + /// the call to another `Evented` type or calling + /// [`SetReadiness::set_readiness`]. /// - /// See [struct] documentation for more details. - /// - /// [`Poll::reregister`]: struct.Poll.html#method.register - /// [`Registration::update`]: struct.Registration.html#method.update - /// [struct]: # + /// [`Poll::reregister`]: ../struct.Poll.html#method.reregister + /// [`SetReadiness::set_readiness`]: ../struct.SetReadiness.html#method.set_readiness fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()>; /// Deregister `self` from the given `Poll` instance /// /// This function should not be called directly. Use [`Poll::deregister`] - /// instead. Implementors shuld handle deregistration by either delegating + /// instead. Implementors should handle deregistration by either delegating /// the call to another `Evented` type or by dropping the [`Registration`] /// associated with `self`. /// - /// See [struct] documentation for more details. 
- /// - /// [`Poll::deregister`]: struct.Poll.html#method.deregister - /// [`Registration`]: struct.Registration.html - /// [struct]: # + /// [`Poll::deregister`]: ../struct.Poll.html#method.deregister + /// [`Registration`]: ../struct.Registration.html fn deregister(&self, poll: &Poll) -> io::Result<()>; } +impl Evented for Box { + fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { + self.as_ref().register(poll, token, interest, opts) + } + + fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { + self.as_ref().reregister(poll, token, interest, opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + self.as_ref().deregister(poll) + } +} + +impl Evented for Box { + fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { + self.as_ref().register(poll, token, interest, opts) + } + + fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { + self.as_ref().reregister(poll, token, interest, opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + self.as_ref().deregister(poll) + } +} + +impl Evented for ::std::sync::Arc { + fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { + self.as_ref().register(poll, token, interest, opts) + } + + fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { + self.as_ref().reregister(poll, token, interest, opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + self.as_ref().deregister(poll) + } +} + /// Options supplied when registering an `Evented` handle with `Poll` /// /// `PollOpt` values can be combined together using the various bitwise @@ -478,6 +512,9 @@ impl ops::Sub for PollOpt { } } +#[deprecated(since = "0.6.10", note = "removed")] +#[cfg(feature = "with-deprecated")] +#[doc(hidden)] impl ops::Not for PollOpt { type 
Output = PollOpt; @@ -497,17 +534,29 @@ impl fmt::Debug for PollOpt { for &(flag, msg) in &flags { if self.contains(flag) { - if one { try!(write!(fmt, " | ")) } - try!(write!(fmt, "{}", msg)); + if one { write!(fmt, " | ")? } + write!(fmt, "{}", msg)?; one = true } } + if !one { + fmt.write_str("(empty)")?; + } + Ok(()) } } +#[test] +fn test_debug_pollopt() { + assert_eq!("(empty)", format!("{:?}", PollOpt::empty())); + assert_eq!("Edge-Triggered", format!("{:?}", PollOpt::edge())); + assert_eq!("Level-Triggered", format!("{:?}", PollOpt::level())); + assert_eq!("OneShot", format!("{:?}", PollOpt::oneshot())); +} + /// A set of readiness event kinds /// /// `Ready` is a set of operation descriptors indicating which kind of an @@ -545,8 +594,10 @@ pub struct Ready(usize); const READABLE: usize = 0b00001; const WRITABLE: usize = 0b00010; -const ERROR: usize = 0b00100; -const HUP: usize = 0b01000; + +// These are deprecated and are moved into platform specific implementations. +const ERROR: usize = 0b00100; +const HUP: usize = 0b01000; impl Ready { /// Returns the empty `Ready` set. @@ -631,13 +682,28 @@ impl Ready { Ready(HUP) } - #[deprecated(since = "0.6.5", note = "removed")] - #[cfg(feature = "with-deprecated")] - #[doc(hidden)] + /// Returns a `Ready` representing readiness for all operations. + /// + /// This includes platform specific operations as well (`hup`, `aio`, + /// `error`, `lio`). + /// + /// See [`Poll`] for more documentation on polling. 
+ /// + /// # Examples + /// + /// ``` + /// use mio::Ready; + /// + /// let ready = Ready::all(); + /// + /// assert!(ready.is_readable()); + /// assert!(ready.is_writable()); + /// ``` + /// + /// [`Poll`]: struct.Poll.html #[inline] pub fn all() -> Ready { - Ready::readable() | - Ready::writable() + Ready(READABLE | WRITABLE | ::sys::READY_ALL) } /// Returns true if `Ready` is the empty set @@ -804,7 +870,6 @@ impl Ready { /// /// assert!(!Ready::readable().contains(readiness)); /// assert!(readiness.contains(readiness)); - /// assert!((readiness | Ready::hup()).contains(readiness)); /// ``` /// /// [`Poll`]: struct.Poll.html @@ -813,6 +878,59 @@ impl Ready { let other = other.into(); (*self & other) == other } + + /// Create a `Ready` instance using the given `usize` representation. + /// + /// The `usize` representation must have been obtained from a call to + /// `Ready::as_usize`. + /// + /// The `usize` representation must be treated as opaque. There is no + /// guaranteed correlation between the returned value and platform defined + /// constants. Also, there is no guarantee that the `usize` representation + /// will remain constant across patch releases of Mio. + /// + /// This function is mainly provided to allow the caller to loa a + /// readiness value from an `AtomicUsize`. + /// + /// # Examples + /// + /// ``` + /// use mio::Ready; + /// + /// let ready = Ready::readable(); + /// let ready_usize = ready.as_usize(); + /// let ready2 = Ready::from_usize(ready_usize); + /// + /// assert_eq!(ready, ready2); + /// ``` + pub fn from_usize(val: usize) -> Ready { + Ready(val) + } + + /// Returns a `usize` representation of the `Ready` value. + /// + /// This `usize` representation must be treated as opaque. There is no + /// guaranteed correlation between the returned value and platform defined + /// constants. Also, there is no guarantee that the `usize` representation + /// will remain constant across patch releases of Mio. 
+ /// + /// This function is mainly provided to allow the caller to store a + /// readiness value in an `AtomicUsize`. + /// + /// # Examples + /// + /// ``` + /// use mio::Ready; + /// + /// let ready = Ready::readable(); + /// let ready_usize = ready.as_usize(); + /// let ready2 = Ready::from_usize(ready_usize); + /// + /// assert_eq!(ready, ready2); + /// ``` + pub fn as_usize(&self) -> usize { + self.0 + } } impl> ops::BitOr for Ready { @@ -824,6 +942,13 @@ impl> ops::BitOr for Ready { } } +impl> ops::BitOrAssign for Ready { + #[inline] + fn bitor_assign(&mut self, other: T) { + self.0 |= other.into().0; + } +} + impl> ops::BitXor for Ready { type Output = Ready; @@ -833,6 +958,13 @@ impl> ops::BitXor for Ready { } } +impl> ops::BitXorAssign for Ready { + #[inline] + fn bitxor_assign(&mut self, other: T) { + self.0 ^= other.into().0; + } +} + impl> ops::BitAnd for Ready { type Output = Ready; @@ -842,6 +974,13 @@ impl> ops::BitAnd for Ready { } } +impl> ops::BitAndAssign for Ready { + #[inline] + fn bitand_assign(&mut self, other: T) { + self.0 &= other.into().0 + } +} + impl> ops::Sub for Ready { type Output = Ready; @@ -851,6 +990,16 @@ impl> ops::Sub for Ready { } } +impl> ops::SubAssign for Ready { + #[inline] + fn sub_assign(&mut self, other: T) { + self.0 &= !other.into().0; + } +} + +#[deprecated(since = "0.6.10", note = "removed")] +#[cfg(feature = "with-deprecated")] +#[doc(hidden)] impl ops::Not for Ready { type Output = Ready; @@ -860,7 +1009,6 @@ impl ops::Not for Ready { } } -// TODO: impl Debug for UnixReady impl fmt::Debug for Ready { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut one = false; @@ -870,23 +1018,30 @@ impl fmt::Debug for Ready { (Ready(ERROR), "Error"), (Ready(HUP), "Hup")]; - try!(write!(fmt, "Ready {{")); - for &(flag, msg) in &flags { if self.contains(flag) { - if one { try!(write!(fmt, " | ")) } - try!(write!(fmt, "{}", msg)); + if one { write!(fmt, " | ")? 
} + write!(fmt, "{}", msg)?; one = true } } - try!(write!(fmt, "}}")); + if !one { + fmt.write_str("(empty)")?; + } Ok(()) } } +#[test] +fn test_debug_ready() { + assert_eq!("(empty)", format!("{:?}", Ready::empty())); + assert_eq!("Readable", format!("{:?}", Ready::readable())); + assert_eq!("Writable", format!("{:?}", Ready::writable())); +} + /// An readiness event returned by [`Poll::poll`]. /// /// `Event` is a [readiness state] paired with a [`Token`]. It is returned by @@ -897,18 +1052,19 @@ impl fmt::Debug for Ready { /// # Examples /// /// ``` -/// use mio::{Event, Ready, Token}; +/// use mio::{Ready, Token}; +/// use mio::event::Event; /// -/// let event = Event::new(Ready::all(), Token(0)); +/// let event = Event::new(Ready::readable() | Ready::writable(), Token(0)); /// -/// assert_eq!(event.readiness(), Ready::all()); +/// assert_eq!(event.readiness(), Ready::readable() | Ready::writable()); /// assert_eq!(event.token(), Token(0)); /// ``` /// -/// [`Poll::poll`]: struct.Poll.html#method.poll -/// [`Poll`]: struct.Poll.html -/// [readiness state ]: struct.Ready.html -/// [`Token`]: struct.Token.html +/// [`Poll::poll`]: ../struct.Poll.html#method.poll +/// [`Poll`]: ../struct.Poll.html +/// [readiness state]: ../struct.Ready.html +/// [`Token`]: ../struct.Token.html #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct Event { kind: Ready, @@ -921,11 +1077,12 @@ impl Event { /// # Examples /// /// ``` - /// use mio::{Event, Ready, Token}; + /// use mio::{Ready, Token}; + /// use mio::event::Event; /// - /// let event = Event::new(Ready::all(), Token(0)); + /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0)); /// - /// assert_eq!(event.readiness(), Ready::all()); + /// assert_eq!(event.readiness(), Ready::readable() | Ready::writable()); /// assert_eq!(event.token(), Token(0)); /// ``` pub fn new(readiness: Ready, token: Token) -> Event { @@ -940,11 +1097,12 @@ impl Event { /// # Examples /// /// ``` - /// use mio::{Event, 
Ready, Token}; + /// use mio::{Ready, Token}; + /// use mio::event::Event; /// - /// let event = Event::new(Ready::all(), Token(0)); + /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0)); /// - /// assert_eq!(event.readiness(), Ready::all()); + /// assert_eq!(event.readiness(), Ready::readable() | Ready::writable()); /// ``` pub fn readiness(&self) -> Ready { self.kind @@ -962,9 +1120,10 @@ impl Event { /// # Examples /// /// ``` - /// use mio::{Event, Ready, Token}; + /// use mio::{Ready, Token}; + /// use mio::event::Event; /// - /// let event = Event::new(Ready::all(), Token(0)); + /// let event = Event::new(Ready::readable() | Ready::writable(), Token(0)); /// /// assert_eq!(event.token(), Token(0)); /// ``` diff --git a/third_party/rust/mio/src/io.rs b/third_party/rust/mio/src/io.rs index a3aeb0d53ede..275001387d52 100644 --- a/third_party/rust/mio/src/io.rs +++ b/third_party/rust/mio/src/io.rs @@ -28,14 +28,8 @@ impl MapNonBlock for Result { #[cfg(feature = "with-deprecated")] pub mod deprecated { - #[cfg(unix)] - const WOULDBLOCK: i32 = ::libc::EAGAIN; - - #[cfg(windows)] - const WOULDBLOCK: i32 = ::winapi::winerror::WSAEWOULDBLOCK as i32; - /// Returns a std `WouldBlock` error without allocating pub fn would_block() -> ::std::io::Error { - ::std::io::Error::from_raw_os_error(WOULDBLOCK) + ::std::io::ErrorKind::WouldBlock.into() } } diff --git a/third_party/rust/mio/src/lib.rs b/third_party/rust/mio/src/lib.rs index ca87ab822d4d..d26628fd50e3 100644 --- a/third_party/rust/mio/src/lib.rs +++ b/third_party/rust/mio/src/lib.rs @@ -10,6 +10,21 @@ //! * Design to allow for stack allocated buffers when possible (avoid double buffering). //! * Provide utilities such as a timers, a notification channel, buffer abstractions, and a slab. //! +//! # Platforms +//! +//! Currently supported platforms: +//! +//! * Linux +//! * OS X +//! * Windows +//! * FreeBSD +//! * NetBSD +//! * Android +//! * iOS +//! +//! 
mio can handle interfacing with each of the event notification systems of the aforementioned platforms. The details of +//! their implementation are further discussed in [`Poll`]. +//! //! # Usage //! //! Using mio starts by creating a [`Poll`], which reads events from the OS and @@ -24,7 +39,7 @@ //! //! ``` //! use mio::*; -//! use mio::tcp::{TcpListener, TcpStream}; +//! use mio::net::{TcpListener, TcpStream}; //! //! // Setup some tokens to allow us to identify which event is //! // for which socket. @@ -75,15 +90,20 @@ //! //! ``` -#![doc(html_root_url = "https://docs.rs/mio/0.6.1")] +#![doc(html_root_url = "https://docs.rs/mio/0.6.15")] #![crate_name = "mio"] #![deny(warnings, missing_docs, missing_debug_implementations)] extern crate lazycell; extern crate net2; -extern crate slab; extern crate iovec; +extern crate slab; + +#[cfg(target_os = "fuchsia")] +extern crate fuchsia_zircon as zircon; +#[cfg(target_os = "fuchsia")] +extern crate fuchsia_zircon_sys as zircon_sys; #[cfg(unix)] extern crate libc; @@ -100,9 +120,6 @@ extern crate kernel32; #[macro_use] extern crate log; -#[cfg(test)] -extern crate env_logger; - mod event_imp; mod io; mod poll; @@ -111,12 +128,12 @@ mod token; pub mod net; -#[deprecated(since = "0.6.5", note = "use mio-more instead")] +#[deprecated(since = "0.6.5", note = "use mio-extras instead")] #[cfg(feature = "with-deprecated")] #[doc(hidden)] pub mod channel; -#[deprecated(since = "0.6.5", note = "use mio-more instead")] +#[deprecated(since = "0.6.5", note = "use mio-extras instead")] #[cfg(feature = "with-deprecated")] #[doc(hidden)] pub mod timer; @@ -181,7 +198,7 @@ pub use poll::Iter as EventsIter; #[doc(hidden)] pub use io::deprecated::would_block; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] pub mod unix { //! Unix only extensions pub use sys::{ @@ -190,6 +207,21 @@ pub mod unix { pub use sys::unix::UnixReady; } +#[cfg(target_os = "fuchsia")] +pub mod fuchsia { + //! Fuchsia-only extensions + //! + //! 
# Stability + //! + //! This module depends on the [magenta-sys crate](https://crates.io/crates/magenta-sys) + //! and so might introduce breaking changes, even on minor releases, + //! so long as that crate remains unstable. + pub use sys::{ + EventedHandle, + }; + pub use sys::fuchsia::{FuchsiaReady, zx_signals_t}; +} + /// Windows-only extensions to the mio crate. /// /// Mio on windows is currently implemented with IOCP for a high-performance @@ -221,7 +253,7 @@ pub mod unix { /// buffering to ensure that a readiness interface can be provided. For a /// sample implementation see the TCP/UDP modules in mio itself. /// -/// * `Overlapped` - this type is intended to be used as the concreate instances +/// * `Overlapped` - this type is intended to be used as the concrete instances /// of the `OVERLAPPED` type that most win32 methods expect. It's crucial, for /// safety, that all asynchronous operations are initiated with an instance of /// `Overlapped` and not another instantiation of `OVERLAPPED`. diff --git a/third_party/rust/mio/src/net/tcp.rs b/third_party/rust/mio/src/net/tcp.rs index 936570f126e3..dfde91a65eff 100644 --- a/third_party/rust/mio/src/net/tcp.rs +++ b/third_party/rust/mio/src/net/tcp.rs @@ -7,7 +7,7 @@ //! 
/// [portability guidelines]: ../struct.Poll.html#portability - +use std::fmt; use std::io::{Read, Write}; use std::net::{self, SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr}; use std::time::Duration; @@ -33,25 +33,33 @@ use poll::SelectorId; /// /// ``` /// # use std::net::TcpListener; -/// # let _listener = TcpListener::bind("127.0.0.1:3454").unwrap(); +/// # use std::error::Error; +/// # +/// # fn try_main() -> Result<(), Box> { +/// # let _listener = TcpListener::bind("127.0.0.1:34254")?; /// use mio::{Events, Ready, Poll, PollOpt, Token}; -/// use mio::tcp::TcpStream; +/// use mio::net::TcpStream; /// use std::time::Duration; /// -/// let stream = TcpStream::connect(&"127.0.0.1:34254".parse().unwrap()).unwrap(); +/// let stream = TcpStream::connect(&"127.0.0.1:34254".parse()?)?; /// -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// let mut events = Events::with_capacity(128); /// /// // Register the socket with `Poll` /// poll.register(&stream, Token(0), Ready::writable(), -/// PollOpt::edge()).unwrap(); +/// PollOpt::edge())?; /// -/// poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; /// /// // The socket might be ready at this point +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` -#[derive(Debug)] pub struct TcpStream { sys: sys::TcpStream, selector_id: SelectorId, @@ -59,6 +67,18 @@ pub struct TcpStream { use std::net::Shutdown; +// TODO: remove when fuchsia's set_nonblocking is fixed in libstd +#[cfg(target_os = "fuchsia")] +fn set_nonblocking(stream: &net::TcpStream) -> io::Result<()> { + sys::set_nonblock( + ::std::os::unix::io::AsRawFd::as_raw_fd(stream)) +} +#[cfg(not(target_os = "fuchsia"))] +fn set_nonblocking(stream: &net::TcpStream) -> io::Result<()> { + stream.set_nonblocking(true) +} + + impl TcpStream { /// Create a new TCP stream and issue a non-blocking connect to the /// specified 
address. @@ -70,16 +90,16 @@ impl TcpStream { /// `TcpStream::connect_stream` to transfer ownership into mio and schedule /// the connect operation. pub fn connect(addr: &SocketAddr) -> io::Result { - let sock = try!(match *addr { + let sock = match *addr { SocketAddr::V4(..) => TcpBuilder::new_v4(), SocketAddr::V6(..) => TcpBuilder::new_v6(), - }); + }?; // Required on Windows for a future `connect_overlapped` operation to be // executed successfully. if cfg!(windows) { - try!(sock.bind(&inaddr_any(addr))); + sock.bind(&inaddr_any(addr))?; } - TcpStream::connect_stream(try!(sock.to_tcp_stream()), addr) + TcpStream::connect_stream(sock.to_tcp_stream()?, addr) } /// Creates a new `TcpStream` from the pending socket inside the given @@ -103,7 +123,7 @@ impl TcpStream { pub fn connect_stream(stream: net::TcpStream, addr: &SocketAddr) -> io::Result { Ok(TcpStream { - sys: try!(sys::TcpStream::connect(stream, addr)), + sys: sys::TcpStream::connect(stream, addr)?, selector_id: SelectorId::new(), }) } @@ -119,7 +139,8 @@ impl TcpStream { /// it should already be connected via some other means (be it manually, the /// net2 crate, or the standard library). pub fn from_stream(stream: net::TcpStream) -> io::Result { - try!(stream.set_nonblocking(true)); + set_nonblocking(&stream)?; + Ok(TcpStream { sys: sys::TcpStream::from_stream(stream), selector_id: SelectorId::new(), @@ -193,7 +214,7 @@ impl TcpStream { /// For more information about this option, see /// [`set_recv_buffer_size`][link]. /// - /// [link]: #tymethod.set_recv_buffer_size + /// [link]: #method.set_recv_buffer_size pub fn recv_buffer_size(&self) -> io::Result { self.sys.recv_buffer_size() } @@ -208,9 +229,10 @@ impl TcpStream { /// Gets the value of the `SO_SNDBUF` option on this socket. /// - /// For more information about this option, see [`set_send_buffer`][link]. + /// For more information about this option, see + /// [`set_send_buffer_size`][link]. 
/// - /// [link]: #tymethod.set_send_buffer + /// [link]: #method.set_send_buffer_size pub fn send_buffer_size(&self) -> io::Result { self.sys.send_buffer_size() } @@ -236,7 +258,7 @@ impl TcpStream { /// /// For more information about this option, see [`set_keepalive`][link]. /// - /// [link]: #tymethod.set_keepalive + /// [link]: #method.set_keepalive pub fn keepalive(&self) -> io::Result> { self.sys.keepalive() } @@ -253,7 +275,7 @@ impl TcpStream { /// /// For more information about this option, see [`set_ttl`][link]. /// - /// [link]: #tymethod.set_ttl + /// [link]: #method.set_ttl pub fn ttl(&self) -> io::Result { self.sys.ttl() } @@ -274,17 +296,21 @@ impl TcpStream { /// /// For more information about this option, see [`set_only_v6`][link]. /// - /// [link]: #tymethod.set_only_v6 + /// [link]: #method.set_only_v6 pub fn only_v6(&self) -> io::Result { self.sys.only_v6() } - /// Sets the linger duration of this socket by setting the SO_LINGER option + /// Sets the value for the `SO_LINGER` option on this socket. pub fn set_linger(&self, dur: Option) -> io::Result<()> { self.sys.set_linger(dur) } - /// reads the linger duration for this socket by getting the SO_LINGER option + /// Gets the value of the `SO_LINGER` option on this socket. + /// + /// For more information about this option, see [`set_linger`][link]. + /// + /// [link]: #method.set_linger pub fn linger(&self) -> io::Result> { self.sys.linger() } @@ -316,6 +342,16 @@ impl TcpStream { self.sys.take_error() } + /// Receives data on the socket from the remote address to which it is + /// connected, without removing that data from the queue. On success, + /// returns the number of bytes peeked. + /// + /// Successive calls return the same data. This is accomplished by passing + /// `MSG_PEEK` as a flag to the underlying recv system call. + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + self.sys.peek(buf) + } + /// Read in a list of buffers all at once. 
/// /// This operation will attempt to read bytes from this socket and place @@ -403,7 +439,7 @@ impl<'a> Write for &'a TcpStream { impl Evented for TcpStream { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { - try!(self.selector_id.associate_selector(poll)); + self.selector_id.associate_selector(poll)?; self.sys.register(poll, token, interest, opts) } @@ -417,6 +453,12 @@ impl Evented for TcpStream { } } +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.sys, f) + } +} + /* * * ===== TcpListener ===== @@ -428,24 +470,31 @@ impl Evented for TcpStream { /// # Examples /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Ready, Poll, PollOpt, Token}; -/// use mio::tcp::TcpListener; +/// use mio::net::TcpListener; /// use std::time::Duration; /// -/// let listener = TcpListener::bind(&"127.0.0.1:34254".parse().unwrap()).unwrap(); +/// let listener = TcpListener::bind(&"127.0.0.1:34255".parse()?)?; /// -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// let mut events = Events::with_capacity(128); /// /// // Register the socket with `Poll` /// poll.register(&listener, Token(0), Ready::writable(), -/// PollOpt::edge()).unwrap(); +/// PollOpt::edge())?; /// -/// poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; /// /// // There may be a socket ready to be accepted +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` -#[derive(Debug)] pub struct TcpListener { sys: sys::TcpListener, selector_id: SelectorId, @@ -468,27 +517,35 @@ impl TcpListener { /// ownership into mio. pub fn bind(addr: &SocketAddr) -> io::Result { // Create the socket - let sock = try!(match *addr { + let sock = match *addr { SocketAddr::V4(..) => TcpBuilder::new_v4(), SocketAddr::V6(..) 
=> TcpBuilder::new_v6(), - }); + }?; // Set SO_REUSEADDR, but only on Unix (mirrors what libstd does) if cfg!(unix) { - try!(sock.reuse_address(true)); + sock.reuse_address(true)?; } // Bind the socket - try!(sock.bind(addr)); + sock.bind(addr)?; // listen - let listener = try!(sock.listen(1024)); + let listener = sock.listen(1024)?; Ok(TcpListener { - sys: try!(sys::TcpListener::new(listener, addr)), + sys: sys::TcpListener::new(listener)?, selector_id: SelectorId::new(), }) } + #[deprecated(since = "0.6.13", note = "use from_std instead")] + #[cfg(feature = "with-deprecated")] + #[doc(hidden)] + pub fn from_listener(listener: net::TcpListener, _: &SocketAddr) + -> io::Result { + TcpListener::from_std(listener) + } + /// Creates a new `TcpListener` from an instance of a /// `std::net::TcpListener` type. /// @@ -498,9 +555,8 @@ impl TcpListener { /// loop. /// /// The address provided must be the address that the listener is bound to. - pub fn from_listener(listener: net::TcpListener, addr: &SocketAddr) - -> io::Result { - sys::TcpListener::new(listener, addr).map(|s| { + pub fn from_std(listener: net::TcpListener) -> io::Result { + sys::TcpListener::new(listener).map(|s| { TcpListener { sys: s, selector_id: SelectorId::new(), @@ -518,14 +574,17 @@ impl TcpListener { /// If an accepted stream is returned, the remote address of the peer is /// returned along with it. pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - self.sys.accept().map(|(s, a)| { - let stream = TcpStream { - sys: s, - selector_id: SelectorId::new(), - }; + let (s, a) = try!(self.accept_std()); + Ok((TcpStream::from_stream(s)?, a)) + } - (stream, a) - }) + /// Accepts a new `std::net::TcpStream`. + /// + /// This method is the same as `accept`, except that it returns a TCP socket + /// *in blocking mode* which isn't bound to `mio`. This can be later then + /// converted to a `mio` type, if necessary. 
+ pub fn accept_std(&self) -> io::Result<(net::TcpStream, SocketAddr)> { + self.sys.accept() } /// Returns the local socket address of this listener. @@ -598,7 +657,7 @@ impl TcpListener { impl Evented for TcpListener { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { - try!(self.selector_id.associate_selector(poll)); + self.selector_id.associate_selector(poll)?; self.sys.register(poll, token, interest, opts) } @@ -612,30 +671,36 @@ impl Evented for TcpListener { } } +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.sys, f) + } +} + /* * * ===== UNIX ext ===== * */ -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd}; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl IntoRawFd for TcpStream { fn into_raw_fd(self) -> RawFd { self.sys.into_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl AsRawFd for TcpStream { fn as_raw_fd(&self) -> RawFd { self.sys.as_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl FromRawFd for TcpStream { unsafe fn from_raw_fd(fd: RawFd) -> TcpStream { TcpStream { @@ -645,21 +710,21 @@ impl FromRawFd for TcpStream { } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl IntoRawFd for TcpListener { fn into_raw_fd(self) -> RawFd { self.sys.into_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl AsRawFd for TcpListener { fn as_raw_fd(&self) -> RawFd { self.sys.as_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl FromRawFd for TcpListener { unsafe fn from_raw_fd(fd: RawFd) -> TcpListener { TcpListener { diff --git a/third_party/rust/mio/src/net/udp.rs b/third_party/rust/mio/src/net/udp.rs index cdd4d83fc842..d82cebc2bb8b 100644 --- a/third_party/rust/mio/src/net/udp.rs +++ b/third_party/rust/mio/src/net/udp.rs @@ -10,6 
+10,7 @@ use {io, sys, Ready, Poll, PollOpt, Token}; use event::Evented; use poll::SelectorId; +use std::fmt; use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; /// A User Datagram Protocol socket. @@ -17,7 +18,74 @@ use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; /// This is an implementation of a bound UDP socket. This supports both IPv4 and /// IPv6 addresses, and there is no corresponding notion of a server because UDP /// is a datagram protocol. -#[derive(Debug)] +/// +/// # Examples +/// +/// ``` +/// # use std::error::Error; +/// # +/// # fn try_main() -> Result<(), Box> { +/// // An Echo program: +/// // SENDER -> sends a message. +/// // ECHOER -> listens and prints the message received. +/// +/// use mio::net::UdpSocket; +/// use mio::{Events, Ready, Poll, PollOpt, Token}; +/// use std::time::Duration; +/// +/// const SENDER: Token = Token(0); +/// const ECHOER: Token = Token(1); +/// +/// // This operation will fail if the address is in use, so we select different ports for each +/// // socket. +/// let sender_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; +/// let echoer_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; +/// +/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from +/// // respectively. +/// sender_socket.connect(echoer_socket.local_addr().unwrap())?; +/// +/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be +/// // read from. +/// let poll = Poll::new()?; +/// +/// // We register our sockets here so that we can check if they are ready to be written/read. 
+/// poll.register(&sender_socket, SENDER, Ready::writable(), PollOpt::edge())?; +/// poll.register(&echoer_socket, ECHOER, Ready::readable(), PollOpt::edge())?; +/// +/// let msg_to_send = [9; 9]; +/// let mut buffer = [0; 9]; +/// +/// let mut events = Events::with_capacity(128); +/// loop { +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; +/// for event in events.iter() { +/// match event.token() { +/// // Our SENDER is ready to be written into. +/// SENDER => { +/// let bytes_sent = sender_socket.send(&msg_to_send)?; +/// assert_eq!(bytes_sent, 9); +/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent); +/// }, +/// // Our ECHOER is ready to be read from. +/// ECHOER => { +/// let num_recv = echoer_socket.recv(&mut buffer)?; +/// println!("echo {:?} -> {:?}", buffer, num_recv); +/// buffer = [0; 9]; +/// # return Ok(()); +/// } +/// _ => unreachable!() +/// } +/// } +/// } +/// # +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } +/// ``` pub struct UdpSocket { sys: sys::UdpSocket, selector_id: SelectorId, @@ -25,8 +93,34 @@ pub struct UdpSocket { impl UdpSocket { /// Creates a UDP socket from the given address. + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// // We must bind it to an open address. + /// let socket = match UdpSocket::bind(&"127.0.0.1:0".parse()?) { + /// Ok(new_socket) => new_socket, + /// Err(fail) => { + /// // We panic! here, but you could try to bind it again on another address. + /// panic!("Failed to bind socket. {:?}", fail); + /// } + /// }; + /// + /// // Our socket was created, but we should not use it before checking it's readiness. 
+ /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn bind(addr: &SocketAddr) -> io::Result { - let socket = try!(net::UdpSocket::bind(addr)); + let socket = net::UdpSocket::bind(addr)?; UdpSocket::from_socket(socket) } @@ -42,12 +136,34 @@ impl UdpSocket { /// options like `reuse_address` or binding to multiple addresses. pub fn from_socket(socket: net::UdpSocket) -> io::Result { Ok(UdpSocket { - sys: try!(sys::UdpSocket::new(socket)), + sys: sys::UdpSocket::new(socket)?, selector_id: SelectorId::new(), }) } /// Returns the socket address that this socket was created from. + /// + /// # Examples + /// + // This assertion is almost, but not quite, universal. It fails on + // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed, + // so simply disable the test on FreeBSD. + #[cfg_attr(not(target_os = "freebsd"), doc = " ```")] + #[cfg_attr(target_os = "freebsd", doc = " ```no_run")] + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let addr = "127.0.0.1:0".parse()?; + /// let socket = UdpSocket::bind(&addr)?; + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn local_addr(&self) -> io::Result { self.sys.local_addr() } @@ -57,6 +173,28 @@ impl UdpSocket { /// The returned `UdpSocket` is a reference to the same socket that this /// object references. Both handles will read and write the same port, and /// options set on one socket will be propagated to the other. + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// // We must bind it to an open address. 
+ /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// let cloned_socket = socket.try_clone()?; + /// + /// assert_eq!(socket.local_addr()?, cloned_socket.local_addr()?); + /// + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn try_clone(&self) -> io::Result { self.sys.try_clone() .map(|s| { @@ -72,21 +210,66 @@ impl UdpSocket { /// /// Address type can be any implementor of `ToSocketAddrs` trait. See its /// documentation for concrete examples. + /// + /// # Examples + /// + /// ```no_run + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// + /// // We must check if the socket is writable before calling send_to, + /// // or we could run into a WouldBlock error. + /// + /// let bytes_sent = socket.send_to(&[9; 9], &"127.0.0.1:11100".parse()?)?; + /// assert_eq!(bytes_sent, 9); + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result { self.sys.send_to(buf, target) } /// Receives data from the socket. On success, returns the number of bytes /// read and the address from whence the data came. + /// + /// # Examples + /// + /// ```no_run + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// + /// // We must check if the socket is readable before calling recv_from, + /// // or we could run into a WouldBlock error. 
+ /// + /// let mut buf = [0; 9]; + /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?; + /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr); + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { self.sys.recv_from(buf) } /// Sends data on the socket to the address previously bound via connect(). On success, /// returns the number of bytes written. - /// - /// Address type can be any implementor of `ToSocketAddrs` trait. See its - /// documentation for concrete examples. pub fn send(&self, buf: &[u8]) -> io::Result { self.sys.send(buf) } @@ -104,34 +287,66 @@ impl UdpSocket { self.sys.connect(addr) } + /// Sets the value of the `SO_BROADCAST` option for this socket. + /// + /// When enabled, this socket is allowed to send packets to a broadcast + /// address. + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let broadcast_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// if broadcast_socket.broadcast()? == false { + /// broadcast_socket.set_broadcast(true)?; + /// } + /// + /// assert_eq!(broadcast_socket.broadcast()?, true); + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` + pub fn set_broadcast(&self, on: bool) -> io::Result<()> { + self.sys.set_broadcast(on) + } + /// Gets the value of the `SO_BROADCAST` option for this socket. /// /// For more information about this option, see /// [`set_broadcast`][link]. 
/// /// [link]: #method.set_broadcast + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let broadcast_socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// assert_eq!(broadcast_socket.broadcast()?, false); + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn broadcast(&self) -> io::Result { self.sys.broadcast() } - /// Sets the value of the `SO_BROADCAST` option for this socket. - /// - /// When enabled, this socket is allowed to send packets to a broadcast - /// address. - pub fn set_broadcast(&self, on: bool) -> io::Result<()> { - self.sys.set_broadcast(on) - } - - /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_loop_v4`][link]. - /// - /// [link]: #method.set_multicast_loop_v4 - pub fn multicast_loop_v4(&self) -> io::Result { - self.sys.multicast_loop_v4() - } - /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. /// /// If enabled, multicast packets will be looped back to the local socket. @@ -140,14 +355,14 @@ impl UdpSocket { self.sys.set_multicast_loop_v4(on) } - /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. + /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. /// /// For more information about this option, see - /// [`set_multicast_ttl_v4`][link]. + /// [`set_multicast_loop_v4`][link]. /// - /// [link]: #method.set_multicast_ttl_v4 - pub fn multicast_ttl_v4(&self) -> io::Result { - self.sys.multicast_ttl_v4() + /// [link]: #method.set_multicast_loop_v4 + pub fn multicast_loop_v4(&self) -> io::Result { + self.sys.multicast_loop_v4() } /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. 
@@ -161,14 +376,14 @@ impl UdpSocket { self.sys.set_multicast_ttl_v4(ttl) } - /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. /// /// For more information about this option, see - /// [`set_multicast_loop_v6`][link]. + /// [`set_multicast_ttl_v4`][link]. /// - /// [link]: #method.set_multicast_loop_v6 - pub fn multicast_loop_v6(&self) -> io::Result { - self.sys.multicast_loop_v6() + /// [link]: #method.set_multicast_ttl_v4 + pub fn multicast_ttl_v4(&self) -> io::Result { + self.sys.multicast_ttl_v4() } /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. @@ -179,23 +394,77 @@ impl UdpSocket { self.sys.set_multicast_loop_v6(on) } - /// Gets the value of the `IP_TTL` option for this socket. + /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. /// - /// For more information about this option, see [`set_ttl`][link]. + /// For more information about this option, see + /// [`set_multicast_loop_v6`][link]. /// - /// [link]: #method.set_ttl - pub fn ttl(&self) -> io::Result { - self.sys.ttl() + /// [link]: #method.set_multicast_loop_v6 + pub fn multicast_loop_v6(&self) -> io::Result { + self.sys.multicast_loop_v6() } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// if socket.ttl()? 
< 255 { + /// socket.set_ttl(255)?; + /// } + /// + /// assert_eq!(socket.ttl()?, 255); + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.sys.set_ttl(ttl) } + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`][link]. + /// + /// [link]: #method.set_ttl + /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # + /// # fn try_main() -> Result<(), Box> { + /// use mio::net::UdpSocket; + /// + /// let socket = UdpSocket::bind(&"127.0.0.1:0".parse()?)?; + /// socket.set_ttl(255)?; + /// + /// assert_eq!(socket.ttl()?, 255); + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` + pub fn ttl(&self) -> io::Result { + self.sys.ttl() + } + /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. /// /// This function specifies a new multicast group for this socket to join. @@ -244,6 +513,27 @@ impl UdpSocket { self.sys.leave_multicast_v6(multiaddr, interface) } + /// Sets the value for the `IPV6_V6ONLY` option on this socket. + /// + /// If this is set to `true` then the socket is restricted to sending and + /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications + /// can bind the same port at the same time. + /// + /// If this is set to `false` then the socket can be used to send and + /// receive packets from an IPv4-mapped IPv6 address. + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.sys.set_only_v6(only_v6) + } + + /// Gets the value of the `IPV6_V6ONLY` option for this socket. + /// + /// For more information about this option, see [`set_only_v6`][link]. + /// + /// [link]: #method.set_only_v6 + pub fn only_v6(&self) -> io::Result { + self.sys.only_v6() + } + /// Get the value of the `SO_ERROR` option on this socket. 
/// /// This will retrieve the stored error in the underlying socket, clearing @@ -256,7 +546,7 @@ impl UdpSocket { impl Evented for UdpSocket { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { - try!(self.selector_id.associate_selector(poll)); + self.selector_id.associate_selector(poll)?; self.sys.register(poll, token, interest, opts) } @@ -269,30 +559,36 @@ impl Evented for UdpSocket { } } +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.sys, f) + } +} + /* * * ===== UNIX ext ===== * */ -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd}; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl IntoRawFd for UdpSocket { fn into_raw_fd(self) -> RawFd { self.sys.into_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl AsRawFd for UdpSocket { fn as_raw_fd(&self) -> RawFd { self.sys.as_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl FromRawFd for UdpSocket { unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket { UdpSocket { diff --git a/third_party/rust/mio/src/poll.rs b/third_party/rust/mio/src/poll.rs index 456f78817d05..a2fcf23dfd54 100644 --- a/third_party/rust/mio/src/poll.rs +++ b/third_party/rust/mio/src/poll.rs @@ -3,16 +3,14 @@ use event_imp::{self as event, Ready, Event, Evented, PollOpt}; use std::{fmt, io, ptr, usize}; use std::cell::UnsafeCell; use std::{mem, ops, isize}; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] use std::os::unix::io::AsRawFd; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] use std::os::unix::io::RawFd; use std::sync::{Arc, Mutex, Condvar}; use std::sync::atomic::{AtomicUsize, AtomicPtr, AtomicBool}; use std::sync::atomic::Ordering::{self, Acquire, Release, AcqRel, Relaxed, SeqCst}; use std::time::{Duration, Instant}; -#[cfg(unix)] -use sys::unix::UnixReady; // 
Poll is backed by two readiness queues. The first is a system readiness queue // represented by `sys::Selector`. The system readiness queue handles events @@ -20,7 +18,7 @@ use sys::unix::UnixReady; // implemented in user space by `ReadinessQueue`. It provides a way to implement // purely user space `Evented` types. // -// `ReadinessQueue` is is backed by a MPSC queue that supports reuse of linked +// `ReadinessQueue` is backed by a MPSC queue that supports reuse of linked // list nodes. This significantly reduces the number of required allocations. // Each `Registration` / `SetReadiness` pair allocates a single readiness node // that is used for the lifetime of the registration. @@ -89,38 +87,46 @@ use sys::unix::UnixReady; /// A basic example -- establishing a `TcpStream` connection. /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll, Ready, PollOpt, Token}; -/// use mio::tcp::TcpStream; +/// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// /// // Bind a server socket to connect to. -/// let addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); -/// let server = TcpListener::bind(&addr).unwrap(); +/// let addr: SocketAddr = "127.0.0.1:0".parse()?; +/// let server = TcpListener::bind(&addr)?; /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream -/// let stream = TcpStream::connect(&server.local_addr().unwrap()).unwrap(); +/// let stream = TcpStream::connect(&server.local_addr()?)?; /// /// // Register the stream with `Poll` -/// poll.register(&stream, Token(0), Ready::all(), PollOpt::edge()).unwrap(); +/// poll.register(&stream, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?; /// /// // Wait for the socket to become ready. This has to happens in a loop to /// // handle spurious wakeups. 
/// loop { -/// poll.poll(&mut events, None).unwrap(); +/// poll.poll(&mut events, None)?; /// /// for event in &events { /// if event.token() == Token(0) && event.readiness().is_writable() { /// // The socket connected (probably, it could still be a spurious /// // wakeup) -/// return; +/// return Ok(()); /// } /// } /// } +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` /// /// # Edge-triggered and level-triggered @@ -153,7 +159,7 @@ use sys::unix::UnixReady; /// assume that [`Poll::poll`] may never return another event for the same token /// and readiness until the operation returns [`WouldBlock`]. /// -/// By contrast, when level-triggered notfications was requested, each call to +/// By contrast, when level-triggered notifications was requested, each call to /// [`Poll::poll`] will return an event for the socket as long as data remains /// in the socket buffer. Generally, level-triggered events should be avoided if /// high performance is a concern. @@ -246,31 +252,39 @@ use sys::unix::UnixReady; /// /// [`readable`]: struct.Ready.html#method.readable /// [`writable`]: struct.Ready.html#method.writable -/// [`error`]: struct.Ready.html#method.error -/// [`hup`]: struct.Ready.html#method.hup +/// [`error`]: unix/struct.UnixReady.html#method.error +/// [`hup`]: unix/struct.UnixReady.html#method.hup /// /// ### Registering handles /// /// Unless otherwise noted, it should be assumed that types implementing -/// [`Evented`] will never be become ready unless they are registered with `Poll`. +/// [`Evented`] will never become ready unless they are registered with `Poll`. 
/// /// For example: /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Poll, Ready, PollOpt, Token}; -/// use mio::tcp::TcpStream; +/// use mio::net::TcpStream; /// use std::time::Duration; /// use std::thread; /// -/// let sock = TcpStream::connect(&"216.58.193.100:80".parse().unwrap()).unwrap(); +/// let sock = TcpStream::connect(&"216.58.193.100:80".parse()?)?; /// /// thread::sleep(Duration::from_secs(1)); /// -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// /// // The connect is not guaranteed to have started until it is registered at /// // this point -/// poll.register(&sock, Token(0), Ready::all(), PollOpt::edge()).unwrap(); +/// poll.register(&sock, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?; +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` /// /// # Implementation notes @@ -417,6 +431,11 @@ pub struct Poll { /// [`Poll`]: struct.Poll.html /// [`Registration::new2`]: struct.Registration.html#method.new2 /// [`Evented`]: event/trait.Evented.html +/// [`set_readiness`]: struct.SetReadiness.html#method.set_readiness +/// [`register`]: struct.Poll.html#method.register +/// [`reregister`]: struct.Poll.html#method.reregister +/// [`deregister`]: struct.Poll.html#method.deregister +/// [portability]: struct.Poll.html#portability pub struct Registration { inner: RegistrationInner, } @@ -424,12 +443,13 @@ pub struct Registration { unsafe impl Send for Registration {} unsafe impl Sync for Registration {} -/// Updates the readiness state of the associated [`Registration`]. +/// Updates the readiness state of the associated `Registration`. /// /// See [`Registration`] for more documentation on using `SetReadiness` and /// [`Poll`] for high level polling documentation. 
/// -/// [`Registration`] +/// [`Poll`]: struct.Poll.html +/// [`Registration`]: struct.Registration.html #[derive(Clone)] pub struct SetReadiness { inner: RegistrationInner, @@ -603,6 +623,8 @@ impl Poll { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Poll, Events}; /// use std::time::Duration; /// @@ -616,30 +638,36 @@ impl Poll { /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. - /// let n = poll.poll(&mut events, Some(Duration::from_millis(500))).unwrap(); + /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` pub fn new() -> io::Result { is_send::(); is_sync::(); let poll = Poll { - selector: try!(sys::Selector::new()), - readiness_queue: try!(ReadinessQueue::new()), + selector: sys::Selector::new()?, + readiness_queue: ReadinessQueue::new()?, lock_state: AtomicUsize::new(0), lock: Mutex::new(()), condvar: Condvar::new(), }; // Register the notification wakeup FD with the IO poller - try!(poll.readiness_queue.inner.awakener.register(&poll, AWAKEN, Ready::readable(), PollOpt::edge())); + poll.readiness_queue.inner.awakener.register(&poll, AWAKEN, Ready::readable(), PollOpt::edge())?; Ok(poll) } /// Register an `Evented` handle with the `Poll` instance. /// - /// Once registerd, the `Poll` instance will monitor the `Evented` handle + /// Once registered, the `Poll` instance will monitor the `Evented` handle /// for readiness state changes. When it notices a state change, it will /// return a readiness event for the handle the next time [`poll`] is /// called. 
@@ -703,15 +731,17 @@ impl Poll { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll, Ready, PollOpt, Token}; - /// use mio::tcp::TcpStream; + /// use mio::net::TcpStream; /// use std::time::{Duration, Instant}; /// - /// let poll = Poll::new().unwrap(); - /// let socket = TcpStream::connect(&"216.58.193.100:80".parse().unwrap()).unwrap(); + /// let poll = Poll::new()?; + /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?; /// /// // Register the socket with `poll` - /// poll.register(&socket, Token(0), Ready::all(), PollOpt::edge()).unwrap(); + /// poll.register(&socket, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?; /// /// let mut events = Events::with_capacity(1024); /// let start = Instant::now(); @@ -722,24 +752,30 @@ impl Poll { /// /// if elapsed >= timeout { /// // Connection timed out - /// return; + /// return Ok(()); /// } /// /// let remaining = timeout - elapsed; - /// poll.poll(&mut events, Some(remaining)).unwrap(); + /// poll.poll(&mut events, Some(remaining))?; /// /// for event in &events { /// if event.token() == Token(0) { /// // Something (probably) happened on the socket. 
- /// return; + /// return Ok(()); /// } /// } /// } + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` pub fn register(&self, handle: &E, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> where E: Evented { - try!(validate_args(token, interest)); + validate_args(token)?; /* * Undefined behavior: @@ -749,7 +785,7 @@ impl Poll { trace!("registering with poller"); // Register interests for this socket - try!(handle.register(self, token, interest, opts)); + handle.register(self, token, interest, opts)?; Ok(()) } @@ -780,19 +816,27 @@ impl Poll { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Poll, Ready, PollOpt, Token}; - /// use mio::tcp::TcpStream; + /// use mio::net::TcpStream; /// - /// let poll = Poll::new().unwrap(); - /// let socket = TcpStream::connect(&"216.58.193.100:80".parse().unwrap()).unwrap(); + /// let poll = Poll::new()?; + /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?; /// /// // Register the socket with `poll`, requesting readable - /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge())?; /// /// // Reregister the socket specifying a different token and write interest /// // instead. `PollOpt::edge()` must be specified even though that value /// // is not being changed. 
- /// poll.reregister(&socket, Token(2), Ready::writable(), PollOpt::edge()).unwrap(); + /// poll.reregister(&socket, Token(2), Ready::writable(), PollOpt::edge())?; + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` /// /// [`struct`]: # @@ -802,12 +846,12 @@ impl Poll { pub fn reregister(&self, handle: &E, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> where E: Evented { - try!(validate_args(token, interest)); + validate_args(token)?; trace!("registering with poller"); // Register interests for this socket - try!(handle.reregister(self, token, interest, opts)); + handle.reregister(self, token, interest, opts)?; Ok(()) } @@ -829,23 +873,31 @@ impl Poll { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll, Ready, PollOpt, Token}; - /// use mio::tcp::TcpStream; + /// use mio::net::TcpStream; /// use std::time::Duration; /// - /// let poll = Poll::new().unwrap(); - /// let socket = TcpStream::connect(&"216.58.193.100:80".parse().unwrap()).unwrap(); + /// let poll = Poll::new()?; + /// let socket = TcpStream::connect(&"216.58.193.100:80".parse()?)?; /// /// // Register the socket with `poll` - /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + /// poll.register(&socket, Token(0), Ready::readable(), PollOpt::edge())?; /// - /// poll.deregister(&socket).unwrap(); + /// poll.deregister(&socket)?; /// /// let mut events = Events::with_capacity(1024); /// /// // Set a timeout because this poll should never receive any events. 
- /// let n = poll.poll(&mut events, Some(Duration::from_secs(1))).unwrap(); + /// let n = poll.poll(&mut events, Some(Duration::from_secs(1)))?; /// assert_eq!(0, n); + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` pub fn deregister(&self, handle: &E) -> io::Result<()> where E: Evented @@ -853,7 +905,7 @@ impl Poll { trace!("deregistering handle with poller"); // Deregister interests for this socket - try!(handle.deregister(self)); + handle.deregister(self)?; Ok(()) } @@ -866,7 +918,7 @@ impl Poll { /// been received or `timeout` has elapsed. A `timeout` of `None` means that /// `poll` will block until a readiness event has been received. /// - /// The supplied `events` will be cleared and newly received readinss events + /// The supplied `events` will be cleared and newly received readiness events /// will be pushed onto the end. At most `events.capacity()` events will be /// returned. If there are further pending readiness events, they will be /// returned on the next call to `poll`. @@ -884,7 +936,10 @@ impl Poll { /// /// `poll` returns the number of readiness events that have been pushed into /// `events` or `Err` when an error has been encountered with the system - /// selector. + /// selector. The value returned is deprecated and will be removed in 0.7.0. + /// Accessing the events by index is also deprecated. Events can be + /// inserted by other events triggering, thus making sequential access + /// problematic. Use the iterator API instead. See [`iter`]. /// /// See the [struct] level documentation for a higher level discussion of /// polling. @@ -892,22 +947,25 @@ impl Poll { /// [`readable`]: struct.Ready.html#method.readable /// [`writable`]: struct.Ready.html#method.writable /// [struct]: # + /// [`iter`]: struct.Events.html#method.iter /// /// # Examples /// /// A basic example -- establishing a `TcpStream` connection. 
/// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll, Ready, PollOpt, Token}; - /// use mio::tcp::TcpStream; + /// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// use std::thread; /// /// // Bind a server socket to connect to. - /// let addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); - /// let server = TcpListener::bind(&addr).unwrap(); - /// let addr = server.local_addr().unwrap().clone(); + /// let addr: SocketAddr = "127.0.0.1:0".parse()?; + /// let server = TcpListener::bind(&addr)?; + /// let addr = server.local_addr()?.clone(); /// /// // Spawn a thread to accept the socket /// thread::spawn(move || { @@ -915,32 +973,50 @@ impl Poll { /// }); /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into - /// let poll = Poll::new().unwrap(); + /// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream - /// let stream = TcpStream::connect(&addr).unwrap(); + /// let stream = TcpStream::connect(&addr)?; /// /// // Register the stream with `Poll` - /// poll.register(&stream, Token(0), Ready::all(), PollOpt::edge()).unwrap(); + /// poll.register(&stream, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?; /// /// // Wait for the socket to become ready. This has to happens in a loop to /// // handle spurious wakeups. 
/// loop { - /// poll.poll(&mut events, None).unwrap(); + /// poll.poll(&mut events, None)?; /// /// for event in &events { /// if event.token() == Token(0) && event.readiness().is_writable() { /// // The socket connected (probably, it could still be a spurious /// // wakeup) - /// return; + /// return Ok(()); /// } /// } /// } + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` /// /// [struct]: # - pub fn poll(&self, events: &mut Events, mut timeout: Option) -> io::Result { + pub fn poll(&self, events: &mut Events, timeout: Option) -> io::Result { + self.poll1(events, timeout, false) + } + + /// Like `poll`, but may be interrupted by a signal + /// + /// If `poll` is inturrupted while blocking, it will transparently retry the syscall. If you + /// want to handle signals yourself, however, use `poll_interruptible`. + pub fn poll_interruptible(&self, events: &mut Events, timeout: Option) -> io::Result { + self.poll1(events, timeout, true) + } + + fn poll1(&self, events: &mut Events, mut timeout: Option, interruptible: bool) -> io::Result { let zero = Some(Duration::from_millis(0)); // At a high level, the synchronization strategy is to acquire access to @@ -1058,7 +1134,7 @@ impl Poll { } } - let ret = self.poll2(events, timeout); + let ret = self.poll2(events, timeout, interruptible); // Release the lock if 1 != self.lock_state.fetch_and(!1, Release) { @@ -1073,72 +1149,74 @@ impl Poll { } #[inline] - fn poll2(&self, events: &mut Events, timeout: Option) -> io::Result { + fn poll2(&self, events: &mut Events, mut timeout: Option, interruptible: bool) -> io::Result { // Compute the timeout value passed to the system selector. If the // readiness queue has pending nodes, we still want to poll the system // selector for new events, but we don't want to block the thread to // wait for new events. 
- let timeout = if timeout == Some(Duration::from_millis(0)) { + if timeout == Some(Duration::from_millis(0)) { // If blocking is not requested, then there is no need to prepare // the queue for sleep - timeout } else if self.readiness_queue.prepare_for_sleep() { // The readiness queue is empty. The call to `prepare_for_sleep` // inserts `sleep_marker` into the queue. This signals to any // threads setting readiness that the `Poll::poll` is going to // sleep, so the awakener should be used. - timeout } else { // The readiness queue is not empty, so do not block the thread. - Some(Duration::from_millis(0)) - }; + timeout = Some(Duration::from_millis(0)); + } - // First get selector events - let res = self.selector.select(&mut events.inner, AWAKEN, timeout); - - if try!(res) { - // Some awakeners require reading from a FD. - self.readiness_queue.inner.awakener.cleanup(); + loop { + let now = Instant::now(); + // First get selector events + let res = self.selector.select(&mut events.inner, AWAKEN, timeout); + match res { + Ok(true) => { + // Some awakeners require reading from a FD. 
+ self.readiness_queue.inner.awakener.cleanup(); + break; + } + Ok(false) => break, + Err(ref e) if e.kind() == io::ErrorKind::Interrupted && !interruptible => { + // Interrupted by a signal; update timeout if necessary and retry + if let Some(to) = timeout { + let elapsed = now.elapsed(); + if elapsed >= to { + break; + } else { + timeout = Some(to - elapsed); + } + } + } + Err(e) => return Err(e), + } } // Poll custom event queue self.readiness_queue.poll(&mut events.inner); // Return number of polled events - Ok(events.len()) + Ok(events.inner.len()) } } -#[cfg(unix)] -fn registerable(interest: Ready) -> bool { - let unixinterest = UnixReady::from(interest); - unixinterest.is_readable() || unixinterest.is_writable() || unixinterest.is_aio() -} - -#[cfg(not(unix))] -fn registerable(interest: Ready) -> bool { - interest.is_readable() || interest.is_writable() -} - -fn validate_args(token: Token, interest: Ready) -> io::Result<()> { +fn validate_args(token: Token) -> io::Result<()> { if token == AWAKEN { return Err(io::Error::new(io::ErrorKind::Other, "invalid token")); } - if !registerable(interest) { - return Err(io::Error::new(io::ErrorKind::Other, "interest must include readable or writable or aio")); - } - Ok(()) } impl fmt::Debug for Poll { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Poll") + fmt.debug_struct("Poll") + .finish() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl AsRawFd for Poll { fn as_raw_fd(&self) -> RawFd { self.selector.as_raw_fd() @@ -1148,34 +1226,41 @@ impl AsRawFd for Poll { /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`] and will be used to -/// receive any new readiness events received since the last call to [`poll`]. -/// Usually, a single `Events` instance is created at the same time as the -/// [`Poll`] and the single instance is reused for each call to [`poll`]. +/// receive any new readiness events received since the last poll. 
Usually, a +/// single `Events` instance is created at the same time as a [`Poll`] and +/// reused on each call to [`Poll::poll`]. /// /// See [`Poll`] for more documentation on polling. /// /// # Examples /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll}; /// use std::time::Duration; /// /// let mut events = Events::with_capacity(1024); -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// /// assert_eq!(0, events.len()); /// /// // Register `Evented` handles with `poll` /// -/// poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; /// /// for event in &events { /// println!("event={:?}", event); /// } +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` /// /// [`Poll::poll`]: struct.Poll.html#method.poll -/// [`poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { inner: sys::Events, @@ -1188,29 +1273,73 @@ pub struct Events { /// # Examples /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll}; /// use std::time::Duration; /// /// let mut events = Events::with_capacity(1024); -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// /// // Register handles with `poll` /// -/// poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; /// /// for event in events.iter() { /// println!("event={:?}", event); /// } +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` /// /// [`Events`]: struct.Events.html /// [`iter`]: struct.Events.html#method.iter -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Iter<'a> { inner: &'a Events, pos: usize, } +/// Owned [`Events`] iterator. +/// +/// This struct is created by the `into_iter` method on [`Events`]. 
+/// +/// # Examples +/// +/// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { +/// use mio::{Events, Poll}; +/// use std::time::Duration; +/// +/// let mut events = Events::with_capacity(1024); +/// let poll = Poll::new()?; +/// +/// // Register handles with `poll` +/// +/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; +/// +/// for event in events { +/// println!("event={:?}", event); +/// } +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } +/// ``` +/// [`Events`]: struct.Events.html +#[derive(Debug)] +pub struct IntoIter { + inner: Events, + pos: usize, +} + impl Events { /// Return a new `Events` capable of holding up to `capacity` events. /// @@ -1229,41 +1358,14 @@ impl Events { } } - /// Returns the `Event` at the given index, or `None` if the index is out of - /// bounds. - /// - /// # Examples - /// - /// ``` - /// use mio::{Events, Poll}; - /// use std::time::Duration; - /// - /// let mut events = Events::with_capacity(1024); - /// let poll = Poll::new().unwrap(); - /// - /// // Register handles with `poll` - /// - /// let n = poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); - /// - /// for i in 0..n { - /// println!("event={:?}", events.get(i).unwrap()); - /// } - /// ``` + #[deprecated(since="0.6.10", note="Index access removed in favor of iterator only API.")] + #[doc(hidden)] pub fn get(&self, idx: usize) -> Option { self.inner.get(idx) } - /// Returns the number of `Event` values currently in `self`. 
- /// - /// # Examples - /// - /// ``` - /// use mio::Events; - /// - /// let events = Events::with_capacity(1024); - /// - /// assert_eq!(0, events.len()); - /// ``` + #[doc(hidden)] + #[deprecated(since="0.6.10", note="Index access removed in favor of iterator only API.")] pub fn len(&self) -> usize { self.inner.len() } @@ -1301,19 +1403,27 @@ impl Events { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Poll}; /// use std::time::Duration; /// /// let mut events = Events::with_capacity(1024); - /// let poll = Poll::new().unwrap(); + /// let poll = Poll::new()?; /// /// // Register handles with `poll` /// - /// poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); + /// poll.poll(&mut events, Some(Duration::from_millis(100)))?; /// /// for event in events.iter() { /// println!("event={:?}", event); /// } + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` pub fn iter(&self) -> Iter { Iter { @@ -1321,6 +1431,39 @@ impl Events { pos: 0 } } + + /// Clearing all `Event` values from container explicitly. 
+ /// + /// # Examples + /// + /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { + /// use mio::{Events, Poll}; + /// use std::time::Duration; + /// + /// let mut events = Events::with_capacity(1024); + /// let poll = Poll::new()?; + /// + /// // Register handles with `poll` + /// for _ in 0..2 { + /// events.clear(); + /// poll.poll(&mut events, Some(Duration::from_millis(100)))?; + /// + /// for event in events.iter() { + /// println!("event={:?}", event); + /// } + /// } + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` + pub fn clear(&mut self) { + self.inner.clear(); + } } impl<'a> IntoIterator for &'a Events { @@ -1336,7 +1479,29 @@ impl<'a> Iterator for Iter<'a> { type Item = Event; fn next(&mut self) -> Option { - let ret = self.inner.get(self.pos); + let ret = self.inner.inner.get(self.pos); + self.pos += 1; + ret + } +} + +impl IntoIterator for Events { + type Item = Event; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { + inner: self, + pos: 0, + } + } +} + +impl Iterator for IntoIter { + type Item = Event; + + fn next(&mut self) -> Option { + let ret = self.inner.inner.get(self.pos); self.pos += 1; ret } @@ -1345,7 +1510,6 @@ impl<'a> Iterator for Iter<'a> { impl fmt::Debug for Events { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Events") - .field("len", &self.len()) .field("capacity", &self.capacity()) .finish() } @@ -1381,6 +1545,8 @@ impl Registration { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Ready, Registration, Poll, PollOpt, Token}; /// use std::thread; /// @@ -1393,8 +1559,8 @@ impl Registration { /// set_readiness.set_readiness(Ready::readable()); /// }); /// - /// let poll = Poll::new().unwrap(); - /// poll.register(®istration, Token(0), Ready::all(), PollOpt::edge()).unwrap(); + /// let poll = Poll::new()?; + /// 
poll.register(®istration, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge())?; /// /// let mut events = Events::with_capacity(256); /// @@ -1403,11 +1569,16 @@ impl Registration { /// /// for event in &events { /// if event.token() == Token(0) && event.readiness().is_readable() { - /// return; + /// return Ok(()); /// } /// } /// } - /// + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` /// [struct]: # /// [`Poll`]: struct.Poll.html @@ -1536,14 +1707,22 @@ impl SetReadiness { /// # Examples /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Registration, Ready}; /// /// let (registration, set_readiness) = Registration::new2(); /// /// assert!(set_readiness.readiness().is_empty()); /// - /// set_readiness.set_readiness(Ready::readable()).unwrap(); + /// set_readiness.set_readiness(Ready::readable())?; /// assert!(set_readiness.readiness().is_readable()); + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` pub fn readiness(&self) -> Ready { self.inner.readiness() @@ -1552,9 +1731,9 @@ impl SetReadiness { /// Set the registration's readiness /// /// If the associated `Registration` is registered with a [`Poll`] instance - /// and has requested readiness events that include `ready`, then a call - /// [`poll`] will receive a readiness event representing the readiness - /// state change. + /// and has requested readiness events that include `ready`, then a future + /// call to [`Poll::poll`] will receive a readiness event representing the + /// readiness state change. 
/// /// # Note /// @@ -1568,28 +1747,36 @@ impl SetReadiness { /// work: /// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Registration, Ready, Poll, PollOpt, Token}; /// - /// let poll = Poll::new().unwrap(); + /// let poll = Poll::new()?; /// let (registration, set_readiness) = Registration::new2(); /// /// poll.register(®istration, /// Token(0), /// Ready::readable(), - /// PollOpt::edge()).unwrap(); + /// PollOpt::edge())?; /// /// // Set the readiness, then immediately poll to try to get the readiness /// // event - /// set_readiness.set_readiness(Ready::readable()).unwrap(); + /// set_readiness.set_readiness(Ready::readable())?; /// /// let mut events = Events::with_capacity(1024); - /// poll.poll(&mut events, None).unwrap(); + /// poll.poll(&mut events, None)?; /// /// // There is NO guarantee that the following will work. It is possible /// // that the readiness event will be delivered at a later time. /// let event = events.get(0).unwrap(); /// assert_eq!(event.token(), Token(0)); /// assert!(event.readiness().is_readable()); + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` /// /// # Examples @@ -1598,19 +1785,28 @@ impl SetReadiness { /// documentation. 
/// /// ``` + /// # use std::error::Error; + /// # fn try_main() -> Result<(), Box> { /// use mio::{Registration, Ready}; /// /// let (registration, set_readiness) = Registration::new2(); /// /// assert!(set_readiness.readiness().is_empty()); /// - /// set_readiness.set_readiness(Ready::readable()).unwrap(); + /// set_readiness.set_readiness(Ready::readable())?; /// assert!(set_readiness.readiness().is_readable()); + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } /// ``` /// /// [`Registration`]: struct.Registration.html + /// [`Evented`]: event/trait.Evented.html#examples /// [`Poll`]: struct.Poll.html - /// [`poll`]: struct.Poll.html#method.poll + /// [`Poll::poll`]: struct.Poll.html#method.poll pub fn set_readiness(&self, ready: Ready) -> io::Result<()> { self.inner.set_readiness(ready) } @@ -1618,7 +1814,8 @@ impl SetReadiness { impl fmt::Debug for SetReadiness { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "SetReadiness") + f.debug_struct("SetReadiness") + .finish() } } @@ -1666,7 +1863,7 @@ impl RegistrationInner { if !state.is_queued() && next.is_queued() { // We toggled the queued flag, making us responsible for queuing the // node in the MPSC readiness queue. - try!(self.enqueue_with_wakeup()); + self.enqueue_with_wakeup()?; } Ok(()) @@ -1824,7 +2021,7 @@ impl RegistrationInner { if !state.is_queued() && next.is_queued() { // We are responsible for enqueing the node. - try!(enqueue_with_wakeup(queue, self)); + enqueue_with_wakeup(queue, self)?; } Ok(()) @@ -1902,7 +2099,7 @@ impl ReadinessQueue { Ok(ReadinessQueue { inner: Arc::new(ReadinessQueueInner { - awakener: try!(sys::Awakener::new()), + awakener: sys::Awakener::new()?, head_readiness: AtomicPtr::new(ptr), tail_readiness: UnsafeCell::new(ptr), end_marker: end_marker, @@ -2012,7 +2209,7 @@ impl ReadinessQueue { /// Prepare the queue for the `Poll::poll` thread to block in the system /// selector. 
This involves changing `head_readiness` to `sleep_marker`. - /// Returns true if successfull and `poll` can block. + /// Returns true if successful and `poll` can block. fn prepare_for_sleep(&self) -> bool { let end_marker = self.inner.end_marker(); let sleep_marker = self.inner.sleep_marker(); @@ -2033,6 +2230,16 @@ impl ReadinessQueue { return false; } + // The sleep marker is *not* currently in the readiness queue. + // + // The sleep marker is only inserted in this function. It is also only + // inserted in the tail position. This is guaranteed by first checking + // that the end marker is in the tail position, pushing the sleep marker + // after the end marker, then removing the end marker. + // + // Before inserting a node into the queue, the next pointer has to be + // set to null. Again, this is only safe to do when the node is not + // currently in the queue, but we already have ensured this. self.inner.sleep_marker.next_readiness.store(ptr::null_mut(), Relaxed); let actual = self.inner.head_readiness.compare_and_swap( @@ -2094,7 +2301,7 @@ impl ReadinessQueueInner { /// with relaxed ordering. Returns true if `Poll` needs to be woken up. fn enqueue_node_with_wakeup(&self, node: &ReadinessNode) -> io::Result<()> { if self.enqueue_node(node) { - try!(self.wakeup()); + self.wakeup()?; } Ok(()) @@ -2519,7 +2726,7 @@ impl Clone for SelectorId { } #[test] -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] pub fn as_raw_fd() { let poll = Poll::new().unwrap(); assert!(poll.as_raw_fd() > 0); diff --git a/third_party/rust/mio/src/sys/fuchsia/awakener.rs b/third_party/rust/mio/src/sys/fuchsia/awakener.rs new file mode 100644 index 000000000000..19bc76242906 --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/awakener.rs @@ -0,0 +1,73 @@ +use {io, poll, Evented, Ready, Poll, PollOpt, Token}; +use zircon; +use std::sync::{Arc, Mutex, Weak}; + +pub struct Awakener { + /// Token and weak reference to the port on which Awakener was registered. 
+ /// + /// When `Awakener::wakeup` is called, these are used to send a wakeup message to the port. + inner: Mutex)>>, +} + +impl Awakener { + /// Create a new `Awakener`. + pub fn new() -> io::Result { + Ok(Awakener { + inner: Mutex::new(None) + }) + } + + /// Send a wakeup signal to the `Selector` on which the `Awakener` was registered. + pub fn wakeup(&self) -> io::Result<()> { + let inner_locked = self.inner.lock().unwrap(); + let &(token, ref weak_port) = + inner_locked.as_ref().expect("Called wakeup on unregistered awakener."); + + let port = weak_port.upgrade().expect("Tried to wakeup a closed port."); + + let status = 0; // arbitrary + let packet = zircon::Packet::from_user_packet( + token.0 as u64, status, zircon::UserPacket::from_u8_array([0; 32])); + + Ok(port.queue(&packet)?) + } + + pub fn cleanup(&self) {} +} + +impl Evented for Awakener { + fn register(&self, + poll: &Poll, + token: Token, + _events: Ready, + _opts: PollOpt) -> io::Result<()> + { + let mut inner_locked = self.inner.lock().unwrap(); + if inner_locked.is_some() { + panic!("Called register on already-registered Awakener."); + } + *inner_locked = Some((token, Arc::downgrade(poll::selector(poll).port()))); + + Ok(()) + } + + fn reregister(&self, + poll: &Poll, + token: Token, + _events: Ready, + _opts: PollOpt) -> io::Result<()> + { + let mut inner_locked = self.inner.lock().unwrap(); + *inner_locked = Some((token, Arc::downgrade(poll::selector(poll).port()))); + + Ok(()) + } + + fn deregister(&self, _poll: &Poll) -> io::Result<()> + { + let mut inner_locked = self.inner.lock().unwrap(); + *inner_locked = None; + + Ok(()) + } +} \ No newline at end of file diff --git a/third_party/rust/mio/src/sys/fuchsia/eventedfd.rs b/third_party/rust/mio/src/sys/fuchsia/eventedfd.rs new file mode 100644 index 000000000000..e23d0c4a1ee0 --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/eventedfd.rs @@ -0,0 +1,263 @@ +use {io, poll, Evented, Ready, Poll, PollOpt, Token}; +use libc; +use zircon; 
+use zircon::AsHandleRef; +use sys::fuchsia::{DontDrop, poll_opts_to_wait_async, sys}; +use std::mem; +use std::os::unix::io::RawFd; +use std::sync::{Arc, Mutex}; + +/// Properties of an `EventedFd`'s current registration +#[derive(Debug)] +pub struct EventedFdRegistration { + token: Token, + handle: DontDrop, + rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>, +} + +impl EventedFdRegistration { + unsafe fn new(token: Token, + raw_handle: sys::zx_handle_t, + rereg_signals: Option<(zircon::Signals, zircon::WaitAsyncOpts)>, + ) -> Self + { + EventedFdRegistration { + token: token, + handle: DontDrop::new(zircon::Handle::from_raw(raw_handle)), + rereg_signals: rereg_signals + } + } + + pub fn rereg_signals(&self) -> Option<(zircon::Signals, zircon::WaitAsyncOpts)> { + self.rereg_signals + } +} + +/// An event-ed file descriptor. The file descriptor is owned by this structure. +#[derive(Debug)] +pub struct EventedFdInner { + /// Properties of the current registration. + registration: Mutex>, + + /// Owned file descriptor. + /// + /// `fd` is closed on `Drop`, so modifying `fd` is a memory-unsafe operation. + fd: RawFd, + + /// Owned `fdio_t` pointer. 
+ fdio: *const sys::fdio_t, +} + +impl EventedFdInner { + pub fn rereg_for_level(&self, port: &zircon::Port) { + let registration_opt = self.registration.lock().unwrap(); + if let Some(ref registration) = *registration_opt { + if let Some((rereg_signals, rereg_opts)) = registration.rereg_signals { + let _res = + registration + .handle.inner_ref() + .wait_async_handle( + port, + registration.token.0 as u64, + rereg_signals, + rereg_opts); + } + } + } + + pub fn registration(&self) -> &Mutex> { + &self.registration + } + + pub fn fdio(&self) -> &sys::fdio_t { + unsafe { &*self.fdio } + } +} + +impl Drop for EventedFdInner { + fn drop(&mut self) { + unsafe { + sys::__fdio_release(self.fdio); + let _ = libc::close(self.fd); + } + } +} + +// `EventedInner` must be manually declared `Send + Sync` because it contains a `RawFd` and a +// `*const sys::fdio_t`. These are only used to make thread-safe system calls, so accessing +// them is entirely thread-safe. +// +// Note: one minor exception to this are the calls to `libc::close` and `__fdio_release`, which +// happen on `Drop`. These accesses are safe because `drop` can only be called at most once from +// a single thread, and after it is called no other functions can be called on the `EventedFdInner`. 
+unsafe impl Sync for EventedFdInner {} +unsafe impl Send for EventedFdInner {} + +#[derive(Clone, Debug)] +pub struct EventedFd { + pub inner: Arc +} + +impl EventedFd { + pub unsafe fn new(fd: RawFd) -> Self { + let fdio = sys::__fdio_fd_to_io(fd); + assert!(fdio != ::std::ptr::null(), "FileDescriptor given to EventedFd must be valid."); + + EventedFd { + inner: Arc::new(EventedFdInner { + registration: Mutex::new(None), + fd: fd, + fdio: fdio, + }) + } + } + + fn handle_and_signals_for_events(&self, interest: Ready, opts: PollOpt) + -> (sys::zx_handle_t, zircon::Signals) + { + let epoll_events = ioevent_to_epoll(interest, opts); + + unsafe { + let mut raw_handle: sys::zx_handle_t = mem::uninitialized(); + let mut signals: sys::zx_signals_t = mem::uninitialized(); + sys::__fdio_wait_begin(self.inner.fdio, epoll_events, &mut raw_handle, &mut signals); + + (raw_handle, signals) + } + } + + fn register_with_lock( + &self, + registration: &mut Option, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + if registration.is_some() { + return Err(io::Error::new( + io::ErrorKind::AlreadyExists, + "Called register on an already registered file descriptor.")); + } + + let (raw_handle, signals) = self.handle_and_signals_for_events(interest, opts); + + let needs_rereg = opts.is_level() && !opts.is_oneshot(); + + // If we need to reregister, then each registration should be `oneshot` + let opts = opts | if needs_rereg { PollOpt::oneshot() } else { PollOpt::empty() }; + + let rereg_signals = if needs_rereg { + Some((signals, poll_opts_to_wait_async(opts))) + } else { + None + }; + + *registration = Some( + unsafe { EventedFdRegistration::new(token, raw_handle, rereg_signals) } + ); + + // We don't have ownership of the handle, so we can't drop it + let handle = DontDrop::new(unsafe { zircon::Handle::from_raw(raw_handle) }); + + let registered = poll::selector(poll) + .register_fd(handle.inner_ref(), self, token, signals, opts); + + if 
registered.is_err() { + *registration = None; + } + + registered + } + + fn deregister_with_lock( + &self, + registration: &mut Option, + poll: &Poll) -> io::Result<()> + { + let old_registration = if let Some(old_reg) = registration.take() { + old_reg + } else { + return Err(io::Error::new( + io::ErrorKind::NotFound, + "Called rereregister on an unregistered file descriptor.")) + }; + + poll::selector(poll) + .deregister_fd(old_registration.handle.inner_ref(), old_registration.token) + } +} + +impl Evented for EventedFd { + fn register(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.register_with_lock( + &mut *self.inner.registration.lock().unwrap(), + poll, + token, + interest, + opts) + } + + fn reregister(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + // Take out the registration lock + let mut registration_lock = self.inner.registration.lock().unwrap(); + + // Deregister + self.deregister_with_lock(&mut *registration_lock, poll)?; + + self.register_with_lock( + &mut *registration_lock, + poll, + token, + interest, + opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + let mut registration_lock = self.inner.registration.lock().unwrap(); + self.deregister_with_lock(&mut *registration_lock, poll) + } +} + +fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 { + use event_imp::ready_from_usize; + const HUP: usize = 0b01000; + + let mut kind = 0; + + if interest.is_readable() { + kind |= libc::EPOLLIN; + } + + if interest.is_writable() { + kind |= libc::EPOLLOUT; + } + + if interest.contains(ready_from_usize(HUP)) { + kind |= libc::EPOLLRDHUP; + } + + if opts.is_edge() { + kind |= libc::EPOLLET; + } + + if opts.is_oneshot() { + kind |= libc::EPOLLONESHOT; + } + + if opts.is_level() { + kind &= !libc::EPOLLET; + } + + kind as u32 +} diff --git a/third_party/rust/mio/src/sys/fuchsia/handles.rs 
b/third_party/rust/mio/src/sys/fuchsia/handles.rs new file mode 100644 index 000000000000..ae6f07f6d959 --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/handles.rs @@ -0,0 +1,78 @@ +use {io, poll, Evented, Ready, Poll, PollOpt, Token}; +use zircon_sys::zx_handle_t; +use std::sync::Mutex; + +/// Wrapper for registering a `HandleBase` type with mio. +#[derive(Debug)] +pub struct EventedHandle { + /// The handle to be registered. + handle: zx_handle_t, + + /// The current `Token` with which the handle is registered with mio. + token: Mutex>, +} + +impl EventedHandle { + /// Create a new `EventedHandle` which can be registered with mio + /// in order to receive event notifications. + /// + /// The underlying handle must not be dropped while the + /// `EventedHandle` still exists. + pub unsafe fn new(handle: zx_handle_t) -> Self { + EventedHandle { + handle: handle, + token: Mutex::new(None), + } + } + + /// Get the underlying handle being registered. + pub fn get_handle(&self) -> zx_handle_t { + self.handle + } +} + +impl Evented for EventedHandle { + fn register(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + let mut this_token = self.token.lock().unwrap(); + { + poll::selector(poll).register_handle(self.handle, token, interest, opts)?; + *this_token = Some(token); + } + Ok(()) + } + + fn reregister(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + let mut this_token = self.token.lock().unwrap(); + { + poll::selector(poll).deregister_handle(self.handle, token)?; + *this_token = None; + poll::selector(poll).register_handle(self.handle, token, interest, opts)?; + *this_token = Some(token); + } + Ok(()) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + let mut this_token = self.token.lock().unwrap(); + let token = if let Some(token) = *this_token { token } else { + return Err(io::Error::new( + io::ErrorKind::NotFound, + "Attempted to deregister an 
unregistered handle.")) + }; + { + poll::selector(poll).deregister_handle(self.handle, token)?; + *this_token = None; + } + Ok(()) + } +} diff --git a/third_party/rust/mio/src/sys/fuchsia/mod.rs b/third_party/rust/mio/src/sys/fuchsia/mod.rs new file mode 100644 index 000000000000..10728fc8dcd5 --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/mod.rs @@ -0,0 +1,177 @@ +use {io, Ready, PollOpt}; +use libc; +use zircon; +use std::mem; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::ops::{Deref, DerefMut}; +use std::os::unix::io::RawFd; + +mod awakener; +mod handles; +mod eventedfd; +mod net; +mod ready; +mod selector; + +use self::eventedfd::{EventedFd, EventedFdInner}; +use self::ready::assert_fuchsia_ready_repr; + +pub use self::awakener::Awakener; +pub use self::handles::EventedHandle; +pub use self::net::{TcpListener, TcpStream, UdpSocket}; +pub use self::selector::{Events, Selector}; +pub use self::ready::{FuchsiaReady, zx_signals_t}; + +// Set non-blocking (workaround since the std version doesn't work in fuchsia) +// TODO: fix the std version and replace this +pub fn set_nonblock(fd: RawFd) -> io::Result<()> { + cvt(unsafe { libc::fcntl(fd, libc::F_SETFL, libc::O_NONBLOCK) }).map(|_| ()) +} + +/// Workaround until fuchsia's recv_from is fixed +unsafe fn recv_from(fd: RawFd, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + let flags = 0; + + let n = cvt( + libc::recv(fd, + buf.as_mut_ptr() as *mut libc::c_void, + buf.len(), + flags) + )?; + + // random address-- we don't use it + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + Ok((n as usize, addr)) +} + +mod sys { + #![allow(non_camel_case_types)] + use std::os::unix::io::RawFd; + pub use zircon_sys::{zx_handle_t, zx_signals_t}; + + // 17 fn pointers we don't need for mio :) + pub type fdio_ops_t = [usize; 17]; + + pub type atomic_int_fast32_t = usize; // TODO: https://github.com/rust-lang/libc/issues/631 + + #[repr(C)] + pub struct fdio_t { + pub ops: *const 
fdio_ops_t, + pub magic: u32, + pub refcount: atomic_int_fast32_t, + pub dupcount: u32, + pub flags: u32, + } + + #[link(name="fdio")] + extern { + pub fn __fdio_fd_to_io(fd: RawFd) -> *const fdio_t; + pub fn __fdio_release(io: *const fdio_t); + + pub fn __fdio_wait_begin( + io: *const fdio_t, + events: u32, + handle_out: &mut zx_handle_t, + signals_out: &mut zx_signals_t, + ); + pub fn __fdio_wait_end( + io: *const fdio_t, + signals: zx_signals_t, + events_out: &mut u32, + ); + } +} + +fn epoll_event_to_ready(epoll: u32) -> Ready { + let epoll = epoll as i32; // casts the bits directly + let mut kind = Ready::empty(); + + if (epoll & libc::EPOLLIN) != 0 || (epoll & libc::EPOLLPRI) != 0 { + kind = kind | Ready::readable(); + } + + if (epoll & libc::EPOLLOUT) != 0 { + kind = kind | Ready::writable(); + } + + kind + + /* TODO: support? + // EPOLLHUP - Usually means a socket error happened + if (epoll & libc::EPOLLERR) != 0 { + kind = kind | UnixReady::error(); + } + + if (epoll & libc::EPOLLRDHUP) != 0 || (epoll & libc::EPOLLHUP) != 0 { + kind = kind | UnixReady::hup(); + } + */ +} + +fn poll_opts_to_wait_async(poll_opts: PollOpt) -> zircon::WaitAsyncOpts { + if poll_opts.is_oneshot() { + zircon::WaitAsyncOpts::Once + } else { + zircon::WaitAsyncOpts::Repeating + } +} + +trait IsMinusOne { + fn is_minus_one(&self) -> bool; +} + +impl IsMinusOne for i32 { + fn is_minus_one(&self) -> bool { *self == -1 } +} + +impl IsMinusOne for isize { + fn is_minus_one(&self) -> bool { *self == -1 } +} + +fn cvt(t: T) -> ::io::Result { + use std::io; + + if t.is_minus_one() { + Err(io::Error::last_os_error()) + } else { + Ok(t) + } +} + +/// Utility type to prevent the type inside of it from being dropped. 
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +struct DontDrop(Option); + +impl DontDrop { + fn new(t: T) -> DontDrop { + DontDrop(Some(t)) + } + + fn inner_ref(&self) -> &T { + self.0.as_ref().unwrap() + } + + fn inner_mut(&mut self) -> &mut T { + self.0.as_mut().unwrap() + } +} + +impl Deref for DontDrop { + type Target = T; + fn deref(&self) -> &Self::Target { + self.inner_ref() + } +} + +impl DerefMut for DontDrop { + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner_mut() + } +} + +impl Drop for DontDrop { + fn drop(&mut self) { + let inner = self.0.take(); + mem::forget(inner); + } +} diff --git a/third_party/rust/mio/src/sys/fuchsia/net.rs b/third_party/rust/mio/src/sys/fuchsia/net.rs new file mode 100644 index 000000000000..d43ad27bb555 --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/net.rs @@ -0,0 +1,444 @@ +use {io, Evented, Ready, Poll, PollOpt, Token}; +use iovec::IoVec; +use iovec::unix as iovec; +use libc; +use net2::TcpStreamExt; +#[allow(unused_imports)] // only here for Rust 1.8 +use net2::UdpSocketExt; +use sys::fuchsia::{recv_from, set_nonblock, EventedFd, DontDrop}; +use std::cmp; +use std::io::{Read, Write}; +use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::os::unix::io::AsRawFd; +use std::time::Duration; + +#[derive(Debug)] +pub struct TcpStream { + io: DontDrop, + evented_fd: EventedFd, +} + +impl TcpStream { + pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result { + try!(set_nonblock(stream.as_raw_fd())); + + let connected = stream.connect(addr); + match connected { + Ok(..) 
=> {} + Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(e) => return Err(e), + } + + let evented_fd = unsafe { EventedFd::new(stream.as_raw_fd()) }; + + return Ok(TcpStream { + io: DontDrop::new(stream), + evented_fd: evented_fd, + }) + } + + pub fn from_stream(stream: net::TcpStream) -> TcpStream { + let evented_fd = unsafe { EventedFd::new(stream.as_raw_fd()) }; + + TcpStream { + io: DontDrop::new(stream), + evented_fd: evented_fd, + } + } + + pub fn peer_addr(&self) -> io::Result { + self.io.peer_addr() + } + + pub fn local_addr(&self) -> io::Result { + self.io.local_addr() + } + + pub fn try_clone(&self) -> io::Result { + self.io.try_clone().map(|s| { + let evented_fd = unsafe { EventedFd::new(s.as_raw_fd()) }; + TcpStream { + io: DontDrop::new(s), + evented_fd: evented_fd, + } + }) + } + + pub fn shutdown(&self, how: net::Shutdown) -> io::Result<()> { + self.io.shutdown(how) + } + + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + self.io.set_nodelay(nodelay) + } + + pub fn nodelay(&self) -> io::Result { + self.io.nodelay() + } + + pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.set_recv_buffer_size(size) + } + + pub fn recv_buffer_size(&self) -> io::Result { + self.io.recv_buffer_size() + } + + pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.set_send_buffer_size(size) + } + + pub fn send_buffer_size(&self) -> io::Result { + self.io.send_buffer_size() + } + + pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { + self.io.set_keepalive(keepalive) + } + + pub fn keepalive(&self) -> io::Result> { + self.io.keepalive() + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.set_ttl(ttl) + } + + pub fn ttl(&self) -> io::Result { + self.io.ttl() + } + + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.io.set_only_v6(only_v6) + } + + pub fn only_v6(&self) -> io::Result { + self.io.only_v6() + } + + pub fn set_linger(&self, 
dur: Option) -> io::Result<()> { + self.io.set_linger(dur) + } + + pub fn linger(&self) -> io::Result> { + self.io.linger() + } + + pub fn take_error(&self) -> io::Result> { + self.io.take_error() + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + self.io.peek(buf) + } + + pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result { + unsafe { + let slice = iovec::as_os_slice_mut(bufs); + let len = cmp::min(::max_value() as usize, slice.len()); + let rc = libc::readv(self.io.as_raw_fd(), + slice.as_ptr(), + len as libc::c_int); + if rc < 0 { + Err(io::Error::last_os_error()) + } else { + Ok(rc as usize) + } + } + } + + pub fn writev(&self, bufs: &[&IoVec]) -> io::Result { + unsafe { + let slice = iovec::as_os_slice(bufs); + let len = cmp::min(::max_value() as usize, slice.len()); + let rc = libc::writev(self.io.as_raw_fd(), + slice.as_ptr(), + len as libc::c_int); + if rc < 0 { + Err(io::Error::last_os_error()) + } else { + Ok(rc as usize) + } + } + } +} + +impl<'a> Read for &'a TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.io.inner_ref().read(buf) + } +} + +impl<'a> Write for &'a TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.io.inner_ref().write(buf) + } + fn flush(&mut self) -> io::Result<()> { + self.io.inner_ref().flush() + } +} + +impl Evented for TcpStream { + fn register(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.evented_fd.register(poll, token, interest, opts) + } + + fn reregister(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.evented_fd.reregister(poll, token, interest, opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + self.evented_fd.deregister(poll) + } +} + +#[derive(Debug)] +pub struct TcpListener { + io: DontDrop, + evented_fd: EventedFd, +} + +impl TcpListener { + pub fn new(inner: net::TcpListener) -> io::Result { + set_nonblock(inner.as_raw_fd())?; + + let 
evented_fd = unsafe { EventedFd::new(inner.as_raw_fd()) }; + + Ok(TcpListener { + io: DontDrop::new(inner), + evented_fd: evented_fd, + }) + } + + pub fn local_addr(&self) -> io::Result { + self.io.local_addr() + } + + pub fn try_clone(&self) -> io::Result { + self.io.try_clone().map(|io| { + let evented_fd = unsafe { EventedFd::new(io.as_raw_fd()) }; + TcpListener { + io: DontDrop::new(io), + evented_fd: evented_fd, + } + }) + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + self.io.accept().and_then(|(s, a)| { + set_nonblock(s.as_raw_fd())?; + let evented_fd = unsafe { EventedFd::new(s.as_raw_fd()) }; + return Ok((TcpStream { + io: DontDrop::new(s), + evented_fd: evented_fd, + }, a)) + }) + } + + #[allow(deprecated)] + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.io.set_only_v6(only_v6) + } + + #[allow(deprecated)] + pub fn only_v6(&self) -> io::Result { + self.io.only_v6() + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.set_ttl(ttl) + } + + pub fn ttl(&self) -> io::Result { + self.io.ttl() + } + + pub fn take_error(&self) -> io::Result> { + self.io.take_error() + } +} + +impl Evented for TcpListener { + fn register(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.evented_fd.register(poll, token, interest, opts) + } + + fn reregister(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.evented_fd.reregister(poll, token, interest, opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + self.evented_fd.deregister(poll) + } +} + +#[derive(Debug)] +pub struct UdpSocket { + io: DontDrop, + evented_fd: EventedFd, +} + +impl UdpSocket { + pub fn new(socket: net::UdpSocket) -> io::Result { + set_nonblock(socket.as_raw_fd())?; + + let evented_fd = unsafe { EventedFd::new(socket.as_raw_fd()) }; + + Ok(UdpSocket { + io: DontDrop::new(socket), + evented_fd: evented_fd, + }) + } + + pub fn 
local_addr(&self) -> io::Result { + self.io.local_addr() + } + + pub fn try_clone(&self) -> io::Result { + self.io.try_clone().and_then(|io| { + UdpSocket::new(io) + }) + } + + pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result { + self.io.send_to(buf, target) + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unsafe { recv_from(self.io.as_raw_fd(), buf) } + } + + pub fn send(&self, buf: &[u8]) -> io::Result { + self.io.send(buf) + } + + pub fn recv(&self, buf: &mut [u8]) -> io::Result { + self.io.recv(buf) + } + + pub fn connect(&self, addr: SocketAddr) + -> io::Result<()> { + self.io.connect(addr) + } + + pub fn broadcast(&self) -> io::Result { + self.io.broadcast() + } + + pub fn set_broadcast(&self, on: bool) -> io::Result<()> { + self.io.set_broadcast(on) + } + + pub fn multicast_loop_v4(&self) -> io::Result { + self.io.multicast_loop_v4() + } + + pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { + self.io.set_multicast_loop_v4(on) + } + + pub fn multicast_ttl_v4(&self) -> io::Result { + self.io.multicast_ttl_v4() + } + + pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { + self.io.set_multicast_ttl_v4(ttl) + } + + pub fn multicast_loop_v6(&self) -> io::Result { + self.io.multicast_loop_v6() + } + + pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { + self.io.set_multicast_loop_v6(on) + } + + pub fn ttl(&self) -> io::Result { + self.io.ttl() + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.set_ttl(ttl) + } + + pub fn join_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + self.io.join_multicast_v4(multiaddr, interface) + } + + pub fn join_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + self.io.join_multicast_v6(multiaddr, interface) + } + + pub fn leave_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + 
self.io.leave_multicast_v4(multiaddr, interface) + } + + pub fn leave_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + self.io.leave_multicast_v6(multiaddr, interface) + } + + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.io.set_only_v6(only_v6) + } + + pub fn only_v6(&self) -> io::Result { + self.io.only_v6() + } + + + pub fn take_error(&self) -> io::Result> { + self.io.take_error() + } +} + +impl Evented for UdpSocket { + fn register(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.evented_fd.register(poll, token, interest, opts) + } + + fn reregister(&self, + poll: &Poll, + token: Token, + interest: Ready, + opts: PollOpt) -> io::Result<()> + { + self.evented_fd.reregister(poll, token, interest, opts) + } + + fn deregister(&self, poll: &Poll) -> io::Result<()> { + self.evented_fd.deregister(poll) + } +} diff --git a/third_party/rust/mio/src/sys/fuchsia/ready.rs b/third_party/rust/mio/src/sys/fuchsia/ready.rs new file mode 100644 index 000000000000..97854f8c07df --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/ready.rs @@ -0,0 +1,181 @@ +use event_imp::{Ready, ready_as_usize, ready_from_usize}; +pub use zircon_sys::{ + zx_signals_t, + ZX_OBJECT_READABLE, + ZX_OBJECT_WRITABLE, +}; +use std::ops; + +// The following impls are valid because Fuchsia and mio both represent +// "readable" as `1 << 0` and "writable" as `1 << 2`. +// We define this assertion here and call it from `Selector::new`, +// since `Selector:;new` is guaranteed to be called during a standard mio runtime, +// unlike the functions in this file. 
+#[inline] +pub fn assert_fuchsia_ready_repr() { + debug_assert!( + ZX_OBJECT_READABLE.bits() as usize == ready_as_usize(Ready::readable()), + "Zircon ZX_OBJECT_READABLE should have the same repr as Ready::readable()" + ); + debug_assert!( + ZX_OBJECT_WRITABLE.bits() as usize == ready_as_usize(Ready::writable()), + "Zircon ZX_OBJECT_WRITABLE should have the same repr as Ready::writable()" + ); +} + +/// Fuchsia specific extensions to `Ready` +/// +/// Provides additional readiness event kinds that are available on Fuchsia. +/// +/// Conversion traits are implemented between `Ready` and `FuchsiaReady`. +/// +/// For high level documentation on polling and readiness, see [`Poll`]. +/// +/// [`Poll`]: struct.Poll.html +#[derive(Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord)] +pub struct FuchsiaReady(Ready); + +impl FuchsiaReady { + /// Returns the `FuchsiaReady` as raw zircon signals. + /// This function is just a more explicit, non-generic version of + /// `FuchsiaReady::into`. + #[inline] + pub fn into_zx_signals(self) -> zx_signals_t { + zx_signals_t::from_bits_truncate(ready_as_usize(self.0) as u32) + } +} + +impl Into for FuchsiaReady { + #[inline] + fn into(self) -> zx_signals_t { + self.into_zx_signals() + } +} + +impl From for FuchsiaReady { + #[inline] + fn from(src: zx_signals_t) -> Self { + FuchsiaReady(src.into()) + } +} + +impl From for Ready { + #[inline] + fn from(src: zx_signals_t) -> Self { + ready_from_usize(src.bits() as usize) + } +} + +impl From for FuchsiaReady { + #[inline] + fn from(src: Ready) -> FuchsiaReady { + FuchsiaReady(src) + } +} + +impl From for Ready { + #[inline] + fn from(src: FuchsiaReady) -> Ready { + src.0 + } +} + +impl ops::Deref for FuchsiaReady { + type Target = Ready; + + #[inline] + fn deref(&self) -> &Ready { + &self.0 + } +} + +impl ops::DerefMut for FuchsiaReady { + #[inline] + fn deref_mut(&mut self) -> &mut Ready { + &mut self.0 + } +} + +impl ops::BitOr for FuchsiaReady { + type Output = FuchsiaReady; + + 
#[inline] + fn bitor(self, other: FuchsiaReady) -> FuchsiaReady { + (self.0 | other.0).into() + } +} + +impl ops::BitXor for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn bitxor(self, other: FuchsiaReady) -> FuchsiaReady { + (self.0 ^ other.0).into() + } +} + +impl ops::BitAnd for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn bitand(self, other: FuchsiaReady) -> FuchsiaReady { + (self.0 & other.0).into() + } +} + +impl ops::Sub for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn sub(self, other: FuchsiaReady) -> FuchsiaReady { + (self.0 & !other.0).into() + } +} + +#[deprecated(since = "0.6.10", note = "removed")] +#[cfg(feature = "with-deprecated")] +#[doc(hidden)] +impl ops::Not for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn not(self) -> FuchsiaReady { + (!self.0).into() + } +} + +impl ops::BitOr for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn bitor(self, other: zx_signals_t) -> FuchsiaReady { + self | FuchsiaReady::from(other) + } +} + +impl ops::BitXor for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn bitxor(self, other: zx_signals_t) -> FuchsiaReady { + self ^ FuchsiaReady::from(other) + } +} + +impl ops::BitAnd for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn bitand(self, other: zx_signals_t) -> FuchsiaReady { + self & FuchsiaReady::from(other) + } +} + +impl ops::Sub for FuchsiaReady { + type Output = FuchsiaReady; + + #[inline] + fn sub(self, other: zx_signals_t) -> FuchsiaReady { + self - FuchsiaReady::from(other) + } +} diff --git a/third_party/rust/mio/src/sys/fuchsia/selector.rs b/third_party/rust/mio/src/sys/fuchsia/selector.rs new file mode 100644 index 000000000000..27226ac5ff28 --- /dev/null +++ b/third_party/rust/mio/src/sys/fuchsia/selector.rs @@ -0,0 +1,353 @@ +use {io, Event, PollOpt, Ready, Token}; +use sys::fuchsia::{ + assert_fuchsia_ready_repr, + epoll_event_to_ready, + poll_opts_to_wait_async, + EventedFd, + 
EventedFdInner, + FuchsiaReady, +}; +use zircon; +use zircon::AsHandleRef; +use zircon_sys::zx_handle_t; +use std::collections::hash_map; +use std::fmt; +use std::mem; +use std::sync::atomic::{AtomicBool, AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; +use std::sync::{Arc, Mutex, Weak}; +use std::time::Duration; +use sys; + +/// The kind of registration-- file descriptor or handle. +/// +/// The last bit of a token is set to indicate the type of the registration. +#[derive(Copy, Clone, Eq, PartialEq)] +enum RegType { + Fd, + Handle, +} + +fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result { + let key = token.0 as u64; + let msb = 1u64 << 63; + if (key & msb) != 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Most-significant bit of token must remain unset.")); + } + + Ok(match reg_type { + RegType::Fd => key, + RegType::Handle => key | msb, + }) +} + +fn token_and_type_from_key(key: u64) -> (Token, RegType) { + let msb = 1u64 << 63; + ( + Token((key & !msb) as usize), + if (key & msb) == 0 { + RegType::Fd + } else { + RegType::Handle + } + ) +} + +/// Each Selector has a globally unique(ish) ID associated with it. This ID +/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first +/// registered with the `Selector`. If a type that is previously associated with +/// a `Selector` attempts to register itself with a different `Selector`, the +/// operation will return with an error. This matches windows behavior. +static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT; + +pub struct Selector { + id: usize, + + /// Zircon object on which the handles have been registered, and on which events occur + port: Arc, + + /// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt + /// used to prevent having to lock `tokens_to_rereg` when it is empty. 
+ has_tokens_to_rereg: AtomicBool, + + /// List of `Token`s corresponding to registrations that need to be reregistered before the + /// next `port::wait`. This is necessary to provide level-triggered behavior for + /// `Async::repeating` registrations. + /// + /// When a level-triggered `Async::repeating` event is seen, its token is added to this list so + /// that it will be reregistered before the next `port::wait` call, making `port::wait` return + /// immediately if the signal was high during the reregistration. + /// + /// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_ + /// `token_to_fd`. + tokens_to_rereg: Mutex>, + + /// Map from tokens to weak references to `EventedFdInner`-- a structure describing a + /// file handle, its associated `fdio` object, and its current registration. + token_to_fd: Mutex>>, +} + +impl Selector { + pub fn new() -> io::Result { + // Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is + // compatible with Ready. + assert_fuchsia_ready_repr(); + + let port = Arc::new( + zircon::Port::create(zircon::PortOpts::Default)? + ); + + // offset by 1 to avoid choosing 0 as the id of a selector + let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1; + + let has_tokens_to_rereg = AtomicBool::new(false); + let tokens_to_rereg = Mutex::new(Vec::new()); + let token_to_fd = Mutex::new(hash_map::HashMap::new()); + + Ok(Selector { + id: id, + port: port, + has_tokens_to_rereg: has_tokens_to_rereg, + tokens_to_rereg: tokens_to_rereg, + token_to_fd: token_to_fd, + }) + } + + pub fn id(&self) -> usize { + self.id + } + + /// Returns a reference to the underlying port `Arc`. + pub fn port(&self) -> &Arc { &self.port } + + /// Reregisters all registrations pointed to by the `tokens_to_rereg` list + /// if `has_tokens_to_rereg`. 
+ fn reregister_handles(&self) -> io::Result<()> { + // We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg` + // written before the store using `Ordering::Release`. + if self.has_tokens_to_rereg.load(Ordering::Acquire) { + let mut tokens = self.tokens_to_rereg.lock().unwrap(); + let token_to_fd = self.token_to_fd.lock().unwrap(); + for token in tokens.drain(0..) { + if let Some(eventedfd) = token_to_fd.get(&token) + .and_then(|h| h.upgrade()) { + eventedfd.rereg_for_level(&self.port); + } + } + self.has_tokens_to_rereg.store(false, Ordering::Release); + } + Ok(()) + } + + pub fn select(&self, + evts: &mut Events, + _awakener: Token, + timeout: Option) -> io::Result + { + evts.clear(); + + self.reregister_handles()?; + + let deadline = match timeout { + Some(duration) => { + let nanos = duration.as_secs().saturating_mul(1_000_000_000) + .saturating_add(duration.subsec_nanos() as u64); + + zircon::deadline_after(nanos) + } + None => zircon::ZX_TIME_INFINITE, + }; + + let packet = match self.port.wait(deadline) { + Ok(packet) => packet, + Err(zircon::Status::ErrTimedOut) => return Ok(false), + Err(e) => Err(e)?, + }; + + let observed_signals = match packet.contents() { + zircon::PacketContents::SignalOne(signal_packet) => { + signal_packet.observed() + } + zircon::PacketContents::SignalRep(signal_packet) => { + signal_packet.observed() + } + zircon::PacketContents::User(_user_packet) => { + // User packets are only ever sent by an Awakener + return Ok(true); + } + }; + + let key = packet.key(); + let (token, reg_type) = token_and_type_from_key(key); + + match reg_type { + RegType::Handle => { + // We can return immediately-- no lookup or registration necessary. + evts.events.push(Event::new(Ready::from(observed_signals), token)); + Ok(false) + }, + RegType::Fd => { + // Convert the signals to epoll events using __fdio_wait_end, + // and add to reregistration list if necessary. 
+ let events: u32; + { + let handle = if let Some(handle) = + self.token_to_fd.lock().unwrap() + .get(&token) + .and_then(|h| h.upgrade()) { + handle + } else { + // This handle is apparently in the process of removal. + // It has been removed from the list, but port_cancel has not been called. + return Ok(false); + }; + + events = unsafe { + let mut events: u32 = mem::uninitialized(); + sys::fuchsia::sys::__fdio_wait_end(handle.fdio(), observed_signals, &mut events); + events + }; + + // If necessary, queue to be reregistered before next port_await + let needs_to_rereg = { + let registration_lock = handle.registration().lock().unwrap(); + + registration_lock + .as_ref() + .and_then(|r| r.rereg_signals()) + .is_some() + }; + + if needs_to_rereg { + let mut tokens_to_rereg_lock = self.tokens_to_rereg.lock().unwrap(); + tokens_to_rereg_lock.push(token); + // We use `Ordering::Release` to make sure that we see all `tokens_to_rereg` + // written before the store. + self.has_tokens_to_rereg.store(true, Ordering::Release); + } + } + + evts.events.push(Event::new(epoll_event_to_ready(events), token)); + Ok(false) + }, + } + } + + /// Register event interests for the given IO handle with the OS + pub fn register_fd(&self, + handle: &zircon::Handle, + fd: &EventedFd, + token: Token, + signals: zircon::Signals, + poll_opts: PollOpt) -> io::Result<()> + { + { + let mut token_to_fd = self.token_to_fd.lock().unwrap(); + match token_to_fd.entry(token) { + hash_map::Entry::Occupied(_) => + return Err(io::Error::new(io::ErrorKind::AlreadyExists, + "Attempted to register a filedescriptor on an existing token.")), + hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)), + }; + } + + let wait_async_opts = poll_opts_to_wait_async(poll_opts); + + let wait_res = handle.wait_async_handle(&self.port, token.0 as u64, signals, wait_async_opts); + + if wait_res.is_err() { + self.token_to_fd.lock().unwrap().remove(&token); + } + + Ok(wait_res?) 
+ } + + /// Deregister event interests for the given IO handle with the OS + pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> { + self.token_to_fd.lock().unwrap().remove(&token); + + // We ignore NotFound errors since oneshots are automatically deregistered, + // but mio will attempt to deregister them manually. + self.port.cancel(&*handle, token.0 as u64) + .map_err(io::Error::from) + .or_else(|e| if e.kind() == io::ErrorKind::NotFound { + Ok(()) + } else { + Err(e) + }) + } + + pub fn register_handle(&self, + handle: zx_handle_t, + token: Token, + interests: Ready, + poll_opts: PollOpt) -> io::Result<()> + { + if poll_opts.is_level() && !poll_opts.is_oneshot() { + return Err(io::Error::new(io::ErrorKind::InvalidInput, + "Repeated level-triggered events are not supported on Fuchsia handles.")); + } + + let temp_handle = unsafe { zircon::Handle::from_raw(handle) }; + + let res = temp_handle.wait_async_handle( + &self.port, + key_from_token_and_type(token, RegType::Handle)?, + FuchsiaReady::from(interests).into_zx_signals(), + poll_opts_to_wait_async(poll_opts)); + + mem::forget(temp_handle); + + Ok(res?) + } + + + pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> + { + let temp_handle = unsafe { zircon::Handle::from_raw(handle) }; + let res = self.port.cancel(&temp_handle, key_from_token_and_type(token, RegType::Handle)?); + + mem::forget(temp_handle); + + Ok(res?) + } +} + +pub struct Events { + events: Vec +} + +impl Events { + pub fn with_capacity(_u: usize) -> Events { + // The Fuchsia selector only handles one event at a time, + // so we ignore the default capacity and set it to one. 
+ Events { events: Vec::with_capacity(1) } + } + pub fn len(&self) -> usize { + self.events.len() + } + pub fn capacity(&self) -> usize { + self.events.capacity() + } + pub fn is_empty(&self) -> bool { + self.events.is_empty() + } + pub fn get(&self, idx: usize) -> Option { + self.events.get(idx).map(|e| *e) + } + pub fn push_event(&mut self, event: Event) { + self.events.push(event) + } + pub fn clear(&mut self) { + self.events.events.drain(0..); + } +} + +impl fmt::Debug for Events { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Events") + .field("len", &self.len()) + .finish() + } +} diff --git a/third_party/rust/mio/src/sys/mod.rs b/third_party/rust/mio/src/sys/mod.rs index cff06315d639..8a1705db6c1d 100644 --- a/third_party/rust/mio/src/sys/mod.rs +++ b/third_party/rust/mio/src/sys/mod.rs @@ -1,4 +1,4 @@ -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] pub use self::unix::{ Awakener, EventedFd, @@ -12,11 +12,14 @@ pub use self::unix::{ set_nonblock, }; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] +pub use self::unix::READY_ALL; + +#[cfg(all(unix, not(target_os = "fuchsia")))] #[cfg(feature = "with-deprecated")] pub use self::unix::UnixSocket; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] pub mod unix; #[cfg(windows)] @@ -33,3 +36,21 @@ pub use self::windows::{ #[cfg(windows)] mod windows; + +#[cfg(target_os = "fuchsia")] +pub use self::fuchsia::{ + Awakener, + Events, + EventedHandle, + Selector, + TcpStream, + TcpListener, + UdpSocket, + set_nonblock, +}; + +#[cfg(target_os = "fuchsia")] +pub mod fuchsia; + +#[cfg(not(all(unix, not(target_os = "fuchsia"))))] +pub const READY_ALL: usize = 0; diff --git a/third_party/rust/mio/src/sys/unix/awakener.rs b/third_party/rust/mio/src/sys/unix/awakener.rs index cc3a046a508c..9cc367a78cdf 100644 --- a/third_party/rust/mio/src/sys/unix/awakener.rs +++ b/third_party/rust/mio/src/sys/unix/awakener.rs @@ -20,7 +20,7 @@ mod pipe { impl Awakener { pub 
fn new() -> io::Result { - let (rd, wr) = try!(unix::pipe()); + let (rd, wr) = unix::pipe()?; Ok(Awakener { reader: rd, diff --git a/third_party/rust/mio/src/sys/unix/epoll.rs b/third_party/rust/mio/src/sys/unix/epoll.rs index b9d463855092..50b2a14e4906 100644 --- a/third_party/rust/mio/src/sys/unix/epoll.rs +++ b/third_party/rust/mio/src/sys/unix/epoll.rs @@ -5,20 +5,10 @@ use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; use std::time::Duration; use std::{cmp, i32}; -use libc::c_int; -use libc; -use libc::{EPOLLERR, EPOLLHUP}; +use libc::{self, c_int}; +use libc::{EPOLLERR, EPOLLHUP, EPOLLRDHUP, EPOLLONESHOT}; use libc::{EPOLLET, EPOLLOUT, EPOLLIN, EPOLLPRI}; -#[cfg(not(target_os = "android"))] -use libc::{EPOLLRDHUP, EPOLLONESHOT}; - -// libc doesn't define these constants on android, but they are supported. -#[cfg(target_os = "android")] -const EPOLLRDHUP: libc::c_int = 0x00002000; -#[cfg(target_os = "android")] -const EPOLLONESHOT: libc::c_int = 0x40000000; - use {io, Ready, PollOpt, Token}; use event_imp::Event; use sys::unix::{cvt, UnixReady}; @@ -47,10 +37,10 @@ impl Selector { match epoll_create1.get() { Some(epoll_create1_fn) => { - try!(cvt(epoll_create1_fn(libc::EPOLL_CLOEXEC))) + cvt(epoll_create1_fn(libc::EPOLL_CLOEXEC))? 
} None => { - let fd = try!(cvt(libc::epoll_create(1024))); + let fd = cvt(libc::epoll_create(1024))?; drop(set_cloexec(fd)); fd } @@ -77,12 +67,12 @@ impl Selector { .unwrap_or(-1); // Wait for epoll events for at most timeout_ms milliseconds + evts.clear(); unsafe { - evts.events.set_len(0); - let cnt = try!(cvt(libc::epoll_wait(self.epfd, - evts.events.as_mut_ptr(), - evts.events.capacity() as i32, - timeout_ms))); + let cnt = cvt(libc::epoll_wait(self.epfd, + evts.events.as_mut_ptr(), + evts.events.capacity() as i32, + timeout_ms))?; let cnt = cnt as usize; evts.events.set_len(cnt); @@ -105,7 +95,7 @@ impl Selector { }; unsafe { - try!(cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_ADD, fd, &mut info))); + cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_ADD, fd, &mut info))?; Ok(()) } } @@ -118,7 +108,7 @@ impl Selector { }; unsafe { - try!(cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_MOD, fd, &mut info))); + cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_MOD, fd, &mut info))?; Ok(()) } } @@ -134,32 +124,17 @@ impl Selector { }; unsafe { - try!(cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_DEL, fd, &mut info))); + cvt(libc::epoll_ctl(self.epfd, libc::EPOLL_CTL_DEL, fd, &mut info))?; Ok(()) } } } -#[cfg(feature = "with-deprecated")] -#[allow(deprecated)] -fn is_urgent(opts: PollOpt) -> bool { - opts.is_urgent() -} - -#[cfg(not(feature = "with-deprecated"))] -fn is_urgent(_: PollOpt) -> bool { - false -} - fn ioevent_to_epoll(interest: Ready, opts: PollOpt) -> u32 { let mut kind = 0; if interest.is_readable() { - if is_urgent(opts) { - kind |= EPOLLPRI; - } else { - kind |= EPOLLIN; - } + kind |= EPOLLIN; } if interest.is_writable() { @@ -260,6 +235,10 @@ impl Events { u64: usize::from(event.token()) as u64 }); } + + pub fn clear(&mut self) { + unsafe { self.events.set_len(0); } + } } const NANOS_PER_MILLI: u32 = 1_000_000; diff --git a/third_party/rust/mio/src/sys/unix/eventedfd.rs b/third_party/rust/mio/src/sys/unix/eventedfd.rs index 
7ee2918e0a08..6331062cb100 100644 --- a/third_party/rust/mio/src/sys/unix/eventedfd.rs +++ b/third_party/rust/mio/src/sys/unix/eventedfd.rs @@ -10,7 +10,7 @@ use std::os::unix::io::RawFd; #[derive(Debug)] -/// Adapter for [`RawFd`] providing an [`Evented`] implementation. +/// Adapter for `RawFd` providing an [`Evented`] implementation. /// /// `EventedFd` enables registering any type with an FD with [`Poll`]. /// @@ -29,6 +29,8 @@ use std::os::unix::io::RawFd; /// Basic usage /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Ready, Poll, PollOpt, Token}; /// use mio::unix::EventedFd; /// @@ -36,13 +38,19 @@ use std::os::unix::io::RawFd; /// use std::net::TcpListener; /// /// // Bind a std listener -/// let listener = TcpListener::bind("127.0.0.1:0").unwrap(); +/// let listener = TcpListener::bind("127.0.0.1:0")?; /// -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// /// // Register the listener /// poll.register(&EventedFd(&listener.as_raw_fd()), -/// Token(0), Ready::readable(), PollOpt::edge()).unwrap(); +/// Token(0), Ready::readable(), PollOpt::edge())?; +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` /// /// Implementing `Evented` for a custom type backed by a `RawFd`. 
@@ -78,9 +86,9 @@ use std::os::unix::io::RawFd; /// } /// ``` /// -/// [`RawFd`]: # -/// [`Evented`]: # -/// [`Poll`]: # +/// [`Evented`]: ../event/trait.Evented.html +/// [`Poll`]: ../struct.Poll.html +/// [`Poll::register`]: ../struct.Poll.html#method.register pub struct EventedFd<'a>(pub &'a RawFd); impl<'a> Evented for EventedFd<'a> { diff --git a/third_party/rust/mio/src/sys/unix/io.rs b/third_party/rust/mio/src/sys/unix/io.rs index 3f4cb3374e52..47a3a70d1fcd 100644 --- a/third_party/rust/mio/src/sys/unix/io.rs +++ b/third_party/rust/mio/src/sys/unix/io.rs @@ -38,7 +38,7 @@ pub struct Io { impl Io { /// Try to clone the FD pub fn try_clone(&self) -> io::Result { - Ok(Io { fd: try!(self.fd.try_clone()) }) + Ok(Io { fd: self.fd.try_clone()? }) } } diff --git a/third_party/rust/mio/src/sys/unix/kqueue.rs b/third_party/rust/mio/src/sys/unix/kqueue.rs index 3aba51b8e083..89a5da1120a6 100644 --- a/third_party/rust/mio/src/sys/unix/kqueue.rs +++ b/third_party/rust/mio/src/sys/unix/kqueue.rs @@ -1,5 +1,6 @@ use std::{cmp, fmt, ptr}; -use std::os::raw::c_int; +#[cfg(not(target_os = "netbsd"))] +use std::os::raw::{c_int, c_short}; use std::os::unix::io::AsRawFd; use std::os::unix::io::RawFd; use std::collections::HashMap; @@ -20,15 +21,29 @@ use sys::unix::io::set_cloexec; /// operation will return with an error. This matches windows behavior. static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT; +#[cfg(not(target_os = "netbsd"))] +type Filter = c_short; +#[cfg(not(target_os = "netbsd"))] +type UData = *mut ::libc::c_void; +#[cfg(not(target_os = "netbsd"))] +type Count = c_int; + +#[cfg(target_os = "netbsd")] +type Filter = u32; +#[cfg(target_os = "netbsd")] +type UData = ::libc::intptr_t; +#[cfg(target_os = "netbsd")] +type Count = usize; + macro_rules! 
kevent { ($id: expr, $filter: expr, $flags: expr, $data: expr) => { libc::kevent { ident: $id as ::libc::uintptr_t, - filter: $filter, + filter: $filter as Filter, flags: $flags, fflags: 0, data: 0, - udata: $data as *mut _, + udata: $data as UData, } } } @@ -42,7 +57,7 @@ impl Selector { pub fn new() -> io::Result { // offset by 1 to avoid choosing 0 as the id of a selector let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1; - let kq = unsafe { try!(cvt(libc::kqueue())) }; + let kq = unsafe { cvt(libc::kqueue())? }; drop(set_cloexec(kq)); Ok(Selector { @@ -64,14 +79,14 @@ impl Selector { }); let timeout = timeout.as_ref().map(|s| s as *const _).unwrap_or(ptr::null_mut()); + evts.clear(); unsafe { - let cnt = try!(cvt(libc::kevent(self.kq, + let cnt = cvt(libc::kevent(self.kq, ptr::null(), 0, evts.sys_events.0.as_mut_ptr(), - // FIXME: needs a saturating cast here. - evts.sys_events.0.capacity() as c_int, - timeout))); + evts.sys_events.0.capacity() as Count, + timeout))?; evts.sys_events.0.set_len(cnt as usize); Ok(evts.coalesce(awakener)) } @@ -91,9 +106,14 @@ impl Selector { kevent!(fd, libc::EVFILT_READ, flags | r, usize::from(token)), kevent!(fd, libc::EVFILT_WRITE, flags | w, usize::from(token)), ]; - try!(cvt(libc::kevent(self.kq, changes.as_ptr(), changes.len() as c_int, - changes.as_mut_ptr(), changes.len() as c_int, - ::std::ptr::null()))); + + cvt(libc::kevent(self.kq, + changes.as_ptr(), + changes.len() as Count, + changes.as_mut_ptr(), + changes.len() as Count, + ::std::ptr::null()))?; + for change in changes.iter() { debug_assert_eq!(change.flags & libc::EV_ERROR, libc::EV_ERROR); @@ -116,12 +136,12 @@ impl Selector { // // More info can be found at carllerche/mio#582 if change.data as i32 == libc::EPIPE && - change.filter == libc::EVFILT_WRITE { + change.filter == libc::EVFILT_WRITE as Filter { continue } // ignore ENOENT error for EV_DELETE - let orig_flags = if change.filter == libc::EVFILT_READ { r } else { w }; + let orig_flags = if 
change.filter == libc::EVFILT_READ as Filter { r } else { w }; if change.data as i32 == libc::ENOENT && orig_flags & libc::EV_DELETE != 0 { continue } @@ -143,13 +163,25 @@ impl Selector { // EV_RECEIPT is a nice way to apply changes and get back per-event results while not // draining the actual changes. let filter = libc::EV_DELETE | libc::EV_RECEIPT; +#[cfg(not(target_os = "netbsd"))] let mut changes = [ kevent!(fd, libc::EVFILT_READ, filter, ptr::null_mut()), kevent!(fd, libc::EVFILT_WRITE, filter, ptr::null_mut()), ]; - try!(cvt(libc::kevent(self.kq, changes.as_ptr(), changes.len() as c_int, - changes.as_mut_ptr(), changes.len() as c_int, - ::std::ptr::null())).map(|_| ())); + +#[cfg(target_os = "netbsd")] + let mut changes = [ + kevent!(fd, libc::EVFILT_READ, filter, 0), + kevent!(fd, libc::EVFILT_WRITE, filter, 0), + ]; + + cvt(libc::kevent(self.kq, + changes.as_ptr(), + changes.len() as Count, + changes.as_mut_ptr(), + changes.len() as Count, + ::std::ptr::null())).map(|_| ())?; + if changes[0].data as i32 == libc::ENOENT && changes[1].data as i32 == libc::ENOENT { return Err(::std::io::Error::from_raw_os_error(changes[0].data as i32)); } @@ -255,9 +287,9 @@ impl Events { event::kind_mut(&mut self.events[idx]).insert(*UnixReady::error()); } - if e.filter == libc::EVFILT_READ { + if e.filter == libc::EVFILT_READ as Filter { event::kind_mut(&mut self.events[idx]).insert(Ready::readable()); - } else if e.filter == libc::EVFILT_WRITE { + } else if e.filter == libc::EVFILT_WRITE as Filter { event::kind_mut(&mut self.events[idx]).insert(Ready::writable()); } #[cfg(any(target_os = "dragonfly", @@ -267,6 +299,12 @@ impl Events { event::kind_mut(&mut self.events[idx]).insert(UnixReady::aio()); } } +#[cfg(any(target_os = "freebsd"))] + { + if e.filter == libc::EVFILT_LIO { + event::kind_mut(&mut self.events[idx]).insert(UnixReady::lio()); + } + } if e.flags & libc::EV_EOF != 0 { event::kind_mut(&mut self.events[idx]).insert(UnixReady::hup()); @@ -285,33 +323,35 @@ 
impl Events { pub fn push_event(&mut self, event: Event) { self.events.push(event); } + + pub fn clear(&mut self) { + self.sys_events.0.truncate(0); + self.events.truncate(0); + self.event_map.clear(); + } } impl fmt::Debug for Events { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Events {{ len: {} }}", self.sys_events.0.len()) + fmt.debug_struct("Events") + .field("len", &self.sys_events.0.len()) + .finish() } } #[test] fn does_not_register_rw() { - #![allow(deprecated)] + use {Poll, Ready, PollOpt, Token}; + use unix::EventedFd; - use ::deprecated::{EventLoopBuilder, Handler}; - use ::unix::EventedFd; - struct Nop; - impl Handler for Nop { - type Timeout = (); - type Message = (); - } + let kq = unsafe { libc::kqueue() }; + let kqf = EventedFd(&kq); + let poll = Poll::new().unwrap(); // registering kqueue fd will fail if write is requested (On anything but some versions of OS // X) - let kq = unsafe { libc::kqueue() }; - let kqf = EventedFd(&kq); - let mut evtloop = EventLoopBuilder::new().build::().expect("evt loop builds"); - evtloop.register(&kqf, Token(1234), Ready::readable(), - PollOpt::edge() | PollOpt::oneshot()).unwrap(); + poll.register(&kqf, Token(1234), Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); } #[cfg(any(target_os = "dragonfly", diff --git a/third_party/rust/mio/src/sys/unix/mod.rs b/third_party/rust/mio/src/sys/unix/mod.rs index ee6f5cecfaf1..39e60455483a 100644 --- a/third_party/rust/mio/src/sys/unix/mod.rs +++ b/third_party/rust/mio/src/sys/unix/mod.rs @@ -3,10 +3,10 @@ use libc::{self, c_int}; #[macro_use] pub mod dlsym; -#[cfg(any(target_os = "linux", target_os = "android", target_os = "fuchsia"))] +#[cfg(any(target_os = "linux", target_os = "android", target_os = "solaris"))] mod epoll; -#[cfg(any(target_os = "linux", target_os = "android", target_os = "fuchsia"))] +#[cfg(any(target_os = "linux", target_os = "android", target_os = "solaris"))] pub use self::epoll::{Events, Selector}; 
#[cfg(any(target_os = "bitrig", target_os = "dragonfly", @@ -32,7 +32,7 @@ mod uds; pub use self::awakener::Awakener; pub use self::eventedfd::EventedFd; pub use self::io::{Io, set_nonblock}; -pub use self::ready::UnixReady; +pub use self::ready::{UnixReady, READY_ALL}; pub use self::tcp::{TcpStream, TcpListener}; pub use self::udp::UdpSocket; @@ -53,10 +53,10 @@ pub fn pipe() -> ::io::Result<(Io, Io)> { unsafe { match pipe2.get() { Some(pipe2_fn) => { - try!(cvt(pipe2_fn(pipes.as_mut_ptr(), flags))); + cvt(pipe2_fn(pipes.as_mut_ptr(), flags))?; } None => { - try!(cvt(libc::pipe(pipes.as_mut_ptr()))); + cvt(libc::pipe(pipes.as_mut_ptr()))?; libc::fcntl(pipes[0], libc::F_SETFL, flags); libc::fcntl(pipes[1], libc::F_SETFL, flags); } diff --git a/third_party/rust/mio/src/sys/unix/ready.rs b/third_party/rust/mio/src/sys/unix/ready.rs index 7c62492adb31..398fc17ccf21 100644 --- a/third_party/rust/mio/src/sys/unix/ready.rs +++ b/third_party/rust/mio/src/sys/unix/ready.rs @@ -1,6 +1,7 @@ -use event_imp::{Ready, ready_from_usize}; +use event_imp::{Ready, ready_as_usize, ready_from_usize}; use std::ops; +use std::fmt; /// Unix specific extensions to `Ready` /// @@ -63,30 +64,53 @@ use std::ops; /// Registering readable and error interest on a socket /// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Ready, Poll, PollOpt, Token}; -/// use mio::tcp::TcpStream; +/// use mio::net::TcpStream; /// use mio::unix::UnixReady; /// -/// let addr = "216.58.193.68:80".parse().unwrap(); -/// let socket = TcpStream::connect(&addr).unwrap(); +/// let addr = "216.58.193.68:80".parse()?; +/// let socket = TcpStream::connect(&addr)?; /// -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// /// poll.register(&socket, /// Token(0), /// Ready::readable() | UnixReady::error(), -/// PollOpt::edge()).unwrap(); -/// +/// PollOpt::edge())?; +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` 
/// -/// [`Poll`]: struct.Poll.html +/// [`Poll`]: ../struct.Poll.html /// [readiness]: struct.Poll.html#readiness-operations -#[derive(Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord)] +#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)] pub struct UnixReady(Ready); -const ERROR: usize = 0b00100; -const HUP: usize = 0b01000; -const AIO: usize = 0b10000; +const ERROR: usize = 0b000100; +const HUP: usize = 0b001000; + +#[cfg(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos"))] +const AIO: usize = 0b010000; + +#[cfg(not(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos")))] +const AIO: usize = 0b000000; + +#[cfg(any(target_os = "freebsd"))] +const LIO: usize = 0b100000; + +#[cfg(not(any(target_os = "freebsd")))] +const LIO: usize = 0b000000; + +// Export to support `Ready::all` +pub const READY_ALL: usize = ERROR | HUP | AIO | LIO; impl UnixReady { /// Returns a `Ready` representing AIO completion readiness @@ -103,12 +127,22 @@ impl UnixReady { /// assert!(ready.is_aio()); /// ``` /// - /// [`Poll`]: struct.Poll.html + /// [`Poll`]: ../struct.Poll.html #[inline] + #[cfg(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos"))] pub fn aio() -> UnixReady { UnixReady(ready_from_usize(AIO)) } + #[cfg(not(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos")))] + #[deprecated(since = "0.6.12", note = "this function is now platform specific")] + #[doc(hidden)] + pub fn aio() -> UnixReady { + UnixReady(Ready::empty()) + } + /// Returns a `Ready` representing error readiness. 
/// /// **Note that only readable and writable readiness is guaranteed to be @@ -121,15 +155,15 @@ impl UnixReady { /// # Examples /// /// ``` - /// use mio::Ready; + /// use mio::unix::UnixReady; /// - /// let ready = Ready::error(); + /// let ready = UnixReady::error(); /// /// assert!(ready.is_error()); /// ``` /// - /// [`Poll`]: struct.Poll.html - /// [readiness]: struct.Poll.html#readiness-operations + /// [`Poll`]: ../struct.Poll.html + /// [readiness]: ../struct.Poll.html#readiness-operations #[inline] pub fn error() -> UnixReady { UnixReady(ready_from_usize(ERROR)) @@ -150,20 +184,41 @@ impl UnixReady { /// # Examples /// /// ``` - /// use mio::Ready; + /// use mio::unix::UnixReady; /// - /// let ready = Ready::hup(); + /// let ready = UnixReady::hup(); /// /// assert!(ready.is_hup()); /// ``` /// - /// [`Poll`]: struct.Poll.html - /// [readiness]: struct.Poll.html#readiness-operations + /// [`Poll`]: ../struct.Poll.html + /// [readiness]: ../struct.Poll.html#readiness-operations #[inline] pub fn hup() -> UnixReady { UnixReady(ready_from_usize(HUP)) } + /// Returns a `Ready` representing LIO completion readiness + /// + /// See [`Poll`] for more documentation on polling. + /// + /// # Examples + /// + /// ``` + /// use mio::unix::UnixReady; + /// + /// let ready = UnixReady::lio(); + /// + /// assert!(ready.is_lio()); + /// ``` + /// + /// [`Poll`]: struct.Poll.html + #[inline] + #[cfg(any(target_os = "freebsd"))] + pub fn lio() -> UnixReady { + UnixReady(ready_from_usize(LIO)) + } + /// Returns true if `Ready` contains AIO readiness /// /// See [`Poll`] for more documentation on polling. 
@@ -177,11 +232,24 @@ impl UnixReady { /// /// assert!(ready.is_aio()); /// ``` + /// + /// [`Poll`]: ../struct.Poll.html #[inline] + #[cfg(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos"))] pub fn is_aio(&self) -> bool { self.contains(ready_from_usize(AIO)) } + #[deprecated(since = "0.6.12", note = "this function is now platform specific")] + #[cfg(feature = "with-deprecated")] + #[cfg(not(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos")))] + #[doc(hidden)] + pub fn is_aio(&self) -> bool { + false + } + /// Returns true if the value includes error readiness /// /// **Note that only readable and writable readiness is guaranteed to be @@ -194,14 +262,15 @@ impl UnixReady { /// # Examples /// /// ``` - /// use mio::Ready; + /// use mio::unix::UnixReady; /// - /// let ready = Ready::error(); + /// let ready = UnixReady::error(); /// /// assert!(ready.is_error()); /// ``` /// - /// [`Poll`]: struct.Poll.html + /// [`Poll`]: ../struct.Poll.html + /// [readiness]: ../struct.Poll.html#readiness-operations #[inline] pub fn is_error(&self) -> bool { self.contains(ready_from_usize(ERROR)) @@ -222,18 +291,38 @@ impl UnixReady { /// # Examples /// /// ``` - /// use mio::Ready; + /// use mio::unix::UnixReady; /// - /// let ready = Ready::hup(); + /// let ready = UnixReady::hup(); /// /// assert!(ready.is_hup()); /// ``` /// - /// [`Poll`]: struct.Poll.html + /// [`Poll`]: ../struct.Poll.html + /// [readiness]: ../struct.Poll.html#readiness-operations #[inline] pub fn is_hup(&self) -> bool { self.contains(ready_from_usize(HUP)) } + + /// Returns true if `Ready` contains LIO readiness + /// + /// See [`Poll`] for more documentation on polling. 
+ /// + /// # Examples + /// + /// ``` + /// use mio::unix::UnixReady; + /// + /// let ready = UnixReady::lio(); + /// + /// assert!(ready.is_lio()); + /// ``` + #[inline] + #[cfg(any(target_os = "freebsd"))] + pub fn is_lio(&self) -> bool { + self.contains(ready_from_usize(LIO)) + } } impl From for UnixReady { @@ -294,10 +383,13 @@ impl ops::Sub for UnixReady { #[inline] fn sub(self, other: UnixReady) -> UnixReady { - (self.0 & !other.0).into() + ready_from_usize(ready_as_usize(self.0) & !ready_as_usize(other.0)).into() } } +#[deprecated(since = "0.6.10", note = "removed")] +#[cfg(feature = "with-deprecated")] +#[doc(hidden)] impl ops::Not for UnixReady { type Output = UnixReady; @@ -306,3 +398,31 @@ impl ops::Not for UnixReady { (!self.0).into() } } + +impl fmt::Debug for UnixReady { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut one = false; + let flags = [ + (UnixReady(Ready::readable()), "Readable"), + (UnixReady(Ready::writable()), "Writable"), + (UnixReady::error(), "Error"), + (UnixReady::hup(), "Hup"), + #[allow(deprecated)] + (UnixReady::aio(), "Aio")]; + + for &(flag, msg) in &flags { + if self.contains(flag) { + if one { write!(fmt, " | ")? 
} + write!(fmt, "{}", msg)?; + + one = true + } + } + + if !one { + fmt.write_str("(empty)")?; + } + + Ok(()) + } +} diff --git a/third_party/rust/mio/src/sys/unix/tcp.rs b/third_party/rust/mio/src/sys/unix/tcp.rs index 365214fe8c59..a5f6dba11d69 100644 --- a/third_party/rust/mio/src/sys/unix/tcp.rs +++ b/third_party/rust/mio/src/sys/unix/tcp.rs @@ -1,4 +1,5 @@ use std::cmp; +use std::fmt; use std::io::{Read, Write}; use std::net::{self, SocketAddr}; use std::os::unix::io::{RawFd, FromRawFd, IntoRawFd, AsRawFd}; @@ -15,19 +16,17 @@ use event::Evented; use sys::unix::eventedfd::EventedFd; use sys::unix::io::set_nonblock; -#[derive(Debug)] pub struct TcpStream { inner: net::TcpStream, } -#[derive(Debug)] pub struct TcpListener { inner: net::TcpListener, } impl TcpStream { pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result { - try!(set_nonblock(stream.as_raw_fd())); + set_nonblock(stream.as_raw_fd())?; match stream.connect(addr) { Ok(..) => {} @@ -126,6 +125,10 @@ impl TcpStream { self.inner.take_error() } + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + self.inner.peek(buf) + } + pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result { unsafe { let slice = iovec::as_os_slice_mut(bufs); @@ -189,6 +192,12 @@ impl Evented for TcpStream { } } +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } +} + impl FromRawFd for TcpStream { unsafe fn from_raw_fd(fd: RawFd) -> TcpStream { TcpStream { @@ -210,8 +219,8 @@ impl AsRawFd for TcpStream { } impl TcpListener { - pub fn new(inner: net::TcpListener, _addr: &SocketAddr) -> io::Result { - try!(set_nonblock(inner.as_raw_fd())); + pub fn new(inner: net::TcpListener) -> io::Result { + set_nonblock(inner.as_raw_fd())?; Ok(TcpListener { inner: inner, }) @@ -229,13 +238,8 @@ impl TcpListener { }) } - pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - self.inner.accept().and_then(|(s, a)| { - 
try!(set_nonblock(s.as_raw_fd())); - Ok((TcpStream { - inner: s, - }, a)) - }) + pub fn accept(&self) -> io::Result<(net::TcpStream, SocketAddr)> { + self.inner.accept() } #[allow(deprecated)] @@ -277,6 +281,12 @@ impl Evented for TcpListener { } } +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } +} + impl FromRawFd for TcpListener { unsafe fn from_raw_fd(fd: RawFd) -> TcpListener { TcpListener { diff --git a/third_party/rust/mio/src/sys/unix/udp.rs b/third_party/rust/mio/src/sys/unix/udp.rs index 0af2773adc7b..fee1528cd067 100644 --- a/third_party/rust/mio/src/sys/unix/udp.rs +++ b/third_party/rust/mio/src/sys/unix/udp.rs @@ -1,20 +1,20 @@ use {io, Ready, Poll, PollOpt, Token}; use event::Evented; use unix::EventedFd; +use std::fmt; use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::os::unix::io::{RawFd, IntoRawFd, AsRawFd, FromRawFd}; #[allow(unused_imports)] // only here for Rust 1.8 use net2::UdpSocketExt; -#[derive(Debug)] pub struct UdpSocket { io: net::UdpSocket, } impl UdpSocket { pub fn new(socket: net::UdpSocket) -> io::Result { - try!(socket.set_nonblocking(true)); + socket.set_nonblocking(true)?; Ok(UdpSocket { io: socket, }) @@ -117,6 +117,14 @@ impl UdpSocket { self.io.leave_multicast_v6(multiaddr, interface) } + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.io.set_only_v6(only_v6) + } + + pub fn only_v6(&self) -> io::Result { + self.io.only_v6() + } + pub fn take_error(&self) -> io::Result> { self.io.take_error() } @@ -136,6 +144,12 @@ impl Evented for UdpSocket { } } +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.io, f) + } +} + impl FromRawFd for UdpSocket { unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket { UdpSocket { diff --git a/third_party/rust/mio/src/sys/unix/uds.rs b/third_party/rust/mio/src/sys/unix/uds.rs index 32ffd7313f39..e2888b23a41b 100644 --- 
a/third_party/rust/mio/src/sys/unix/uds.rs +++ b/third_party/rust/mio/src/sys/unix/uds.rs @@ -6,7 +6,8 @@ use std::path::Path; use libc; -use {io, Evented, Ready, Poll, PollOpt, Token}; +use {io, Ready, Poll, PollOpt, Token}; +use event::Evented; use sys::unix::{cvt, Io}; use sys::unix::io::{set_nonblock, set_cloexec}; @@ -82,10 +83,10 @@ impl UnixSocket { } } - let fd = try!(cvt(libc::socket(libc::AF_UNIX, libc::SOCK_STREAM, 0))); + let fd = cvt(libc::socket(libc::AF_UNIX, libc::SOCK_STREAM, 0))?; let fd = UnixSocket::from_raw_fd(fd); - try!(set_cloexec(fd.as_raw_fd())); - try!(set_nonblock(fd.as_raw_fd())); + set_cloexec(fd.as_raw_fd())?; + set_nonblock(fd.as_raw_fd())?; Ok(fd) } } @@ -93,10 +94,10 @@ impl UnixSocket { /// Connect the socket to the specified address pub fn connect + ?Sized>(&self, addr: &P) -> io::Result<()> { unsafe { - let (addr, len) = try!(sockaddr_un(addr.as_ref())); - try!(cvt(libc::connect(self.as_raw_fd(), + let (addr, len) = sockaddr_un(addr.as_ref())?; + cvt(libc::connect(self.as_raw_fd(), &addr as *const _ as *const _, - len))); + len))?; Ok(()) } } @@ -104,49 +105,36 @@ impl UnixSocket { /// Listen for incoming requests pub fn listen(&self, backlog: usize) -> io::Result<()> { unsafe { - try!(cvt(libc::listen(self.as_raw_fd(), backlog as i32))); + cvt(libc::listen(self.as_raw_fd(), backlog as i32))?; Ok(()) } } pub fn accept(&self) -> io::Result { unsafe { - let fd = try!(cvt(libc::accept(self.as_raw_fd(), + let fd = cvt(libc::accept(self.as_raw_fd(), 0 as *mut _, - 0 as *mut _))); + 0 as *mut _))?; let fd = Io::from_raw_fd(fd); - try!(set_cloexec(fd.as_raw_fd())); - try!(set_nonblock(fd.as_raw_fd())); + set_cloexec(fd.as_raw_fd())?; + set_nonblock(fd.as_raw_fd())?; Ok(UnixSocket { io: fd }) } } /// Bind the socket to the specified address - #[cfg(not(all(target_arch = "aarch64",target_os = "android")))] pub fn bind + ?Sized>(&self, addr: &P) -> io::Result<()> { unsafe { - let (addr, len) = try!(sockaddr_un(addr.as_ref())); - 
try!(cvt(libc::bind(self.as_raw_fd(), + let (addr, len) = sockaddr_un(addr.as_ref())?; + cvt(libc::bind(self.as_raw_fd(), &addr as *const _ as *const _, - len))); - Ok(()) - } - } - - #[cfg(all(target_arch = "aarch64",target_os = "android"))] - pub fn bind + ?Sized>(&self, addr: &P) -> io::Result<()> { - unsafe { - let (addr, len) = try!(sockaddr_un(addr.as_ref())); - let len_i32 = len as i32; - try!(cvt(libc::bind(self.as_raw_fd(), - &addr as *const _ as *const _, - len_i32))); + len))?; Ok(()) } } pub fn try_clone(&self) -> io::Result { - Ok(UnixSocket { io: try!(self.io.try_clone()) }) + Ok(UnixSocket { io: self.io.try_clone()? }) } pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { @@ -156,7 +144,7 @@ impl UnixSocket { Shutdown::Both => libc::SHUT_RDWR, }; unsafe { - try!(cvt(libc::shutdown(self.as_raw_fd(), how))); + cvt(libc::shutdown(self.as_raw_fd(), how))?; Ok(()) } } @@ -177,7 +165,7 @@ impl UnixSocket { msg.msg_iovlen = 1; msg.msg_control = &mut cmsg as *mut _ as *mut _; msg.msg_controllen = mem::size_of_val(&cmsg).my_into(); - let bytes = try!(cvt(libc::recvmsg(self.as_raw_fd(), &mut msg, 0))); + let bytes = cvt(libc::recvmsg(self.as_raw_fd(), &mut msg, 0))?; const SCM_RIGHTS: libc::c_int = 1; @@ -211,7 +199,7 @@ impl UnixSocket { msg.msg_iovlen = 1; msg.msg_control = &mut cmsg as *mut _ as *mut _; msg.msg_controllen = mem::size_of_val(&cmsg).my_into(); - let bytes = try!(cvt(libc::sendmsg(self.as_raw_fd(), &msg, 0))); + let bytes = cvt(libc::sendmsg(self.as_raw_fd(), &msg, 0))?; Ok(bytes as usize) } } diff --git a/third_party/rust/mio/src/sys/windows/awakener.rs b/third_party/rust/mio/src/sys/windows/awakener.rs index dfbee20aee29..c913bc93f8a6 100644 --- a/third_party/rust/mio/src/sys/windows/awakener.rs +++ b/third_party/rust/mio/src/sys/windows/awakener.rs @@ -32,7 +32,7 @@ impl Awakener { let status = CompletionStatus::new(0, usize::from(inner.token), 0 as *mut _); - try!(inner.selector.port().post(status)); + 
inner.selector.port().post(status)?; } Ok(()) } diff --git a/third_party/rust/mio/src/sys/windows/from_raw_arc.rs b/third_party/rust/mio/src/sys/windows/from_raw_arc.rs index f7b301b3ef4e..b6d38b2408a0 100644 --- a/third_party/rust/mio/src/sys/windows/from_raw_arc.rs +++ b/third_party/rust/mio/src/sys/windows/from_raw_arc.rs @@ -108,7 +108,7 @@ mod tests { let mut a = false; { let a = FromRawArc::new(A(&mut a)); - a.clone(); + let _ = a.clone(); assert!(!*a.0); } assert!(a); diff --git a/third_party/rust/mio/src/sys/windows/mod.rs b/third_party/rust/mio/src/sys/windows/mod.rs index 29867458de04..dad70b0c1531 100644 --- a/third_party/rust/mio/src/sys/windows/mod.rs +++ b/third_party/rust/mio/src/sys/windows/mod.rs @@ -160,10 +160,6 @@ enum Family { V4, V6, } -fn wouldblock() -> io::Error { - io::Error::new(io::ErrorKind::WouldBlock, "operation would block") -} - unsafe fn cancel(socket: &AsRawSocket, overlapped: &Overlapped) -> io::Result<()> { let handle = socket.as_raw_socket() as winapi::HANDLE; diff --git a/third_party/rust/mio/src/sys/windows/selector.rs b/third_party/rust/mio/src/sys/windows/selector.rs index d05722976281..21e894b8eb75 100644 --- a/third_party/rust/mio/src/sys/windows/selector.rs +++ b/third_party/rust/mio/src/sys/windows/selector.rs @@ -1,6 +1,6 @@ #![allow(deprecated)] -use std::{fmt, io, u32}; +use std::{fmt, io}; use std::cell::UnsafeCell; use std::os::windows::prelude::*; use std::sync::{Arc, Mutex}; @@ -73,7 +73,7 @@ impl Selector { trace!("select; timeout={:?}", timeout); // Clear out the previous list of I/O events and get some more! - events.events.truncate(0); + events.clear(); trace!("polling IOCP"); let n = match self.inner.port.get_many(&mut events.statuses, timeout) { @@ -85,7 +85,7 @@ impl Selector { let mut ret = false; for status in events.statuses[..n].iter() { // This should only ever happen from the awakener, and we should - // only ever have one awakener right not, so assert as such. 
+ // only ever have one awakener right now, so assert as such. if status.overlapped() as usize == 0 { assert_eq!(status.token(), usize::from(awakener)); ret = true; @@ -184,7 +184,7 @@ impl Binding { // Ignore errors, we'll see them on the next line. drop(self.selector.fill(selector.inner.clone())); - try!(self.check_same_selector(poll)); + self.check_same_selector(poll)?; selector.inner.port.add_handle(usize::from(token), handle) } @@ -196,7 +196,7 @@ impl Binding { poll: &Poll) -> io::Result<()> { let selector = poll::selector(poll); drop(self.selector.fill(selector.inner.clone())); - try!(self.check_same_selector(poll)); + self.check_same_selector(poll)?; selector.inner.port.add_socket(usize::from(token), handle) } @@ -270,7 +270,8 @@ impl Binding { impl fmt::Debug for Binding { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Binding") + f.debug_struct("Binding") + .finish() } } @@ -357,7 +358,7 @@ impl ReadyBinding { -> io::Result<()> { trace!("register {:?} {:?}", token, events); unsafe { - try!(self.binding.register_socket(socket, token, poll)); + self.binding.register_socket(socket, token, poll)?; } let (r, s) = poll::new_registration(poll, token, events, opts); @@ -377,7 +378,7 @@ impl ReadyBinding { -> io::Result<()> { trace!("reregister {:?} {:?}", token, events); unsafe { - try!(self.binding.reregister_socket(socket, token, poll)); + self.binding.reregister_socket(socket, token, poll)?; } registration.lock().unwrap() @@ -396,7 +397,7 @@ impl ReadyBinding { -> io::Result<()> { trace!("deregistering"); unsafe { - try!(self.binding.deregister_socket(socket, poll)); + self.binding.deregister_socket(socket, poll)?; } registration.lock().unwrap() @@ -452,6 +453,10 @@ impl Events { pub fn push_event(&mut self, event: Event) { self.events.push(event); } + + pub fn clear(&mut self) { + self.events.truncate(0); + } } macro_rules! 
overlapped2arc { @@ -517,7 +522,8 @@ impl Overlapped { impl fmt::Debug for Overlapped { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Overlapped") + f.debug_struct("Overlapped") + .finish() } } diff --git a/third_party/rust/mio/src/sys/windows/tcp.rs b/third_party/rust/mio/src/sys/windows/tcp.rs index 8f490d1a138f..40843f6c51bd 100644 --- a/third_party/rust/mio/src/sys/windows/tcp.rs +++ b/third_party/rust/mio/src/sys/windows/tcp.rs @@ -16,7 +16,7 @@ use {poll, Ready, Poll, PollOpt, Token}; use event::Evented; use sys::windows::from_raw_arc::FromRawArc; use sys::windows::selector::{Overlapped, ReadyBinding}; -use sys::windows::{wouldblock, Family}; +use sys::windows::Family; pub struct TcpStream { /// Separately stored implementation to ensure that the `Drop` @@ -115,7 +115,7 @@ impl TcpStream { pub fn connect(socket: net::TcpStream, addr: &SocketAddr) -> io::Result { - try!(socket.set_nonblocking(true)); + socket.set_nonblocking(true)?; Ok(TcpStream::new(socket, Some(*addr))) } @@ -196,7 +196,7 @@ impl TcpStream { } pub fn take_error(&self) -> io::Result> { - if let Some(e) = try!(self.imp.inner.socket.take_error()) { + if let Some(e) = self.imp.inner.socket.take_error()? { return Ok(Some(e)) } @@ -230,33 +230,14 @@ impl TcpStream { self.imp.inner() } - fn post_register(&self, interest: Ready, me: &mut StreamInner) { - if interest.is_readable() { - self.imp.schedule_read(me); - } - - // At least with epoll, if a socket is registered with an interest in - // writing and it's immediately writable then a writable event is - // generated immediately, so do so here. 
- if interest.is_writable() { - if let State::Empty = me.write { - self.imp.add_readiness(me, Ready::writable()); - } - } - } - - pub fn read(&self, buf: &mut [u8]) -> io::Result { - self.readv(&mut [buf.into()]) - } - - pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result { + fn before_read(&self) -> io::Result> { let mut me = self.inner(); match me.read { // Empty == we're not associated yet, and if we're pending then // these are both cases where we return "would block" State::Empty | - State::Pending(()) => return Err(wouldblock()), + State::Pending(()) => return Err(io::ErrorKind::WouldBlock.into()), // If we got a delayed error as part of a `read_overlapped` below, // return that here. Also schedule another read in case it was @@ -277,6 +258,47 @@ impl TcpStream { State::Ready(()) => {} } + Ok(me) + } + + fn post_register(&self, interest: Ready, me: &mut StreamInner) { + if interest.is_readable() { + self.imp.schedule_read(me); + } + + // At least with epoll, if a socket is registered with an interest in + // writing and it's immediately writable then a writable event is + // generated immediately, so do so here. + if interest.is_writable() { + if let State::Empty = me.write { + self.imp.add_readiness(me, Ready::writable()); + } + } + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result { + match IoVec::from_bytes_mut(buf) { + Some(vec) => self.readv(&mut [vec]), + None => Ok(0), + } + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + let mut me = self.before_read()?; + + match (&self.imp.inner.socket).peek(buf) { + Ok(n) => Ok(n), + Err(e) => { + me.read = State::Empty; + self.imp.schedule_read(&mut me); + Err(e) + } + } + } + + pub fn readv(&self, bufs: &mut [&mut IoVec]) -> io::Result { + let mut me = self.before_read()?; + // TODO: Does WSARecv work on a nonblocking sockets? We ideally want to // call that instead of looping over all the buffers and calling // `recv` on each buffer. 
I'm not sure though if an overlapped @@ -326,20 +348,27 @@ impl TcpStream { } pub fn write(&self, buf: &[u8]) -> io::Result { - self.writev(&[buf.into()]) + match IoVec::from_bytes(buf) { + Some(vec) => self.writev(&[vec]), + None => Ok(0), + } } pub fn writev(&self, bufs: &[&IoVec]) -> io::Result { let mut me = self.inner(); let me = &mut *me; - match me.write { + match mem::replace(&mut me.write, State::Empty) { State::Empty => {} - _ => return Err(wouldblock()) + State::Error(e) => return Err(e), + other => { + me.write = other; + return Err(io::ErrorKind::WouldBlock.into()) + } } if !me.iocp.registered() { - return Err(wouldblock()) + return Err(io::ErrorKind::WouldBlock.into()) } if bufs.len() == 0 { @@ -368,7 +397,7 @@ impl StreamImp { fn schedule_connect(&self, addr: &SocketAddr) -> io::Result<()> { unsafe { trace!("scheduling a connect"); - try!(self.inner.socket.connect_overlapped(addr, &[], self.inner.read.as_mut_ptr())); + self.inner.socket.connect_overlapped(addr, &[], self.inner.read.as_mut_ptr())?; } // see docs above on StreamImp.inner for rationale on forget mem::forget(self.clone()); @@ -392,7 +421,7 @@ impl StreamImp { _ => return, } - me.iocp.set_readiness(me.iocp.readiness() & !Ready::readable()); + me.iocp.set_readiness(me.iocp.readiness() - Ready::readable()); trace!("scheduling a read"); let res = unsafe { @@ -448,15 +477,16 @@ impl StreamImp { me: &mut StreamInner) { // About to write, clear any pending level triggered events - me.iocp.set_readiness(me.iocp.readiness() & !Ready::writable()); + me.iocp.set_readiness(me.iocp.readiness() - Ready::writable()); - trace!("scheduling a write"); loop { + trace!("scheduling a write of {} bytes", buf[pos..].len()); let ret = unsafe { self.inner.socket.write_overlapped(&buf[pos..], self.inner.write.as_mut_ptr()) }; match ret { Ok(Some(transferred_bytes)) if me.instant_notify => { + trace!("done immediately with {} bytes", transferred_bytes); if transferred_bytes == buf.len() - pos { 
self.add_readiness(me, Ready::writable()); me.write = State::Empty; @@ -465,12 +495,14 @@ impl StreamImp { pos += transferred_bytes; } Ok(_) => { + trace!("scheduled for later"); // see docs above on StreamImp.inner for rationale on forget me.write = State::Pending((buf, pos)); mem::forget(self.clone()); break; } Err(e) => { + trace!("write error: {}", e); me.write = State::Error(e); self.add_readiness(me, Ready::writable()); me.iocp.put_buffer(buf); @@ -550,11 +582,11 @@ impl Evented for TcpStream { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { let mut me = self.inner(); - try!(me.iocp.register_socket(&self.imp.inner.socket, poll, token, - interest, opts, &self.registration)); + me.iocp.register_socket(&self.imp.inner.socket, poll, token, + interest, opts, &self.registration)?; unsafe { - try!(super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)); + super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)?; me.instant_notify = true; } @@ -572,8 +604,8 @@ impl Evented for TcpStream { fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { let mut me = self.inner(); - try!(me.iocp.reregister_socket(&self.imp.inner.socket, poll, token, - interest, opts, &self.registration)); + me.iocp.reregister_socket(&self.imp.inner.socket, poll, token, + interest, opts, &self.registration)?; self.post_register(interest, &mut me); Ok(()) } @@ -586,7 +618,8 @@ impl Evented for TcpStream { impl fmt::Debug for TcpStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "TcpStream { ... 
}".fmt(f) + f.debug_struct("TcpStream") + .finish() } } @@ -612,9 +645,10 @@ impl Drop for TcpStream { } impl TcpListener { - pub fn new(socket: net::TcpListener, addr: &SocketAddr) + pub fn new(socket: net::TcpListener) -> io::Result { - Ok(TcpListener::new_family(socket, match *addr { + let addr = socket.local_addr()?; + Ok(TcpListener::new_family(socket, match addr { SocketAddr::V4(..) => Family::V4, SocketAddr::V6(..) => Family::V6, })) @@ -639,19 +673,16 @@ impl TcpListener { } } - pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + pub fn accept(&self) -> io::Result<(net::TcpStream, SocketAddr)> { let mut me = self.inner(); let ret = match mem::replace(&mut me.accept, State::Empty) { - State::Empty => return Err(would_block()), + State::Empty => return Err(io::ErrorKind::WouldBlock.into()), State::Pending(t) => { me.accept = State::Pending(t); - return Err(would_block()); - } - State::Ready((s, a)) => { - try!(s.set_nonblocking(true)); - Ok((TcpStream::new(s, None), a)) + return Err(io::ErrorKind::WouldBlock.into()); } + State::Ready((s, a)) => Ok((s, a)), State::Error(e) => Err(e), }; @@ -708,7 +739,7 @@ impl ListenerImp { _ => return } - me.iocp.set_readiness(me.iocp.readiness() & !Ready::readable()); + me.iocp.set_readiness(me.iocp.readiness() - Ready::readable()); let res = match self.inner.family { Family::V4 => TcpBuilder::new_v4(), @@ -767,11 +798,11 @@ impl Evented for TcpListener { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { let mut me = self.inner(); - try!(me.iocp.register_socket(&self.imp.inner.socket, poll, token, - interest, opts, &self.registration)); + me.iocp.register_socket(&self.imp.inner.socket, poll, token, + interest, opts, &self.registration)?; unsafe { - try!(super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)); + super::no_notify_on_instant_completion(self.imp.inner.socket.as_raw_socket() as HANDLE)?; me.instant_notify = true; } @@ 
-782,8 +813,8 @@ impl Evented for TcpListener { fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { let mut me = self.inner(); - try!(me.iocp.reregister_socket(&self.imp.inner.socket, poll, token, - interest, opts, &self.registration)); + me.iocp.reregister_socket(&self.imp.inner.socket, poll, token, + interest, opts, &self.registration)?; self.imp.schedule_accept(&mut me); Ok(()) } @@ -796,7 +827,8 @@ impl Evented for TcpListener { impl fmt::Debug for TcpListener { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "TcpListener { ... }".fmt(f) + f.debug_struct("TcpListener") + .finish() } } @@ -817,11 +849,3 @@ impl Drop for TcpListener { } } } - -// TODO: Use std's allocation free io::Error -const WOULDBLOCK: i32 = ::winapi::winerror::WSAEWOULDBLOCK as i32; - -/// Returns a std `WouldBlock` error without allocating -pub fn would_block() -> ::std::io::Error { - ::std::io::Error::from_raw_os_error(WOULDBLOCK) -} diff --git a/third_party/rust/mio/src/sys/windows/udp.rs b/third_party/rust/mio/src/sys/windows/udp.rs index c21ce7770ce6..4d3fc040fdf1 100644 --- a/third_party/rust/mio/src/sys/windows/udp.rs +++ b/third_party/rust/mio/src/sys/windows/udp.rs @@ -94,23 +94,23 @@ impl UdpSocket { match me.write { State::Empty => {} - _ => return Err(would_block()), + _ => return Err(io::ErrorKind::WouldBlock.into()), } if !me.iocp.registered() { - return Err(would_block()) + return Err(io::ErrorKind::WouldBlock.into()) } let interest = me.iocp.readiness(); - me.iocp.set_readiness(interest & !Ready::writable()); + me.iocp.set_readiness(interest - Ready::writable()); let mut owned_buf = me.iocp.get_buffer(64 * 1024); - let amt = try!(owned_buf.write(buf)); - try!(unsafe { + let amt = owned_buf.write(buf)?; + unsafe { trace!("scheduling a send"); self.imp.inner.socket.send_to_overlapped(&owned_buf, target, self.imp.inner.write.as_mut_ptr()) - }); + }?; me.write = State::Pending(owned_buf); mem::forget(self.imp.clone()); 
Ok(amt) @@ -128,23 +128,23 @@ impl UdpSocket { match me.write { State::Empty => {} - _ => return Err(would_block()), + _ => return Err(io::ErrorKind::WouldBlock.into()), } if !me.iocp.registered() { - return Err(would_block()) + return Err(io::ErrorKind::WouldBlock.into()) } let interest = me.iocp.readiness(); - me.iocp.set_readiness(interest & !Ready::writable()); + me.iocp.set_readiness(interest - Ready::writable()); let mut owned_buf = me.iocp.get_buffer(64 * 1024); - let amt = try!(owned_buf.write(buf)); - try!(unsafe { + let amt = owned_buf.write(buf)?; + unsafe { trace!("scheduling a send"); self.imp.inner.socket.send_overlapped(&owned_buf, self.imp.inner.write.as_mut_ptr()) - }); + }?; me.write = State::Pending(owned_buf); mem::forget(self.imp.clone()); Ok(amt) @@ -153,8 +153,8 @@ impl UdpSocket { pub fn recv_from(&self, mut buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { let mut me = self.inner(); match mem::replace(&mut me.read, State::Empty) { - State::Empty => Err(would_block()), - State::Pending(b) => { me.read = State::Pending(b); Err(would_block()) } + State::Empty => Err(io::ErrorKind::WouldBlock.into()), + State::Pending(b) => { me.read = State::Pending(b); Err(io::ErrorKind::WouldBlock.into()) } State::Ready(data) => { // If we weren't provided enough space to receive the message // then don't actually read any data, just return an error. @@ -181,7 +181,7 @@ impl UdpSocket { } } - pub fn recv(&self, mut buf: &mut [u8]) + pub fn recv(&self, buf: &mut [u8]) -> io::Result { //Since recv_from can be used on connected sockets just call it and drop the address. 
self.recv_from(buf).map(|(size,_)| size) @@ -255,6 +255,14 @@ impl UdpSocket { self.imp.inner.socket.leave_multicast_v6(multiaddr, interface) } + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.imp.inner.socket.set_only_v6(only_v6) + } + + pub fn only_v6(&self) -> io::Result { + self.imp.inner.socket.only_v6() + } + pub fn take_error(&self) -> io::Result> { self.imp.inner.socket.take_error() } @@ -291,7 +299,7 @@ impl Imp { } let interest = me.iocp.readiness(); - me.iocp.set_readiness(interest & !Ready::readable()); + me.iocp.set_readiness(interest - Ready::readable()); let mut buf = me.iocp.get_buffer(64 * 1024); let res = unsafe { @@ -324,9 +332,9 @@ impl Evented for UdpSocket { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { let mut me = self.inner(); - try!(me.iocp.register_socket(&self.imp.inner.socket, + me.iocp.register_socket(&self.imp.inner.socket, poll, token, interest, opts, - &self.registration)); + &self.registration)?; self.post_register(interest, &mut me); Ok(()) } @@ -334,9 +342,9 @@ impl Evented for UdpSocket { fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { let mut me = self.inner(); - try!(me.iocp.reregister_socket(&self.imp.inner.socket, + me.iocp.reregister_socket(&self.imp.inner.socket, poll, token, interest, - opts, &self.registration)); + opts, &self.registration)?; self.post_register(interest, &mut me); Ok(()) } @@ -349,7 +357,8 @@ impl Evented for UdpSocket { impl fmt::Debug for UdpSocket { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "UdpSocket { ... 
}".fmt(f) + f.debug_struct("UdpSocket") + .finish() } } @@ -402,11 +411,3 @@ fn recv_done(status: &OVERLAPPED_ENTRY) { me.read = State::Ready(buf); me2.add_readiness(&mut me, Ready::readable()); } - -// TODO: Use std's allocation free io::Error -const WOULDBLOCK: i32 = ::winapi::winerror::WSAEWOULDBLOCK as i32; - -/// Returns a std `WouldBlock` error without allocating -pub fn would_block() -> ::std::io::Error { - ::std::io::Error::from_raw_os_error(WOULDBLOCK) -} diff --git a/third_party/rust/mio/src/timer.rs b/third_party/rust/mio/src/timer.rs index cf1f5ab37765..0b762ea38687 100644 --- a/third_party/rust/mio/src/timer.rs +++ b/third_party/rust/mio/src/timer.rs @@ -2,8 +2,10 @@ #![allow(deprecated, missing_debug_implementations)] -use {convert, io, Evented, Ready, Poll, PollOpt, Registration, SetReadiness, Token}; +use {convert, io, Ready, Poll, PollOpt, Registration, SetReadiness, Token}; +use event::Evented; use lazycell::LazyCell; +use slab::Slab; use std::{cmp, error, fmt, u64, usize, iter, thread}; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -91,19 +93,16 @@ const TICK_MAX: Tick = u64::MAX; // Manages communication with wakeup thread type WakeupState = Arc; -type Slab = ::slab::Slab; - pub type Result = ::std::result::Result; // TODO: remove pub type TimerResult = Result; +/// Deprecated and unused. #[derive(Debug)] -pub struct TimerError { - kind: TimerErrorKind, - desc: &'static str, -} +pub struct TimerError; +/// Deprecated and unused. 
#[derive(Debug)] pub enum TimerErrorKind { TimerOverflow, @@ -189,14 +188,13 @@ impl Timer { let curr = self.wheel[slot]; // Insert the new entry - let token = try!( - self.entries.insert(Entry::new(state, tick, curr.head)) - .map_err(|_| TimerError::overflow())); + let entry = Entry::new(state, tick, curr.head); + let token = Token(self.entries.insert(entry)); if curr.head != EMPTY { // If there was a previous entry, set its prev pointer to the new // entry - self.entries[curr.head].links.prev = token; + self.entries[curr.head.into()].links.prev = token; } // Update the head slot @@ -207,7 +205,7 @@ impl Timer { self.schedule_readiness(tick); - trace!("inserted timout; slot={}; token={:?}", slot, token); + trace!("inserted timeout; slot={}; token={:?}", slot, token); // Return the new timeout Ok(Timeout { @@ -217,7 +215,7 @@ impl Timer { } pub fn cancel_timeout(&mut self, timeout: &Timeout) -> Option { - let links = match self.entries.get(timeout.token) { + let links = match self.entries.get(timeout.token.into()) { Some(e) => e.links, None => return None }; @@ -228,7 +226,7 @@ impl Timer { } self.unlink(&links, timeout.token); - self.entries.remove(timeout.token).map(|e| e.state) + Some(self.entries.remove(timeout.token.into()).state) } pub fn poll(&mut self) -> Option { @@ -269,7 +267,7 @@ impl Timer { self.wheel[slot].next_tick = TICK_MAX; } - let links = self.entries[curr].links; + let links = self.entries[curr.into()].links; if links.tick <= self.tick { trace!("triggering; token={:?}", curr); @@ -278,8 +276,7 @@ impl Timer { self.unlink(&links, curr); // Remove and return the token - return self.entries.remove(curr) - .map(|e| e.state); + return Some(self.entries.remove(curr.into()).state); } else { let next_tick = self.wheel[slot].next_tick; self.wheel[slot].next_tick = cmp::min(next_tick, links.tick); @@ -309,11 +306,11 @@ impl Timer { let slot = self.slot_for(links.tick); self.wheel[slot].head = links.next; } else { - self.entries[links.prev].links.next = 
links.next; + self.entries[links.prev.into()].links.next = links.next; } if links.next != EMPTY { - self.entries[links.next].links.prev = links.prev; + self.entries[links.next.into()].links.prev = links.prev; if token == self.next { self.next = links.next; @@ -354,7 +351,7 @@ impl Timer { // Next tick containing a timeout fn next_tick(&self) -> Option { if self.next != EMPTY { - let slot = self.slot_for(self.entries[self.next].links.tick); + let slot = self.slot_for(self.entries[self.next.into()].links.tick); if self.wheel[slot].next_tick == self.tick { // There is data ready right now @@ -497,23 +494,16 @@ impl Entry { } impl fmt::Display for TimerError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{}: {}", self.kind, self.desc) - } -} - -impl TimerError { - fn overflow() -> TimerError { - TimerError { - kind: TimerOverflow, - desc: "too many timer entries" - } + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + // `TimerError` will never be constructed. + unreachable!(); } } impl error::Error for TimerError { fn description(&self) -> &str { - self.desc + // `TimerError` will never be constructed. + unreachable!(); } } diff --git a/third_party/rust/mio/src/token.rs b/third_party/rust/mio/src/token.rs index b31a31b2cb3e..09e42450bc43 100644 --- a/third_party/rust/mio/src/token.rs +++ b/third_party/rust/mio/src/token.rs @@ -11,8 +11,10 @@ /// example, `HashMap` is used, but usually something like [`slab`] is better. 
/// /// ``` +/// # use std::error::Error; +/// # fn try_main() -> Result<(), Box> { /// use mio::{Events, Ready, Poll, PollOpt, Token}; -/// use mio::tcp::TcpListener; +/// use mio::net::TcpListener; /// /// use std::thread; /// use std::io::{self, Read}; @@ -32,19 +34,19 @@ /// let mut next_socket_index = 0; /// /// // The `Poll` instance -/// let poll = Poll::new().unwrap(); +/// let poll = Poll::new()?; /// /// // Tcp listener -/// let listener = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); +/// let listener = TcpListener::bind(&"127.0.0.1:0".parse()?)?; /// /// // Register the listener /// poll.register(&listener, /// LISTENER, /// Ready::readable(), -/// PollOpt::edge()).unwrap(); +/// PollOpt::edge())?; /// /// // Spawn a thread that will connect a bunch of sockets then close them -/// let addr = listener.local_addr().unwrap(); +/// let addr = listener.local_addr()?; /// thread::spawn(move || { /// use std::net::TcpStream; /// @@ -64,7 +66,7 @@ /// // The main event loop /// loop { /// // Wait for events -/// poll.poll(&mut events, None).unwrap(); +/// poll.poll(&mut events, None)?; /// /// for event in &events { /// match event.token() { @@ -76,7 +78,7 @@ /// Ok((socket, _)) => { /// // Shutdown the server /// if next_socket_index == MAX_SOCKETS { -/// return; +/// return Ok(()); /// } /// /// // Get the token for the socket @@ -87,7 +89,7 @@ /// poll.register(&socket, /// token, /// Ready::readable(), -/// PollOpt::edge()).unwrap(); +/// PollOpt::edge())?; /// /// // Store the socket /// sockets.insert(token, socket); @@ -113,7 +115,7 @@ /// Ok(_) => unreachable!(), /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// // Socket is not ready anymore, stop reading -/// continue; +/// break; /// } /// e => panic!("err={:?}", e), // Unexpected error /// } @@ -122,6 +124,12 @@ /// } /// } /// } +/// # Ok(()) +/// # } +/// # +/// # fn main() { +/// # try_main().unwrap(); +/// # } /// ``` /// /// [`Evented`]: event/trait.Evented.html 
diff --git a/third_party/rust/mio/src/udp.rs b/third_party/rust/mio/src/udp.rs index eba542cbeb94..a71bd2191412 100644 --- a/third_party/rust/mio/src/udp.rs +++ b/third_party/rust/mio/src/udp.rs @@ -29,7 +29,7 @@ pub struct UdpSocket { impl UdpSocket { /// Creates a UDP socket from the given address. pub fn bind(addr: &SocketAddr) -> io::Result { - let socket = try!(net::UdpSocket::bind(addr)); + let socket = net::UdpSocket::bind(addr)?; UdpSocket::from_socket(socket) } @@ -45,7 +45,7 @@ impl UdpSocket { /// options like `reuse_address` or binding to multiple addresses. pub fn from_socket(socket: net::UdpSocket) -> io::Result { Ok(UdpSocket { - sys: try!(sys::UdpSocket::new(socket)), + sys: sys::UdpSocket::new(socket)?, selector_id: SelectorId::new(), }) } @@ -80,8 +80,17 @@ impl UdpSocket { self.sys.send_to(buf, target).map_non_block() } - /// Receives data from the socket. On success, returns the number of bytes - /// read and the address from whence the data came. + /// Receives data from the socket and stores data in the supplied buffer `buf`. On success, + /// returns the number of bytes read and the address from whence the data came. + /// + /// The function must be called with valid byte array `buf` of sufficient size to + /// hold the message bytes. If a message is too long to fit in the supplied buffer, + /// excess bytes may be discarded. + /// + /// The function does not read from `buf`, but is overwriting previous content of `buf`. + /// + /// Assuming the function has read `n` bytes, slicing `&buf[..n]` provides + /// efficient access with iterators and boundary checks. pub fn recv_from(&self, buf: &mut [u8]) -> io::Result> { self.sys.recv_from(buf).map_non_block() @@ -89,16 +98,22 @@ impl UdpSocket { /// Sends data on the socket to the address previously bound via connect(). On success, /// returns the number of bytes written. - /// - /// Address type can be any implementor of `ToSocketAddrs` trait. See its - /// documentation for concrete examples. 
pub fn send(&self, buf: &[u8]) -> io::Result> { self.sys.send(buf).map_non_block() } - /// Receives data from the socket previously bound with connect(). On success, returns - /// the number of bytes read and the address from whence the data came. + /// Receives data from the socket previously bound with connect() and stores data in + /// the supplied buffer `buf`. On success, returns the number of bytes read. + /// + /// The function must be called with valid byte array `buf` of sufficient size to + /// hold the message bytes. If a message is too long to fit in the supplied buffer, + /// excess bytes may be discarded. + /// + /// The function does not read from `buf`, but is overwriting previous content of `buf`. + /// + /// Assuming the function has read `n` bytes, slicing `&buf[..n]` provides + /// efficient access with iterators and boundary checks. pub fn recv(&self, buf: &mut [u8]) -> io::Result> { self.sys.recv(buf).map_non_block() @@ -264,7 +279,7 @@ impl UdpSocket { impl Evented for UdpSocket { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { - try!(self.selector_id.associate_selector(poll)); + self.selector_id.associate_selector(poll)?; self.sys.register(poll, token, interest, opts) } @@ -283,24 +298,24 @@ impl Evented for UdpSocket { * */ -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, RawFd}; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl IntoRawFd for UdpSocket { fn into_raw_fd(self) -> RawFd { self.sys.into_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl AsRawFd for UdpSocket { fn as_raw_fd(&self) -> RawFd { self.sys.as_raw_fd() } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] impl FromRawFd for UdpSocket { unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket { UdpSocket { diff --git a/third_party/rust/mio/test/benchmark.rs b/third_party/rust/mio/test/benchmark.rs new file mode 
100644 index 000000000000..1e4fac517c68 --- /dev/null +++ b/third_party/rust/mio/test/benchmark.rs @@ -0,0 +1,80 @@ +use std::mem; +use mio::net::{AddressFamily, Inet, Inet6, SockAddr, InetAddr, IPv4Addr, SocketType, Dgram, Stream}; +use std::io::net::ip::IpAddr; +use native::NativeTaskBuilder; +use std::task::TaskBuilder; +use mio::os::{from_sockaddr}; +use time::Instant; +use std::vec::*; +use std::io::timer; + +mod nix { + pub use nix::c_int; + pub use nix::fcntl::{Fd, O_NONBLOCK, O_CLOEXEC}; + pub use nix::errno::{EWOULDBLOCK, EINPROGRESS}; + pub use nix::sys::socket::*; + pub use nix::unistd::*; + pub use nix::sys::epoll::*; +} + +fn timed(label: &str, f: ||) { + let start = Instant::now(); + f(); + let elapsed = start.elapsed(); + println!(" {}: {}", label, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0); +} + +fn init(saddr: &str) -> (nix::Fd, nix::Fd) { + let optval = 1i; + let addr = SockAddr::parse(saddr.as_slice()).expect("could not parse InetAddr"); + let srvfd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC).unwrap(); + nix::setsockopt(srvfd, nix::SOL_SOCKET, nix::SO_REUSEADDR, &optval).unwrap(); + nix::bind(srvfd, &from_sockaddr(&addr)).unwrap(); + nix::listen(srvfd, 256u).unwrap(); + + let fd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap(); + let res = nix::connect(fd, &from_sockaddr(&addr)); + let start = Instant::now(); + println!("connecting : {}", res); + + let clifd = nix::accept4(srvfd, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap(); + let elapsed = start.elapsed(); + println!("accepted : {} - {}", clifd, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0); + + (clifd, srvfd) +} + +#[test] +fn read_bench() { + let (clifd, srvfd) = init("10.10.1.5:11111"); + let mut buf = Vec::with_capacity(1600); + unsafe { buf.set_len(1600); } + timed("read", || { + let mut i = 0u; + while i < 10000000 { + let res = nix::read(clifd, 
buf.as_mut_slice()); + assert_eq!(res.unwrap_err().kind, nix::EWOULDBLOCK); + i = i + 1; + } + }); +} + +#[test] +fn epollctl_bench() { + let (clifd, srvfd) = init("10.10.1.5:22222"); + + let epfd = nix::epoll_create().unwrap(); + let info = nix::EpollEvent { events: nix::EPOLLIN | nix::EPOLLONESHOT | nix::EPOLLET, + data: 0u64 }; + + nix::epoll_ctl(epfd, nix::EpollCtlAdd, clifd, &info); + + timed("epoll_ctl", || { + let mut i = 0u; + while i < 10000000 { + nix::epoll_ctl(epfd, nix::EpollCtlMod, clifd, &info); + i = i + 1; + } + }); + +} diff --git a/third_party/rust/mio/test/mod.rs b/third_party/rust/mio/test/mod.rs new file mode 100644 index 000000000000..75cda53f4461 --- /dev/null +++ b/third_party/rust/mio/test/mod.rs @@ -0,0 +1,217 @@ +#![allow(deprecated)] + +extern crate mio; +extern crate bytes; +extern crate net2; + +#[macro_use] +extern crate log; +extern crate env_logger; +extern crate iovec; +extern crate slab; +extern crate tempdir; + +#[cfg(target_os = "fuchsia")] +extern crate fuchsia_zircon as zircon; + +pub use ports::localhost; + +mod test_custom_evented; +mod test_close_on_drop; +mod test_double_register; +mod test_echo_server; +mod test_local_addr_ready; +mod test_multicast; +mod test_oneshot; +mod test_poll; +mod test_register_deregister; +mod test_register_multiple_event_loops; +mod test_reregister_without_poll; +mod test_smoke; +mod test_tcp; +mod test_tcp_level; +mod test_udp_level; +mod test_udp_socket; +mod test_write_then_drop; + +#[cfg(feature = "with-deprecated")] +mod test_notify; +#[cfg(feature = "with-deprecated")] +mod test_poll_channel; +#[cfg(feature = "with-deprecated")] +mod test_tick; + +// The following tests are for deprecated features. 
Only run these tests on +// platforms that were supported from before the features were deprecated +#[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))] +#[cfg(feature = "with-deprecated")] +mod test_timer; +#[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))] +#[cfg(feature = "with-deprecated")] +mod test_battery; + +#[cfg(any(target_os = "macos", target_os = "linux"))] +#[cfg(feature = "with-deprecated")] +mod test_unix_echo_server; +#[cfg(any(target_os = "macos", target_os = "linux"))] +#[cfg(feature = "with-deprecated")] +mod test_unix_pass_fd; +#[cfg(any(target_os = "macos", target_os = "linux"))] +#[cfg(feature = "with-deprecated")] +mod test_uds_shutdown; +#[cfg(any(target_os = "macos", target_os = "linux"))] +#[cfg(feature = "with-deprecated")] +mod test_subprocess_pipe; +#[cfg(any(target_os = "macos", target_os = "linux"))] +#[cfg(feature = "with-deprecated")] +mod test_broken_pipe; + +#[cfg(any(target_os = "fuchsia"))] +mod test_fuchsia_handles; + +use bytes::{Buf, MutBuf}; +use std::io::{self, Read, Write}; +use std::time::Duration; +use mio::{Events, Poll}; +use mio::event::Event; + +pub trait TryRead { + fn try_read_buf(&mut self, buf: &mut B) -> io::Result> + where Self : Sized + { + // Reads the length of the slice supplied by buf.mut_bytes into the buffer + // This is not guaranteed to consume an entire datagram or segment. 
+ // If your protocol is msg based (instead of continuous stream) you should + // ensure that your buffer is large enough to hold an entire segment (1532 bytes if not jumbo + // frames) + let res = self.try_read(unsafe { buf.mut_bytes() }); + + if let Ok(Some(cnt)) = res { + unsafe { buf.advance(cnt); } + } + + res + } + + fn try_read(&mut self, buf: &mut [u8]) -> io::Result>; +} + +pub trait TryWrite { + fn try_write_buf(&mut self, buf: &mut B) -> io::Result> + where Self : Sized + { + let res = self.try_write(buf.bytes()); + + if let Ok(Some(cnt)) = res { + buf.advance(cnt); + } + + res + } + + fn try_write(&mut self, buf: &[u8]) -> io::Result>; +} + +impl TryRead for T { + fn try_read(&mut self, dst: &mut [u8]) -> io::Result> { + self.read(dst).map_non_block() + } +} + +impl TryWrite for T { + fn try_write(&mut self, src: &[u8]) -> io::Result> { + self.write(src).map_non_block() + } +} + +/* + * + * ===== Helpers ===== + * + */ + +/// A helper trait to provide the map_non_block function on Results. +trait MapNonBlock { + /// Maps a `Result` to a `Result>` by converting + /// operation-would-block errors into `Ok(None)`. 
+ fn map_non_block(self) -> io::Result>; +} + +impl MapNonBlock for io::Result { + fn map_non_block(self) -> io::Result> { + use std::io::ErrorKind::WouldBlock; + + match self { + Ok(value) => Ok(Some(value)), + Err(err) => { + if let WouldBlock = err.kind() { + Ok(None) + } else { + Err(err) + } + } + } + } +} + +mod ports { + use std::net::SocketAddr; + use std::str::FromStr; + use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + use std::sync::atomic::Ordering::SeqCst; + + // Helper for getting a unique port for the task run + // TODO: Reuse ports to not spam the system + static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT; + const FIRST_PORT: usize = 18080; + + fn next_port() -> usize { + unsafe { + // If the atomic was never used, set it to the initial port + NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst); + + // Get and increment the port list + NEXT_PORT.fetch_add(1, SeqCst) + } + } + + pub fn localhost() -> SocketAddr { + let s = format!("127.0.0.1:{}", next_port()); + FromStr::from_str(&s).unwrap() + } +} + +pub fn sleep_ms(ms: u64) { + use std::thread; + use std::time::Duration; + thread::sleep(Duration::from_millis(ms)); +} + +pub fn expect_events(poll: &Poll, + event_buffer: &mut Events, + poll_try_count: usize, + mut expected: Vec) +{ + const MS: u64 = 1_000; + + for _ in 0..poll_try_count { + poll.poll(event_buffer, Some(Duration::from_millis(MS))).unwrap(); + for event in event_buffer.iter() { + let pos_opt = match expected.iter().position(|exp_event| { + (event.token() == exp_event.token()) && + event.readiness().contains(exp_event.readiness()) + }) { + Some(x) => Some(x), + None => None, + }; + if let Some(pos) = pos_opt { expected.remove(pos); } + } + + if expected.len() == 0 { + break; + } + } + + assert!(expected.len() == 0, "The following expected events were not found: {:?}", expected); +} + diff --git a/third_party/rust/mio/test/test_battery.rs b/third_party/rust/mio/test/test_battery.rs new file mode 100644 index 
000000000000..d918196515ec --- /dev/null +++ b/third_party/rust/mio/test/test_battery.rs @@ -0,0 +1,269 @@ +use {localhost, sleep_ms, TryRead, TryWrite}; +use mio::*; +use mio::deprecated::{EventLoop, EventLoopBuilder, Handler}; +use mio::net::{TcpListener, TcpStream}; +use std::collections::LinkedList; +use slab::Slab; +use std::{io, thread}; +use std::time::Duration; + +// Don't touch the connection slab +const SERVER: Token = Token(10_000_000); +const CLIENT: Token = Token(10_000_001); + +#[cfg(windows)] +const N: usize = 10_000; +#[cfg(unix)] +const N: usize = 1_000_000; + +struct EchoConn { + sock: TcpStream, + token: Option, + count: usize, + buf: Vec +} + +impl EchoConn { + fn new(sock: TcpStream) -> EchoConn { + let mut ec = + EchoConn { + sock: sock, + token: None, + buf: Vec::with_capacity(22), + count: 0 + }; + unsafe { ec.buf.set_len(22) }; + ec + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + event_loop.reregister(&self.sock, self.token.unwrap(), + Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()) + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + loop { + match self.sock.try_read(&mut self.buf[..]) { + Ok(None) => { + break; + } + Ok(Some(_)) => { + self.count += 1; + if self.count % 10000 == 0 { + info!("Received {} messages", self.count); + } + if self.count == N { + event_loop.shutdown(); + } + } + Err(_) => { + break; + } + + }; + } + + event_loop.reregister(&self.sock, self.token.unwrap(), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct EchoServer { + sock: TcpListener, + conns: Slab +} + +impl EchoServer { + fn accept(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("server accepting socket"); + + let sock = self.sock.accept().unwrap().0; + let conn = EchoConn::new(sock,); + let tok = self.conns.insert(conn); + + // Register the connection + self.conns[tok].token = Some(Token(tok)); + event_loop.register(&self.conns[tok].sock, 
Token(tok), Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()) + .ok().expect("could not register socket with event loop"); + + Ok(()) + } + + fn conn_readable(&mut self, event_loop: &mut EventLoop, + tok: Token) -> io::Result<()> { + debug!("server conn readable; tok={:?}", tok); + self.conn(tok).readable(event_loop) + } + + fn conn_writable(&mut self, event_loop: &mut EventLoop, + tok: Token) -> io::Result<()> { + debug!("server conn writable; tok={:?}", tok); + self.conn(tok).writable(event_loop) + } + + fn conn<'a>(&'a mut self, tok: Token) -> &'a mut EchoConn { + &mut self.conns[tok.into()] + } +} + +struct EchoClient { + sock: TcpStream, + backlog: LinkedList, + token: Token, + count: u32 +} + + +// Sends a message and expects to receive the same exact message, one at a time +impl EchoClient { + fn new(sock: TcpStream, tok: Token) -> EchoClient { + + EchoClient { + sock: sock, + backlog: LinkedList::new(), + token: tok, + count: 0 + } + } + + fn readable(&mut self, _event_loop: &mut EventLoop) -> io::Result<()> { + Ok(()) + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket writable"); + + while self.backlog.len() > 0 { + match self.sock.try_write(self.backlog.front().unwrap().as_bytes()) { + Ok(None) => { + break; + } + Ok(Some(_)) => { + self.backlog.pop_front(); + self.count += 1; + if self.count % 10000 == 0 { + info!("Sent {} messages", self.count); + } + } + Err(e) => { debug!("not implemented; client err={:?}", e); break; } + } + } + if self.backlog.len() > 0 { + event_loop.reregister(&self.sock, self.token, Ready::writable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + } + + Ok(()) + } +} + +struct Echo { + server: EchoServer, + client: EchoClient, +} + +impl Echo { + fn new(srv: TcpListener, client: TcpStream) -> Echo { + Echo { + server: EchoServer { + sock: srv, + conns: Slab::with_capacity(128), + }, + client: EchoClient::new(client, CLIENT), + } + } +} + +impl Handler for Echo { + 
type Timeout = usize; + type Message = String; + + fn ready(&mut self, event_loop: &mut EventLoop, token: Token, + events: Ready) { + + if events.is_readable() { + match token { + SERVER => self.server.accept(event_loop).unwrap(), + CLIENT => self.client.readable(event_loop).unwrap(), + i => self.server.conn_readable(event_loop, i).unwrap() + } + } + if events.is_writable() { + match token { + SERVER => panic!("received writable for token 0"), + CLIENT => self.client.writable(event_loop).unwrap(), + _ => self.server.conn_writable(event_loop, token).unwrap() + } + } + } + + fn notify(&mut self, event_loop: &mut EventLoop, msg: String) { + match self.client.sock.try_write(msg.as_bytes()) { + Ok(Some(n)) => { + self.client.count += 1; + if self.client.count % 10000 == 0 { + info!("Sent {} bytes: count {}", n, self.client.count); + } + }, + + _ => { + self.client.backlog.push_back(msg); + event_loop.reregister( + &self.client.sock, + self.client.token, + Ready::writable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + } + } + } +} + +#[test] +pub fn test_echo_server() { + debug!("Starting TEST_ECHO_SERVER"); + let mut b = EventLoopBuilder::new(); + b.notify_capacity(1_048_576) + .messages_per_tick(64) + .timer_tick(Duration::from_millis(100)) + .timer_wheel_size(1_024) + .timer_capacity(65_536); + + let mut event_loop = b.build().unwrap(); + + let addr = localhost(); + + let srv = TcpListener::bind(&addr).unwrap(); + + info!("listen for connections"); + event_loop.register(&srv, SERVER, Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let sock = TcpStream::connect(&addr).unwrap(); + + // Connect to the server + event_loop.register(&sock, CLIENT, Ready::writable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + let chan = event_loop.channel(); + + let go = move || { + let mut i = N; + + sleep_ms(1_000); + + let message = "THIS IS A TEST MESSAGE".to_string(); + while i > 0 { + chan.send(message.clone()).unwrap(); + i -= 1; + if i % 
10000 == 0 { + info!("Enqueued {} messages", N - i); + } + } + }; + + let t = thread::spawn(go); + + // Start the event loop + event_loop.run(&mut Echo::new(srv, sock)).unwrap(); + t.join().unwrap(); +} diff --git a/third_party/rust/mio/test/test_broken_pipe.rs b/third_party/rust/mio/test/test_broken_pipe.rs new file mode 100644 index 000000000000..1cd0ca7465d1 --- /dev/null +++ b/third_party/rust/mio/test/test_broken_pipe.rs @@ -0,0 +1,28 @@ +use mio::{Token, Ready, PollOpt}; +use mio::deprecated::{unix, EventLoop, Handler}; +use std::time::Duration; + +pub struct BrokenPipeHandler; + +impl Handler for BrokenPipeHandler { + type Timeout = (); + type Message = (); + fn ready(&mut self, _: &mut EventLoop, token: Token, _: Ready) { + if token == Token(1) { + panic!("Received ready() on a closed pipe."); + } + } +} + +#[test] +pub fn broken_pipe() { + let mut event_loop: EventLoop = EventLoop::new().unwrap(); + let (reader, _) = unix::pipe().unwrap(); + + event_loop.register(&reader, Token(1), Ready::all(), PollOpt::edge()) + .unwrap(); + + let mut handler = BrokenPipeHandler; + drop(reader); + event_loop.run_once(&mut handler, Some(Duration::from_millis(1000))).unwrap(); +} diff --git a/third_party/rust/mio/test/test_close_on_drop.rs b/third_party/rust/mio/test/test_close_on_drop.rs new file mode 100644 index 000000000000..c429d3bf2500 --- /dev/null +++ b/third_party/rust/mio/test/test_close_on_drop.rs @@ -0,0 +1,119 @@ +use {localhost, TryRead}; +use mio::{Events, Poll, PollOpt, Ready, Token}; +use bytes::ByteBuf; +use mio::net::{TcpListener, TcpStream}; + +use self::TestState::{Initial, AfterRead}; + +const SERVER: Token = Token(0); +const CLIENT: Token = Token(1); + +#[derive(Debug, PartialEq)] +enum TestState { + Initial, + AfterRead, +} + +struct TestHandler { + srv: TcpListener, + cli: TcpStream, + state: TestState, + shutdown: bool, +} + +impl TestHandler { + fn new(srv: TcpListener, cli: TcpStream) -> TestHandler { + TestHandler { + srv: srv, + cli: cli, + 
state: Initial, + shutdown: false, + } + } + + fn handle_read(&mut self, poll: &mut Poll, tok: Token, events: Ready) { + debug!("readable; tok={:?}; hint={:?}", tok, events); + + match tok { + SERVER => { + debug!("server connection ready for accept"); + let _ = self.srv.accept().unwrap(); + } + CLIENT => { + debug!("client readable"); + + match self.state { + Initial => { + let mut buf = [0; 4096]; + debug!("GOT={:?}", self.cli.try_read(&mut buf[..])); + self.state = AfterRead; + }, + AfterRead => {} + } + + let mut buf = ByteBuf::mut_with_capacity(1024); + + match self.cli.try_read_buf(&mut buf) { + Ok(Some(0)) => self.shutdown = true, + Ok(_) => panic!("the client socket should not be readable"), + Err(e) => panic!("Unexpected error {:?}", e) + } + } + _ => panic!("received unknown token {:?}", tok) + } + poll.reregister(&self.cli, CLIENT, Ready::readable(), PollOpt::edge()).unwrap(); + } + + fn handle_write(&mut self, poll: &mut Poll, tok: Token, _: Ready) { + match tok { + SERVER => panic!("received writable for token 0"), + CLIENT => { + debug!("client connected"); + poll.reregister(&self.cli, CLIENT, Ready::readable(), PollOpt::edge()).unwrap(); + } + _ => panic!("received unknown token {:?}", tok) + } + } +} + +#[test] +pub fn test_close_on_drop() { + let _ = ::env_logger::init(); + debug!("Starting TEST_CLOSE_ON_DROP"); + let mut poll = Poll::new().unwrap(); + + // The address to connect to - localhost + a unique port + let addr = localhost(); + + // == Create & setup server socket + let srv = TcpListener::bind(&addr).unwrap(); + + poll.register(&srv, SERVER, Ready::readable(), PollOpt::edge()).unwrap(); + + // == Create & setup client socket + let sock = TcpStream::connect(&addr).unwrap(); + + poll.register(&sock, CLIENT, Ready::writable(), PollOpt::edge()).unwrap(); + + // == Create storage for events + let mut events = Events::with_capacity(1024); + + // == Setup test handler + let mut handler = TestHandler::new(srv, sock); + + // == Run test + while 
!handler.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.readiness().is_readable() { + handler.handle_read(&mut poll, event.token(), event.readiness()); + } + + if event.readiness().is_writable() { + handler.handle_write(&mut poll, event.token(), event.readiness()); + } + } + } + assert!(handler.state == AfterRead, "actual={:?}", handler.state); +} diff --git a/third_party/rust/mio/test/test_custom_evented.rs b/third_party/rust/mio/test/test_custom_evented.rs new file mode 100644 index 000000000000..8c996f4a77b4 --- /dev/null +++ b/third_party/rust/mio/test/test_custom_evented.rs @@ -0,0 +1,394 @@ +use mio::{Events, Poll, PollOpt, Ready, Registration, SetReadiness, Token}; +use mio::event::Evented; +use std::time::Duration; + +#[test] +fn smoke() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(128); + + let (r, set) = Registration::new2(); + r.register(&poll, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + let n = poll.poll(&mut events, Some(Duration::from_millis(0))).unwrap(); + assert_eq!(n, 0); + + set.set_readiness(Ready::readable()).unwrap(); + + let n = poll.poll(&mut events, Some(Duration::from_millis(0))).unwrap(); + assert_eq!(n, 1); + + assert_eq!(events.get(0).unwrap().token(), Token(0)); +} + +#[test] +fn set_readiness_before_register() { + use std::sync::{Arc, Barrier}; + use std::thread; + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(128); + + for _ in 0..5_000 { + let (r, set) = Registration::new2(); + + let b1 = Arc::new(Barrier::new(2)); + let b2 = b1.clone(); + + let th = thread::spawn(move || { + // set readiness before register + set.set_readiness(Ready::readable()).unwrap(); + + // run into barrier so both can pass + b2.wait(); + }); + + // wait for readiness + b1.wait(); + + // now register + poll.register(&r, Token(123), Ready::readable(), PollOpt::edge()).unwrap(); + + loop { + let n = poll.poll(&mut events, None).unwrap(); + + 
if n == 0 { + continue; + } + + assert_eq!(n, 1); + assert_eq!(events.get(0).unwrap().token(), Token(123)); + break; + } + + th.join().unwrap(); + } +} + +#[cfg(any(target_os = "linux", target_os = "macos", target_os = "windows"))] +mod stress { + use mio::{Events, Poll, PollOpt, Ready, Registration, SetReadiness, Token}; + use mio::event::Evented; + use std::time::Duration; + + #[test] + fn single_threaded_poll() { + use std::sync::Arc; + use std::sync::atomic::AtomicUsize; + use std::sync::atomic::Ordering::{Acquire, Release}; + use std::thread; + + const NUM_ATTEMPTS: usize = 30; + const NUM_ITERS: usize = 500; + const NUM_THREADS: usize = 4; + const NUM_REGISTRATIONS: usize = 128; + + for _ in 0..NUM_ATTEMPTS { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(NUM_REGISTRATIONS); + + let registrations: Vec<_> = (0..NUM_REGISTRATIONS).map(|i| { + let (r, s) = Registration::new2(); + r.register(&poll, Token(i), Ready::readable(), PollOpt::edge()).unwrap(); + (r, s) + }).collect(); + + let mut ready: Vec<_> = (0..NUM_REGISTRATIONS).map(|_| Ready::empty()).collect(); + + let remaining = Arc::new(AtomicUsize::new(NUM_THREADS)); + + for _ in 0..NUM_THREADS { + let remaining = remaining.clone(); + + let set_readiness: Vec = + registrations.iter().map(|r| r.1.clone()).collect(); + + thread::spawn(move || { + for _ in 0..NUM_ITERS { + for i in 0..NUM_REGISTRATIONS { + set_readiness[i].set_readiness(Ready::readable()).unwrap(); + set_readiness[i].set_readiness(Ready::empty()).unwrap(); + set_readiness[i].set_readiness(Ready::writable()).unwrap(); + set_readiness[i].set_readiness(Ready::readable() | Ready::writable()).unwrap(); + set_readiness[i].set_readiness(Ready::empty()).unwrap(); + } + } + + for i in 0..NUM_REGISTRATIONS { + set_readiness[i].set_readiness(Ready::readable()).unwrap(); + } + + remaining.fetch_sub(1, Release); + }); + } + + while remaining.load(Acquire) > 0 { + // Set interest + for (i, &(ref r, _)) in 
registrations.iter().enumerate() { + r.reregister(&poll, Token(i), Ready::writable(), PollOpt::edge()).unwrap(); + } + + poll.poll(&mut events, Some(Duration::from_millis(0))).unwrap(); + + for event in &events { + ready[event.token().0] = event.readiness(); + } + + // Update registration + // Set interest + for (i, &(ref r, _)) in registrations.iter().enumerate() { + r.reregister(&poll, Token(i), Ready::readable(), PollOpt::edge()).unwrap(); + } + } + + // Finall polls, repeat until readiness-queue empty + loop { + // Might not read all events from custom-event-queue at once, implementation dependend + poll.poll(&mut events, Some(Duration::from_millis(0))).unwrap(); + if events.is_empty() { + // no more events in readiness queue pending + break; + } + for event in &events { + ready[event.token().0] = event.readiness(); + } + } + + // Everything should be flagged as readable + for ready in ready { + assert_eq!(ready, Ready::readable()); + } + } + } + + #[test] + fn multi_threaded_poll() { + use std::sync::{Arc, Barrier}; + use std::sync::atomic::{AtomicUsize}; + use std::sync::atomic::Ordering::{Relaxed, SeqCst}; + use std::thread; + + const ENTRIES: usize = 10_000; + const PER_ENTRY: usize = 16; + const THREADS: usize = 4; + const NUM: usize = ENTRIES * PER_ENTRY; + + struct Entry { + #[allow(dead_code)] + registration: Registration, + set_readiness: SetReadiness, + num: AtomicUsize, + } + + impl Entry { + fn fire(&self) { + self.set_readiness.set_readiness(Ready::readable()).unwrap(); + } + } + + let poll = Arc::new(Poll::new().unwrap()); + let mut entries = vec![]; + + // Create entries + for i in 0..ENTRIES { + let (registration, set_readiness) = Registration::new2(); + registration.register(&poll, Token(i), Ready::readable(), PollOpt::edge()).unwrap(); + + entries.push(Entry { + registration: registration, + set_readiness: set_readiness, + num: AtomicUsize::new(0), + }); + } + + let total = Arc::new(AtomicUsize::new(0)); + let entries = Arc::new(entries); + 
let barrier = Arc::new(Barrier::new(THREADS)); + + let mut threads = vec![]; + + for th in 0..THREADS { + let poll = poll.clone(); + let total = total.clone(); + let entries = entries.clone(); + let barrier = barrier.clone(); + + threads.push(thread::spawn(move || { + let mut events = Events::with_capacity(128); + + barrier.wait(); + + // Prime all the registrations + let mut i = th; + while i < ENTRIES { + entries[i].fire(); + i += THREADS; + } + + let mut n = 0; + + + while total.load(SeqCst) < NUM { + // A poll timeout is necessary here because there may be more + // than one threads blocked in `poll` when the final wakeup + // notification arrives (and only notifies one thread). + n += poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); + + let mut num_this_tick = 0; + + for event in &events { + let e = &entries[event.token().0]; + + let mut num = e.num.load(Relaxed); + + loop { + if num < PER_ENTRY { + let actual = e.num.compare_and_swap(num, num + 1, Relaxed); + + if actual == num { + num_this_tick += 1; + e.fire(); + break; + } + + num = actual; + } else { + break; + } + } + } + + total.fetch_add(num_this_tick, SeqCst); + } + + n + })); + } + + let _: Vec<_> = threads.into_iter() + .map(|th| th.join().unwrap()) + .collect(); + + for entry in entries.iter() { + assert_eq!(PER_ENTRY, entry.num.load(Relaxed)); + } + } + + #[test] + fn with_small_events_collection() { + const N: usize = 8; + const ITER: usize = 1_000; + + use std::sync::{Arc, Barrier}; + use std::sync::atomic::AtomicBool; + use std::sync::atomic::Ordering::{Acquire, Release}; + use std::thread; + + let poll = Poll::new().unwrap(); + let mut registrations = vec![]; + + let barrier = Arc::new(Barrier::new(N + 1)); + let done = Arc::new(AtomicBool::new(false)); + + for i in 0..N { + let (registration, set_readiness) = Registration::new2(); + poll.register(®istration, Token(i), Ready::readable(), PollOpt::edge()).unwrap(); + + registrations.push(registration); + + let barrier = 
barrier.clone(); + let done = done.clone(); + + thread::spawn(move || { + barrier.wait(); + + while !done.load(Acquire) { + set_readiness.set_readiness(Ready::readable()).unwrap(); + } + + // Set one last time + set_readiness.set_readiness(Ready::readable()).unwrap(); + }); + } + + let mut events = Events::with_capacity(4); + + barrier.wait(); + + for _ in 0..ITER { + poll.poll(&mut events, None).unwrap(); + } + + done.store(true, Release); + + let mut final_ready = vec![false; N]; + + + for _ in 0..5 { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + final_ready[event.token().0] = true; + } + + if final_ready.iter().all(|v| *v) { + return; + } + + thread::sleep(Duration::from_millis(10)); + } + + panic!("dead lock?"); + } +} + +#[test] +fn drop_registration_from_non_main_thread() { + use std::thread; + use std::sync::mpsc::channel; + + const THREADS: usize = 8; + const ITERS: usize = 50_000; + + let mut poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut senders = Vec::with_capacity(THREADS); + let mut token_index = 0; + + // spawn threads, which will send messages to single receiver + for _ in 0..THREADS { + let (tx, rx) = channel::<(Registration, SetReadiness)>(); + senders.push(tx); + + thread::spawn(move || { + for (registration, set_readiness) in rx { + let _ = set_readiness.set_readiness(Ready::readable()); + drop(registration); + drop(set_readiness); + } + }); + } + + let mut index: usize = 0; + for _ in 0..ITERS { + let (registration, set_readiness) = Registration::new2(); + registration.register(&mut poll, Token(token_index), Ready::readable(), PollOpt::edge()).unwrap(); + let _ = senders[index].send((registration, set_readiness)); + + token_index += 1; + index += 1; + if index == THREADS { + index = 0; + + let (registration, set_readiness) = Registration::new2(); + registration.register(&mut poll, Token(token_index), Ready::readable(), PollOpt::edge()).unwrap(); + let _ = 
set_readiness.set_readiness(Ready::readable()); + drop(registration); + drop(set_readiness); + token_index += 1; + + thread::park_timeout(Duration::from_millis(0)); + let _ = poll.poll(&mut events, None).unwrap(); + } + } +} diff --git a/third_party/rust/mio/test/test_double_register.rs b/third_party/rust/mio/test/test_double_register.rs new file mode 100644 index 000000000000..c3d011c81ed9 --- /dev/null +++ b/third_party/rust/mio/test/test_double_register.rs @@ -0,0 +1,17 @@ +//! A smoke test for windows compatibility + +#[test] +#[cfg(any(target_os = "linux", target_os = "windows"))] +pub fn test_double_register() { + use mio::*; + use mio::net::TcpListener; + + let poll = Poll::new().unwrap(); + + // Create the listener + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Register the listener with `Poll` + poll.register(&l, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + assert!(poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).is_err()); +} diff --git a/third_party/rust/mio/test/test_echo_server.rs b/third_party/rust/mio/test/test_echo_server.rs new file mode 100644 index 000000000000..c0eda942ba32 --- /dev/null +++ b/third_party/rust/mio/test/test_echo_server.rs @@ -0,0 +1,303 @@ +use {localhost, TryRead, TryWrite}; +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::net::{TcpListener, TcpStream}; +use bytes::{Buf, ByteBuf, MutByteBuf, SliceBuf}; +use slab::Slab; +use std::io; + +const SERVER: Token = Token(10_000_000); +const CLIENT: Token = Token(10_000_001); + +struct EchoConn { + sock: TcpStream, + buf: Option, + mut_buf: Option, + token: Option, + interest: Ready +} + +impl EchoConn { + fn new(sock: TcpStream) -> EchoConn { + EchoConn { + sock: sock, + buf: None, + mut_buf: Some(ByteBuf::mut_with_capacity(2048)), + token: None, + interest: Ready::empty(), + } + } + + fn writable(&mut self, poll: &mut Poll) -> io::Result<()> { + let mut buf = self.buf.take().unwrap(); + + match 
self.sock.try_write_buf(&mut buf) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + + self.buf = Some(buf); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CONN : we wrote {} bytes!", r); + + self.mut_buf = Some(buf.flip()); + + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + } + Err(e) => debug!("not implemented; client err={:?}", e), + } + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + poll.reregister(&self.sock, self.token.unwrap(), self.interest, + PollOpt::edge() | PollOpt::oneshot()) + } + + fn readable(&mut self, poll: &mut Poll) -> io::Result<()> { + let mut buf = self.mut_buf.take().unwrap(); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + debug!("CONN : spurious read wakeup"); + self.mut_buf = Some(buf); + } + Ok(Some(r)) => { + debug!("CONN : we read {} bytes!", r); + + // prepare to provide this to writable + self.buf = Some(buf.flip()); + + self.interest.remove(Ready::readable()); + self.interest.insert(Ready::writable()); + } + Err(e) => { + debug!("not implemented; client err={:?}", e); + self.interest.remove(Ready::readable()); + } + + }; + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + poll.reregister(&self.sock, self.token.unwrap(), self.interest, + PollOpt::edge()) + } +} + +struct EchoServer { + sock: TcpListener, + conns: Slab +} + +impl EchoServer { + fn accept(&mut self, poll: &mut Poll) -> io::Result<()> { + debug!("server accepting socket"); + + let sock = self.sock.accept().unwrap().0; + let conn = EchoConn::new(sock,); + let tok = self.conns.insert(conn); + + // Register the connection + self.conns[tok].token = Some(Token(tok)); + poll.register(&self.conns[tok].sock, Token(tok), Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()) + .ok().expect("could not register socket with event loop"); + + Ok(()) + } + + fn 
conn_readable(&mut self, poll: &mut Poll, + tok: Token) -> io::Result<()> { + debug!("server conn readable; tok={:?}", tok); + self.conn(tok).readable(poll) + } + + fn conn_writable(&mut self, poll: &mut Poll, + tok: Token) -> io::Result<()> { + debug!("server conn writable; tok={:?}", tok); + self.conn(tok).writable(poll) + } + + fn conn<'a>(&'a mut self, tok: Token) -> &'a mut EchoConn { + &mut self.conns[tok.into()] + } +} + +struct EchoClient { + sock: TcpStream, + msgs: Vec<&'static str>, + tx: SliceBuf<'static>, + rx: SliceBuf<'static>, + mut_buf: Option, + token: Token, + interest: Ready, + shutdown: bool, +} + + +// Sends a message and expects to receive the same exact message, one at a time +impl EchoClient { + fn new(sock: TcpStream, tok: Token, mut msgs: Vec<&'static str>) -> EchoClient { + let curr = msgs.remove(0); + + EchoClient { + sock: sock, + msgs: msgs, + tx: SliceBuf::wrap(curr.as_bytes()), + rx: SliceBuf::wrap(curr.as_bytes()), + mut_buf: Some(ByteBuf::mut_with_capacity(2048)), + token: tok, + interest: Ready::empty(), + shutdown: false, + } + } + + fn readable(&mut self, poll: &mut Poll) -> io::Result<()> { + debug!("client socket readable"); + + let mut buf = self.mut_buf.take().unwrap(); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + debug!("CLIENT : spurious read wakeup"); + self.mut_buf = Some(buf); + } + Ok(Some(r)) => { + debug!("CLIENT : We read {} bytes!", r); + + // prepare for reading + let mut buf = buf.flip(); + + while buf.has_remaining() { + let actual = buf.read_byte().unwrap(); + let expect = self.rx.read_byte().unwrap(); + + assert!(actual == expect, "actual={}; expect={}", actual, expect); + } + + self.mut_buf = Some(buf.flip()); + + self.interest.remove(Ready::readable()); + + if !self.rx.has_remaining() { + self.next_msg(poll).unwrap(); + } + } + Err(e) => { + panic!("not implemented; client err={:?}", e); + } + }; + + if !self.interest.is_empty() { + assert!(self.interest.is_readable() || 
self.interest.is_writable(), "actual={:?}", self.interest); + poll.reregister(&self.sock, self.token, self.interest, + PollOpt::edge() | PollOpt::oneshot())?; + } + + Ok(()) + } + + fn writable(&mut self, poll: &mut Poll) -> io::Result<()> { + debug!("client socket writable"); + + match self.sock.try_write_buf(&mut self.tx) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CLIENT : we wrote {} bytes!", r); + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + } + Err(e) => debug!("not implemented; client err={:?}", e) + } + + if self.interest.is_readable() || self.interest.is_writable() { + try!(poll.reregister(&self.sock, self.token, self.interest, + PollOpt::edge() | PollOpt::oneshot())); + } + + Ok(()) + } + + fn next_msg(&mut self, poll: &mut Poll) -> io::Result<()> { + if self.msgs.is_empty() { + self.shutdown = true; + return Ok(()); + } + + let curr = self.msgs.remove(0); + + debug!("client prepping next message"); + self.tx = SliceBuf::wrap(curr.as_bytes()); + self.rx = SliceBuf::wrap(curr.as_bytes()); + + self.interest.insert(Ready::writable()); + poll.reregister(&self.sock, self.token, self.interest, + PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct Echo { + server: EchoServer, + client: EchoClient, +} + +impl Echo { + fn new(srv: TcpListener, client: TcpStream, msgs: Vec<&'static str>) -> Echo { + Echo { + server: EchoServer { + sock: srv, + conns: Slab::with_capacity(128) + }, + client: EchoClient::new(client, CLIENT, msgs) + } + } +} + +#[test] +pub fn test_echo_server() { + debug!("Starting TEST_ECHO_SERVER"); + let mut poll = Poll::new().unwrap(); + + let addr = localhost(); + let srv = TcpListener::bind(&addr).unwrap(); + + info!("listen for connections"); + poll.register(&srv, SERVER, Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let sock = TcpStream::connect(&addr).unwrap(); + + // Connect to 
the server + poll.register(&sock, CLIENT, Ready::writable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + // == Create storage for events + let mut events = Events::with_capacity(1024); + + let mut handler = Echo::new(srv, sock, vec!["foo", "bar"]); + + // Start the event loop + while !handler.client.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + debug!("ready {:?} {:?}", event.token(), event.readiness()); + if event.readiness().is_readable() { + match event.token() { + SERVER => handler.server.accept(&mut poll).unwrap(), + CLIENT => handler.client.readable(&mut poll).unwrap(), + i => handler.server.conn_readable(&mut poll, i).unwrap() + } + } + + if event.readiness().is_writable() { + match event.token() { + SERVER => panic!("received writable for token 0"), + CLIENT => handler.client.writable(&mut poll).unwrap(), + i => handler.server.conn_writable(&mut poll, i).unwrap() + }; + } + } + } +} diff --git a/third_party/rust/mio/test/test_fuchsia_handles.rs b/third_party/rust/mio/test/test_fuchsia_handles.rs new file mode 100644 index 000000000000..85a14327f927 --- /dev/null +++ b/third_party/rust/mio/test/test_fuchsia_handles.rs @@ -0,0 +1,30 @@ +use mio::*; +use mio::fuchsia::EventedHandle; +use zircon::{self, AsHandleRef}; +use std::time::Duration; + +const MS: u64 = 1_000; + +#[test] +pub fn test_fuchsia_channel() { + let poll = Poll::new().unwrap(); + let mut event_buffer = Events::with_capacity(1); + let event_buffer = &mut event_buffer; + + let (channel0, channel1) = zircon::Channel::create(zircon::ChannelOpts::Normal).unwrap(); + let channel1_evented = unsafe { EventedHandle::new(channel1.raw_handle()) }; + + poll.register(&channel1_evented, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + poll.poll(event_buffer, Some(Duration::from_millis(MS))).unwrap(); + assert_eq!(event_buffer.len(), 0); + + channel0.write(&[1, 2, 3], &mut vec![], 0).unwrap(); + + poll.poll(event_buffer, 
Some(Duration::from_millis(MS))).unwrap(); + let event = event_buffer.get(0).unwrap(); + assert_eq!(event.token(), Token(1)); + assert!(event.readiness().is_readable()); + + poll.deregister(&channel1_evented).unwrap(); +} \ No newline at end of file diff --git a/third_party/rust/mio/test/test_local_addr_ready.rs b/third_party/rust/mio/test/test_local_addr_ready.rs new file mode 100644 index 000000000000..2e97f5244950 --- /dev/null +++ b/third_party/rust/mio/test/test_local_addr_ready.rs @@ -0,0 +1,67 @@ +use {TryWrite}; +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::net::{TcpListener, TcpStream}; + +const LISTEN: Token = Token(0); +const CLIENT: Token = Token(1); +const SERVER: Token = Token(2); + +struct MyHandler { + listener: TcpListener, + connected: TcpStream, + accepted: Option, + shutdown: bool, +} + +#[test] +fn local_addr_ready() { + let addr = "127.0.0.1:0".parse().unwrap(); + let server = TcpListener::bind(&addr).unwrap(); + let addr = server.local_addr().unwrap(); + + let poll = Poll::new().unwrap(); + poll.register(&server, LISTEN, Ready::readable(), + PollOpt::edge()).unwrap(); + + let sock = TcpStream::connect(&addr).unwrap(); + poll.register(&sock, CLIENT, Ready::readable(), + PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + + let mut handler = MyHandler { + listener: server, + connected: sock, + accepted: None, + shutdown: false, + }; + + while !handler.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + match event.token() { + LISTEN => { + let sock = handler.listener.accept().unwrap().0; + poll.register(&sock, + SERVER, + Ready::writable(), + PollOpt::edge()).unwrap(); + handler.accepted = Some(sock); + } + SERVER => { + handler.accepted.as_ref().unwrap().peer_addr().unwrap(); + handler.accepted.as_ref().unwrap().local_addr().unwrap(); + handler.accepted.as_mut().unwrap().try_write(&[1, 2, 3]).unwrap(); + handler.accepted = None; + } + CLIENT => { + 
handler.connected.peer_addr().unwrap(); + handler.connected.local_addr().unwrap(); + handler.shutdown = true; + } + _ => panic!("unexpected token"), + } + } + } +} diff --git a/third_party/rust/mio/test/test_multicast.rs b/third_party/rust/mio/test/test_multicast.rs new file mode 100644 index 000000000000..09efcbd7adbd --- /dev/null +++ b/third_party/rust/mio/test/test_multicast.rs @@ -0,0 +1,113 @@ +// TODO: This doesn't pass on android 64bit CI... +// Figure out why! +#![cfg(not(target_os = "android"))] + +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::net::UdpSocket; +use bytes::{Buf, MutBuf, RingBuf, SliceBuf}; +use std::str; +use std::net::IpAddr; +use localhost; + +const LISTENER: Token = Token(0); +const SENDER: Token = Token(1); + +pub struct UdpHandler { + tx: UdpSocket, + rx: UdpSocket, + msg: &'static str, + buf: SliceBuf<'static>, + rx_buf: RingBuf, + localhost: IpAddr, + shutdown: bool, +} + +impl UdpHandler { + fn new(tx: UdpSocket, rx: UdpSocket, msg: &'static str) -> UdpHandler { + let sock = UdpSocket::bind(&"127.0.0.1:12345".parse().unwrap()).unwrap(); + UdpHandler { + tx: tx, + rx: rx, + msg: msg, + buf: SliceBuf::wrap(msg.as_bytes()), + rx_buf: RingBuf::new(1024), + localhost: sock.local_addr().unwrap().ip(), + shutdown: false, + } + } + + fn handle_read(&mut self, _: &mut Poll, token: Token, _: Ready) { + match token { + LISTENER => { + debug!("We are receiving a datagram now..."); + match unsafe { self.rx.recv_from(self.rx_buf.mut_bytes()) } { + Ok((cnt, addr)) => { + unsafe { MutBuf::advance(&mut self.rx_buf, cnt); } + assert_eq!(addr.ip(), self.localhost); + } + res => panic!("unexpected result: {:?}", res), + } + assert!(str::from_utf8(self.rx_buf.bytes()).unwrap() == self.msg); + self.shutdown = true; + }, + _ => () + } + } + + fn handle_write(&mut self, _: &mut Poll, token: Token, _: Ready) { + match token { + SENDER => { + let addr = self.rx.local_addr().unwrap(); + let cnt = self.tx.send_to(self.buf.bytes(), &addr).unwrap(); 
+ self.buf.advance(cnt); + }, + _ => () + } + } +} + +#[test] +pub fn test_multicast() { + drop(::env_logger::init()); + debug!("Starting TEST_UDP_CONNECTIONLESS"); + let mut poll = Poll::new().unwrap(); + + let addr = localhost(); + let any = "0.0.0.0:0".parse().unwrap(); + + let tx = UdpSocket::bind(&any).unwrap(); + let rx = UdpSocket::bind(&addr).unwrap(); + + info!("Joining group 227.1.1.100"); + let any = "0.0.0.0".parse().unwrap(); + rx.join_multicast_v4(&"227.1.1.100".parse().unwrap(), &any).unwrap(); + + info!("Joining group 227.1.1.101"); + rx.join_multicast_v4(&"227.1.1.101".parse().unwrap(), &any).unwrap(); + + info!("Registering SENDER"); + poll.register(&tx, SENDER, Ready::writable(), PollOpt::edge()).unwrap(); + + info!("Registering LISTENER"); + poll.register(&rx, LISTENER, Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + + let mut handler = UdpHandler::new(tx, rx, "hello world"); + + info!("Starting event loop to test with..."); + + while !handler.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.readiness().is_readable() { + handler.handle_read(&mut poll, event.token(), event.readiness()); + } + + if event.readiness().is_writable() { + handler.handle_write(&mut poll, event.token(), event.readiness()); + } + } + } +} diff --git a/third_party/rust/mio/test/test_notify.rs b/third_party/rust/mio/test/test_notify.rs new file mode 100644 index 000000000000..c2b36b5cda67 --- /dev/null +++ b/third_party/rust/mio/test/test_notify.rs @@ -0,0 +1,192 @@ +use {localhost, sleep_ms}; +use mio::*; +use mio::deprecated::{EventLoop, EventLoopBuilder, Handler, Sender, NotifyError}; +use mio::net::TcpListener; +use std::thread; + +struct TestHandler { + sender: Sender, + notify: usize +} + +impl TestHandler { + fn new(sender: Sender) -> TestHandler { + TestHandler { + sender: sender, + notify: 0 + } + } +} + +impl Handler for TestHandler { + type Timeout = usize; + type Message = 
String; + + fn notify(&mut self, event_loop: &mut EventLoop, msg: String) { + match self.notify { + 0 => { + assert!(msg == "First", "actual={}", msg); + self.sender.send("Second".to_string()).unwrap(); + } + 1 => { + assert!(msg == "Second", "actual={}", msg); + event_loop.shutdown(); + } + v => panic!("unexpected value for notify; val={}", v) + } + + self.notify += 1; + } +} + +#[test] +pub fn test_notify() { + debug!("Starting TEST_NOTIFY"); + let mut event_loop = EventLoop::new().unwrap(); + + let addr = localhost(); + + // Setup a server socket so that the event loop blocks + let srv = TcpListener::bind(&addr).unwrap(); + + event_loop.register(&srv, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + let sender = event_loop.channel(); + + thread::spawn(move || { + sleep_ms(1_000); + sender.send("First".to_string()).unwrap(); + }); + + let sender = event_loop.channel(); + let mut handler = TestHandler::new(sender); + + // Start the event loop + event_loop.run(&mut handler).unwrap(); + + assert!(handler.notify == 2, "actual={}", handler.notify); +} + +#[test] +pub fn test_notify_capacity() { + use std::sync::mpsc::*; + use std::thread; + + struct Capacity(Receiver); + + impl Handler for Capacity { + type Message = i32; + type Timeout = (); + + fn notify(&mut self, event_loop: &mut EventLoop, msg: i32) { + if msg == 1 { + self.0.recv().unwrap(); + } else if msg == 3 { + event_loop.shutdown(); + } + } + } + + let mut builder = EventLoopBuilder::new(); + builder.notify_capacity(1); + + let (tx, rx) = channel::(); + let mut event_loop = builder.build().unwrap(); + let notify = event_loop.channel(); + + let handle = thread::spawn(move || { + let mut handler = Capacity(rx); + event_loop.run(&mut handler).unwrap(); + }); + + assert!(notify.send(1).is_ok()); + + loop { + if notify.send(2).is_err() { + break; + } + } + + tx.send(1).unwrap(); + + loop { + if notify.send(3).is_ok() { + break; + } + } + + handle.join().unwrap(); +} + +#[test] 
+pub fn test_notify_drop() { + use std::sync::mpsc::{self,Sender}; + use std::thread; + + struct MessageDrop(Sender); + + impl Drop for MessageDrop { + fn drop(&mut self) { + self.0.send(0).unwrap(); + } + } + + struct DummyHandler; + + impl Handler for DummyHandler { + type Timeout = (); + type Message = MessageDrop; + + fn notify(&mut self, event_loop: &mut EventLoop, msg: MessageDrop) { + msg.0.send(1).unwrap(); + drop(msg); + // We stop after the first message + event_loop.shutdown(); + } + } + + let (tx_notif_1, rx_notif_1) = mpsc::channel(); + let (tx_notif_2, rx_notif_2) = mpsc::channel(); + let (tx_notif_3, _unused) = mpsc::channel(); + let (tx_exit_loop, rx_exit_loop) = mpsc::channel(); + let (tx_drop_loop, rx_drop_loop) = mpsc::channel(); + + let mut event_loop = EventLoop::new().unwrap(); + let notify = event_loop.channel(); + + let handle = thread::spawn(move || { + let mut handler = DummyHandler; + event_loop.run(&mut handler).unwrap(); + + // Confirmation we exited the loop + tx_exit_loop.send(()).unwrap(); + + // Order to drop the loop + rx_drop_loop.recv().unwrap(); + drop(event_loop); + }); + notify.send(MessageDrop(tx_notif_1)).unwrap(); + assert_eq!(rx_notif_1.recv().unwrap(), 1); // Response from the loop + assert_eq!(rx_notif_1.recv().unwrap(), 0); // Drop notification + + // We wait for the event loop to exit before sending the second notification + rx_exit_loop.recv().unwrap(); + notify.send(MessageDrop(tx_notif_2)).unwrap(); + + // We ensure the message is indeed stuck in the queue + sleep_ms(100); + assert!(rx_notif_2.try_recv().is_err()); + + // Give the order to drop the event loop + tx_drop_loop.send(()).unwrap(); + assert_eq!(rx_notif_2.recv().unwrap(), 0); // Drop notification + + // Check that sending a new notification will return an error + // We should also get our message back + match notify.send(MessageDrop(tx_notif_3)).unwrap_err() { + NotifyError::Closed(Some(..)) => {} + _ => panic!(), + } + + handle.join().unwrap(); +} diff 
--git a/third_party/rust/mio/test/test_oneshot.rs b/third_party/rust/mio/test/test_oneshot.rs new file mode 100644 index 000000000000..4dca219b7300 --- /dev/null +++ b/third_party/rust/mio/test/test_oneshot.rs @@ -0,0 +1,64 @@ +use mio::*; +use mio::net::{TcpListener, TcpStream}; +use std::io::*; +use std::time::Duration; + +const MS: u64 = 1_000; + +#[test] +pub fn test_tcp_edge_oneshot() { + let _ = ::env_logger::init(); + + let mut poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + + // Create the listener + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Register the listener with `Poll` + poll.register(&l, Token(0), Ready::readable(), PollOpt::level()).unwrap(); + + // Connect a socket, we are going to write to it + let mut s1 = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + poll.register(&s1, Token(1), Ready::writable(), PollOpt::level()).unwrap(); + + wait_for(&mut poll, &mut events, Token(0)); + + // Get pair + let (mut s2, _) = l.accept().unwrap(); + poll.register(&s2, Token(2), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + wait_for(&mut poll, &mut events, Token(1)); + + let res = s1.write(b"foo").unwrap(); + assert_eq!(3, res); + + let mut buf = [0; 1]; + + for byte in b"foo" { + wait_for(&mut poll, &mut events, Token(2)); + + assert_eq!(1, s2.read(&mut buf).unwrap()); + assert_eq!(*byte, buf[0]); + + poll.reregister(&s2, Token(2), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + if *byte == b'o' { + poll.reregister(&s2, Token(2), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + } + } +} + +fn wait_for(poll: &mut Poll, events: &mut Events, token: Token) { + loop { + poll.poll(events, Some(Duration::from_millis(MS))).unwrap(); + + let cnt = (0..events.len()).map(|i| events.get(i).unwrap()) + .filter(|e| e.token() == token) + .count(); + + assert!(cnt < 2, "token appeared multiple times in poll results; cnt={:}", cnt); + + if 
cnt == 1 { return }; + } +} diff --git a/third_party/rust/mio/test/test_poll.rs b/third_party/rust/mio/test/test_poll.rs new file mode 100644 index 000000000000..e259d89e2413 --- /dev/null +++ b/third_party/rust/mio/test/test_poll.rs @@ -0,0 +1,18 @@ +use mio::*; +use std::time::Duration; + +#[test] +fn test_poll_closes_fd() { + for _ in 0..2000 { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(4); + let (registration, set_readiness) = Registration::new2(); + + poll.register(®istration, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + poll.poll(&mut events, Some(Duration::from_millis(0))).unwrap(); + + drop(poll); + drop(set_readiness); + drop(registration); + } +} diff --git a/third_party/rust/mio/test/test_poll_channel.rs b/third_party/rust/mio/test/test_poll_channel.rs new file mode 100644 index 000000000000..f7ce050537aa --- /dev/null +++ b/third_party/rust/mio/test/test_poll_channel.rs @@ -0,0 +1,285 @@ +use {expect_events, sleep_ms}; +use mio::{channel, Events, Poll, PollOpt, Ready, Token}; +use mio::event::Event; +use std::sync::mpsc::TryRecvError; +use std::thread; +use std::time::Duration; + +#[test] +pub fn test_poll_channel_edge() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let (tx, rx) = channel::channel(); + + poll.register(&rx, Token(123), Ready::readable(), PollOpt::edge()).unwrap(); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Push the value + tx.send("hello").unwrap(); + + // Polling will contain the event + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(1, num); + + let event = events.get(0).unwrap(); + assert_eq!(event.token(), Token(123)); + assert_eq!(event.readiness(), Ready::readable()); + + // Poll again and there should be no events + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + 
assert_eq!(0, num); + + // Read the value + assert_eq!("hello", rx.try_recv().unwrap()); + + // Poll again, nothing + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Push a value + tx.send("goodbye").unwrap(); + + // Have an event + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(1, num); + + let event = events.get(0).unwrap(); + assert_eq!(event.token(), Token(123)); + assert_eq!(event.readiness(), Ready::readable()); + + // Read the value + rx.try_recv().unwrap(); + + // Drop the sender half + drop(tx); + + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(1, num); + + let event = events.get(0).unwrap(); + assert_eq!(event.token(), Token(123)); + assert_eq!(event.readiness(), Ready::readable()); + + match rx.try_recv() { + Err(TryRecvError::Disconnected) => {} + no => panic!("unexpected value {:?}", no), + } + +} + +#[test] +pub fn test_poll_channel_oneshot() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let (tx, rx) = channel::channel(); + + poll.register(&rx, Token(123), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Push the value + tx.send("hello").unwrap(); + + // Polling will contain the event + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(1, num); + + let event = events.get(0).unwrap(); + assert_eq!(event.token(), Token(123)); + assert_eq!(event.readiness(), Ready::readable()); + + // Poll again and there should be no events + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Read the value + assert_eq!("hello", rx.try_recv().unwrap()); + + // Poll again, nothing + let num = poll.poll(&mut events, 
Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Push a value + tx.send("goodbye").unwrap(); + + // Poll again, nothing + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Reregistering will re-trigger the notification + for _ in 0..3 { + poll.reregister(&rx, Token(123), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Have an event + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(1, num); + + let event = events.get(0).unwrap(); + assert_eq!(event.token(), Token(123)); + assert_eq!(event.readiness(), Ready::readable()); + } + + // Get the value + assert_eq!("goodbye", rx.try_recv().unwrap()); + + poll.reregister(&rx, Token(123), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Have an event + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + poll.reregister(&rx, Token(123), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Have an event + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); +} + +#[test] +pub fn test_poll_channel_level() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let (tx, rx) = channel::channel(); + + poll.register(&rx, Token(123), Ready::readable(), PollOpt::level()).unwrap(); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Push the value + tx.send("hello").unwrap(); + + // Polling will contain the event + for i in 0..5 { + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert!(1 == num, "actually got {} on iteration {}", num, i); + + let event = events.get(0).unwrap(); + assert_eq!(event.token(), Token(123)); + assert_eq!(event.readiness(), Ready::readable()); + } + + // Read the 
value + assert_eq!("hello", rx.try_recv().unwrap()); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); +} + +#[test] +pub fn test_poll_channel_writable() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let (tx, rx) = channel::channel(); + + poll.register(&rx, Token(123), Ready::writable(), PollOpt::edge()).unwrap(); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); + + // Push the value + tx.send("hello").unwrap(); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); +} + +#[test] +pub fn test_dropping_receive_before_poll() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let (tx, rx) = channel::channel(); + + poll.register(&rx, Token(123), Ready::readable(), PollOpt::edge()).unwrap(); + + // Push the value + tx.send("hello").unwrap(); + + // Drop the receive end + drop(rx); + + // Wait, but nothing should happen + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(0, num); +} + +#[test] +pub fn test_mixing_channel_with_socket() { + use mio::net::{TcpListener, TcpStream}; + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let (tx, rx) = channel::channel(); + + // Create the listener + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Register the listener with `Poll` + poll.register(&l, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + poll.register(&rx, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + // Push a value onto the channel + tx.send("hello").unwrap(); + + // Connect a TCP socket + let s1 = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + + // Register the socket + poll.register(&s1, 
Token(2), Ready::readable(), PollOpt::edge()).unwrap(); + + // Sleep a bit to ensure it arrives at dest + sleep_ms(250); + + expect_events(&poll, &mut events, 2, vec![ + Event::new(Ready::empty(), Token(0)), + Event::new(Ready::empty(), Token(1)), + ]); +} + +#[test] +pub fn test_sending_from_other_thread_while_polling() { + const ITERATIONS: usize = 20; + const THREADS: usize = 5; + + // Make sure to run multiple times + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + + for _ in 0..ITERATIONS { + let (tx, rx) = channel::channel(); + poll.register(&rx, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + for _ in 0..THREADS { + let tx = tx.clone(); + + thread::spawn(move || { + sleep_ms(50); + tx.send("ping").unwrap(); + }); + } + + let mut recv = 0; + + while recv < THREADS { + let num = poll.poll(&mut events, None).unwrap(); + + if num != 0 { + assert_eq!(1, num); + assert_eq!(events.get(0).unwrap().token(), Token(0)); + + while let Ok(_) = rx.try_recv() { + recv += 1; + } + } + } + } +} diff --git a/third_party/rust/mio/test/test_register_deregister.rs b/third_party/rust/mio/test/test_register_deregister.rs new file mode 100644 index 000000000000..a82698f9152e --- /dev/null +++ b/third_party/rust/mio/test/test_register_deregister.rs @@ -0,0 +1,123 @@ +use {expect_events, localhost, TryWrite}; +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::event::Event; +use mio::net::{TcpListener, TcpStream}; +use bytes::SliceBuf; +use std::time::Duration; + +const SERVER: Token = Token(0); +const CLIENT: Token = Token(1); + +struct TestHandler { + server: TcpListener, + client: TcpStream, + state: usize, +} + +impl TestHandler { + fn new(srv: TcpListener, cli: TcpStream) -> TestHandler { + TestHandler { + server: srv, + client: cli, + state: 0, + } + } + + fn handle_read(&mut self, poll: &mut Poll, token: Token) { + match token { + SERVER => { + trace!("handle_read; token=SERVER"); + let mut sock = 
self.server.accept().unwrap().0; + sock.try_write_buf(&mut SliceBuf::wrap("foobar".as_bytes())).unwrap(); + } + CLIENT => { + trace!("handle_read; token=CLIENT"); + assert!(self.state == 0, "unexpected state {}", self.state); + self.state = 1; + poll.reregister(&self.client, CLIENT, Ready::writable(), PollOpt::level()).unwrap(); + } + _ => panic!("unexpected token"), + } + } + + fn handle_write(&mut self, poll: &mut Poll, token: Token) { + debug!("handle_write; token={:?}; state={:?}", token, self.state); + + assert!(token == CLIENT, "unexpected token {:?}", token); + assert!(self.state == 1, "unexpected state {}", self.state); + + self.state = 2; + poll.deregister(&self.client).unwrap(); + poll.deregister(&self.server).unwrap(); + } +} + +#[test] +pub fn test_register_deregister() { + let _ = ::env_logger::init(); + + debug!("Starting TEST_REGISTER_DEREGISTER"); + let mut poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + + let addr = localhost(); + + let server = TcpListener::bind(&addr).unwrap(); + + info!("register server socket"); + poll.register(&server, SERVER, Ready::readable(), PollOpt::edge()).unwrap(); + + let client = TcpStream::connect(&addr).unwrap(); + + // Register client socket only as writable + poll.register(&client, CLIENT, Ready::readable(), PollOpt::level()).unwrap(); + + let mut handler = TestHandler::new(server, client); + + loop { + poll.poll(&mut events, None).unwrap(); + + if let Some(event) = events.get(0) { + if event.readiness().is_readable() { + handler.handle_read(&mut poll, event.token()); + } + + if event.readiness().is_writable() { + handler.handle_write(&mut poll, event.token()); + break; + } + } + } + + poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); + assert_eq!(events.len(), 0); +} + +#[test] +pub fn test_register_empty_interest() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let addr = localhost(); + + let sock = 
TcpListener::bind(&addr).unwrap(); + + poll.register(&sock, Token(0), Ready::empty(), PollOpt::edge()).unwrap(); + + let client = TcpStream::connect(&addr).unwrap(); + + // The connect is not guaranteed to have started until it is registered + // https://docs.rs/mio/0.6.10/mio/struct.Poll.html#registering-handles + poll.register(&client, Token(1), Ready::empty(), PollOpt::edge()).unwrap(); + + // sock is registered with empty interest, we should not receive any event + poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); + assert_eq!(events.len(), 0, "Received unexpected event: {:?}", events.get(0).unwrap()); + + // now sock is reregistered with readable, we should receive the pending event + poll.reregister(&sock, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + expect_events(&poll, &mut events, 2, vec![ + Event::new(Ready::readable(), Token(0)) + ]); + + poll.reregister(&sock, Token(0), Ready::empty(), PollOpt::edge()).unwrap(); +} diff --git a/third_party/rust/mio/test/test_register_multiple_event_loops.rs b/third_party/rust/mio/test/test_register_multiple_event_loops.rs new file mode 100644 index 000000000000..9204afaf688e --- /dev/null +++ b/third_party/rust/mio/test/test_register_multiple_event_loops.rs @@ -0,0 +1,63 @@ +use localhost; +use mio::*; +use mio::net::{TcpListener, TcpStream, UdpSocket}; +use std::io::ErrorKind; + +#[test] +fn test_tcp_register_multiple_event_loops() { + let addr = localhost(); + let listener = TcpListener::bind(&addr).unwrap(); + + let poll1 = Poll::new().unwrap(); + poll1.register(&listener, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + let poll2 = Poll::new().unwrap(); + + // Try registering the same socket with the initial one + let res = poll2.register(&listener, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()); + assert!(res.is_err()); + assert_eq!(res.unwrap_err().kind(), ErrorKind::Other); + + // Try cloning the socket and registering it again + let 
listener2 = listener.try_clone().unwrap(); + let res = poll2.register(&listener2, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()); + assert!(res.is_err()); + assert_eq!(res.unwrap_err().kind(), ErrorKind::Other); + + // Try the stream + let stream = TcpStream::connect(&addr).unwrap(); + + poll1.register(&stream, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + let res = poll2.register(&stream, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()); + assert!(res.is_err()); + assert_eq!(res.unwrap_err().kind(), ErrorKind::Other); + + // Try cloning the socket and registering it again + let stream2 = stream.try_clone().unwrap(); + let res = poll2.register(&stream2, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()); + assert!(res.is_err()); + assert_eq!(res.unwrap_err().kind(), ErrorKind::Other); +} + +#[test] +fn test_udp_register_multiple_event_loops() { + let addr = localhost(); + let socket = UdpSocket::bind(&addr).unwrap(); + + let poll1 = Poll::new().unwrap(); + poll1.register(&socket, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + let poll2 = Poll::new().unwrap(); + + // Try registering the same socket with the initial one + let res = poll2.register(&socket, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()); + assert!(res.is_err()); + assert_eq!(res.unwrap_err().kind(), ErrorKind::Other); + + // Try cloning the socket and registering it again + let socket2 = socket.try_clone().unwrap(); + let res = poll2.register(&socket2, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()); + assert!(res.is_err()); + assert_eq!(res.unwrap_err().kind(), ErrorKind::Other); +} diff --git a/third_party/rust/mio/test/test_reregister_without_poll.rs b/third_party/rust/mio/test/test_reregister_without_poll.rs new file mode 100644 index 000000000000..45d5aca49cd5 --- /dev/null +++ b/third_party/rust/mio/test/test_reregister_without_poll.rs @@ -0,0 
+1,28 @@ +use {sleep_ms}; +use mio::*; +use mio::net::{TcpListener, TcpStream}; +use std::time::Duration; + +const MS: u64 = 1_000; + +#[test] +pub fn test_reregister_different_without_poll() { + let mut events = Events::with_capacity(1024); + let poll = Poll::new().unwrap(); + + // Create the listener + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Register the listener with `Poll` + poll.register(&l, Token(0), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let s1 = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + poll.register(&s1, Token(2), Ready::readable(), PollOpt::edge()).unwrap(); + + sleep_ms(MS); + + poll.reregister(&l, Token(0), Ready::writable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + poll.poll(&mut events, Some(Duration::from_millis(MS))).unwrap(); + assert_eq!(events.len(), 0); +} diff --git a/third_party/rust/mio/test/test_smoke.rs b/third_party/rust/mio/test/test_smoke.rs new file mode 100644 index 000000000000..96f7d3c9e40f --- /dev/null +++ b/third_party/rust/mio/test/test_smoke.rs @@ -0,0 +1,23 @@ +extern crate mio; + +use mio::{Events, Poll, Token, Ready, PollOpt}; +use mio::net::TcpListener; +use std::time::Duration; + +#[test] +fn run_once_with_nothing() { + let mut events = Events::with_capacity(1024); + let poll = Poll::new().unwrap(); + poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); +} + +#[test] +fn add_then_drop() { + let mut events = Events::with_capacity(1024); + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let poll = Poll::new().unwrap(); + poll.register(&l, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + drop(l); + poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); + +} diff --git a/third_party/rust/mio/test/test_subprocess_pipe.rs b/third_party/rust/mio/test/test_subprocess_pipe.rs new file mode 100644 index 000000000000..2bcf132486e4 --- /dev/null +++ 
b/third_party/rust/mio/test/test_subprocess_pipe.rs @@ -0,0 +1,249 @@ +use {TryRead, TryWrite}; +use std::mem; +use mio::*; +use std::io; +use mio::deprecated::{EventLoop, Handler}; +use mio::deprecated::unix::{PipeReader, PipeWriter}; +use std::process::{Command, Stdio, Child}; + + +struct SubprocessClient { + stdin: Option, + stdout: Option, + stderr: Option, + stdin_token : Token, + stdout_token : Token, + stderr_token : Token, + output : Vec, + output_stderr : Vec, + input : Vec, + input_offset : usize, + buf : [u8; 65536], +} + + +// Sends a message and expects to receive the same exact message, one at a time +impl SubprocessClient { + fn new(stdin: Option, stdout : Option, stderr : Option, data : &[u8]) -> SubprocessClient { + SubprocessClient { + stdin: stdin, + stdout: stdout, + stderr: stderr, + stdin_token : Token(0), + stdout_token : Token(1), + stderr_token : Token(2), + output : Vec::::new(), + output_stderr : Vec::::new(), + buf : [0; 65536], + input : data.to_vec(), + input_offset : 0, + } + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut eof = false; + match self.stdout { + None => unreachable!(), + Some (ref mut stdout) => match stdout.try_read(&mut self.buf[..]) { + Ok(None) => { + } + Ok(Some(r)) => { + if r == 0 { + eof = true; + } else { + self.output.extend(&self.buf[0..r]); + } + } + Err(e) => { + return Err(e); + } + } + }; + if eof { + drop(self.stdout.take()); + match self.stderr { + None => event_loop.shutdown(), + Some(_) => {}, + } + } + return Ok(()); + } + + fn readable_stderr(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut eof = false; + match self.stderr { + None => unreachable!(), + Some(ref mut stderr) => match stderr.try_read(&mut self.buf[..]) { + Ok(None) => { + } + Ok(Some(r)) => { + if r == 0 { + eof = true; + } else { + self.output_stderr.extend(&self.buf[0..r]); + } + } + Err(e) => { + return Err(e); + } + } + }; + if eof { + drop(self.stderr.take()); + match 
self.stdout { + None => event_loop.shutdown(), + Some(_) => {}, + } + } + return Ok(()); + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut ok = true; + match self.stdin { + None => unreachable!(), + Some(ref mut stdin) => match stdin.try_write(&(&self.input)[self.input_offset..]) { + Ok(None) => { + }, + Ok(Some(r)) => { + if r == 0 { + ok = false; + } else { + self.input_offset += r; + } + }, + Err(_) => { + ok = false; + }, + } + } + if self.input_offset == self.input.len() || !ok { + drop(self.stdin.take()); + match self.stderr { + None => match self.stdout { + None => event_loop.shutdown(), + Some(_) => {}, + }, + Some(_) => {}, + } + } + return Ok(()); + } + +} + +impl Handler for SubprocessClient { + type Timeout = usize; + type Message = (); + + fn ready(&mut self, event_loop: &mut EventLoop, token: Token, + _: Ready) { + if token == self.stderr_token { + let _x = self.readable_stderr(event_loop); + } else { + let _x = self.readable(event_loop); + } + if token == self.stdin_token { + let _y = self.writable(event_loop); + } + } +} + + + + +const TEST_DATA : [u8; 1024 * 4096] = [42; 1024 * 4096]; +pub fn subprocess_communicate(mut process : Child, input : &[u8]) -> (Vec, Vec) { + let mut event_loop = EventLoop::::new().unwrap(); + let stdin : Option; + let stdin_exists : bool; + match process.stdin { + None => stdin_exists = false, + Some(_) => stdin_exists = true, + } + if stdin_exists { + match PipeWriter::from_stdin(process.stdin.take().unwrap()) { + Err(e) => panic!(e), + Ok(pipe) => stdin = Some(pipe), + } + } else { + stdin = None; + } + let stdout_exists : bool; + let stdout : Option; + match process.stdout { + None => stdout_exists = false, + Some(_) => stdout_exists = true, + } + if stdout_exists { + match PipeReader::from_stdout(process.stdout.take().unwrap()) { + Err(e) => panic!(e), + Ok(pipe) => stdout = Some(pipe), + } + } else { + stdout = None; + } + let stderr_exists : bool; + let stderr : Option; + match 
process.stderr { + None => stderr_exists = false, + Some(_) => stderr_exists = true, + } + if stderr_exists { + match PipeReader::from_stderr(process.stderr.take().unwrap()) { + Err(e) => panic!(e), + Ok(pipe) => stderr = Some(pipe), + } + } else { + stderr = None + } + + let mut subprocess = SubprocessClient::new(stdin, + stdout, + stderr, + input); + match subprocess.stdout { + Some(ref sub_stdout) => event_loop.register(sub_stdout, subprocess.stdout_token, Ready::readable(), + PollOpt::level()).unwrap(), + None => {}, + } + + match subprocess.stderr { + Some(ref sub_stderr) => event_loop.register(sub_stderr, subprocess.stderr_token, Ready::readable(), + PollOpt::level()).unwrap(), + None => {}, + } + + // Connect to the server + match subprocess.stdin { + Some (ref sub_stdin) => event_loop.register(sub_stdin, subprocess.stdin_token, Ready::writable(), + PollOpt::level()).unwrap(), + None => {}, + } + + // Start the event loop + event_loop.run(&mut subprocess).unwrap(); + let _ = process.wait(); + + let ret_stdout = mem::replace(&mut subprocess.output, Vec::::new()); + let ret_stderr = mem::replace(&mut subprocess.output_stderr, Vec::::new()); + return (ret_stdout, ret_stderr); +} + +#[test] +fn test_subprocess_pipe() { + let process = + Command::new("/bin/cat") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn().unwrap(); + let (ret_stdout, ret_stderr) = subprocess_communicate(process, &TEST_DATA[..]); + assert_eq!(TEST_DATA.len(), ret_stdout.len()); + assert_eq!(0usize, ret_stderr.len()); + let mut i : usize = 0; + for item in TEST_DATA.iter() { + assert_eq!(*item, ret_stdout[i]); + i += 1; + } +} diff --git a/third_party/rust/mio/test/test_tcp.rs b/third_party/rust/mio/test/test_tcp.rs new file mode 100644 index 000000000000..2587c314facb --- /dev/null +++ b/third_party/rust/mio/test/test_tcp.rs @@ -0,0 +1,660 @@ +use std::cmp; +use std::io::prelude::*; +use std::io; +use std::net; +use std::sync::mpsc::channel; +use 
std::thread; +use std::time::Duration; + +use net2::{self, TcpStreamExt}; + +use {TryRead, TryWrite}; +use mio::{Token, Ready, PollOpt, Poll, Events}; +use iovec::IoVec; +use mio::net::{TcpListener, TcpStream}; + +#[test] +fn accept() { + struct H { hit: bool, listener: TcpListener, shutdown: bool } + + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + net::TcpStream::connect(&addr).unwrap(); + }); + + let poll = Poll::new().unwrap(); + + poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(128); + + let mut h = H { hit: false, listener: l, shutdown: false }; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + h.hit = true; + assert_eq!(event.token(), Token(1)); + assert!(event.readiness().is_readable()); + assert!(h.listener.accept().is_ok()); + h.shutdown = true; + } + } + assert!(h.hit); + assert!(h.listener.accept().unwrap_err().kind() == io::ErrorKind::WouldBlock); + t.join().unwrap(); +} + +#[test] +fn connect() { + struct H { hit: u32, shutdown: bool } + + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let (tx, rx) = channel(); + let (tx2, rx2) = channel(); + let t = thread::spawn(move || { + let s = l.accept().unwrap(); + rx.recv().unwrap(); + drop(s); + tx2.send(()).unwrap(); + }); + + let poll = Poll::new().unwrap(); + let s = TcpStream::connect(&addr).unwrap(); + + poll.register(&s, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(128); + + let mut h = H { hit: 0, shutdown: false }; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + assert_eq!(event.token(), Token(1)); + match h.hit { + 0 => assert!(event.readiness().is_writable()), + 1 => assert!(event.readiness().is_readable()), + _ => panic!(), + 
} + h.hit += 1; + h.shutdown = true; + } + } + assert_eq!(h.hit, 1); + tx.send(()).unwrap(); + rx2.recv().unwrap(); + h.shutdown = false; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + assert_eq!(event.token(), Token(1)); + match h.hit { + 0 => assert!(event.readiness().is_writable()), + 1 => assert!(event.readiness().is_readable()), + _ => panic!(), + } + h.hit += 1; + h.shutdown = true; + } + } + assert_eq!(h.hit, 2); + t.join().unwrap(); +} + +#[test] +fn read() { + const N: usize = 16 * 1024 * 1024; + struct H { amt: usize, socket: TcpStream, shutdown: bool } + + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + let mut s = l.accept().unwrap().0; + let b = [0; 1024]; + let mut amt = 0; + while amt < N { + amt += s.write(&b).unwrap(); + } + }); + + let poll = Poll::new().unwrap(); + let s = TcpStream::connect(&addr).unwrap(); + + poll.register(&s, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(128); + + let mut h = H { amt: 0, socket: s, shutdown: false }; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + assert_eq!(event.token(), Token(1)); + let mut b = [0; 1024]; + loop { + if let Some(amt) = h.socket.try_read(&mut b).unwrap() { + h.amt += amt; + } else { + break + } + if h.amt >= N { + h.shutdown = true; + break + } + } + } + } + t.join().unwrap(); +} + +#[test] +fn peek() { + const N: usize = 16 * 1024 * 1024; + struct H { amt: usize, socket: TcpStream, shutdown: bool } + + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + let mut s = l.accept().unwrap().0; + let b = [0; 1024]; + let mut amt = 0; + while amt < N { + amt += s.write(&b).unwrap(); + } + }); + + let poll = Poll::new().unwrap(); + let s = TcpStream::connect(&addr).unwrap(); + + poll.register(&s, 
Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(128); + + let mut h = H { amt: 0, socket: s, shutdown: false }; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + assert_eq!(event.token(), Token(1)); + let mut b = [0; 1024]; + match h.socket.peek(&mut b) { + Ok(_) => (), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + continue + }, + Err(e) => panic!("unexpected error: {:?}", e), + } + + loop { + if let Some(amt) = h.socket.try_read(&mut b).unwrap() { + h.amt += amt; + } else { + break + } + if h.amt >= N { + h.shutdown = true; + break + } + } + } + } + t.join().unwrap(); +} + +#[test] +fn read_bufs() { + const N: usize = 16 * 1024 * 1024; + + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + let mut s = l.accept().unwrap().0; + let b = [1; 1024]; + let mut amt = 0; + while amt < N { + amt += s.write(&b).unwrap(); + } + }); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(128); + + let s = TcpStream::connect(&addr).unwrap(); + + poll.register(&s, Token(1), Ready::readable(), PollOpt::level()).unwrap(); + + let b1 = &mut [0; 10][..]; + let b2 = &mut [0; 383][..]; + let b3 = &mut [0; 28][..]; + let b4 = &mut [0; 8][..]; + let b5 = &mut [0; 128][..]; + let mut b: [&mut IoVec; 5] = [ + b1.into(), + b2.into(), + b3.into(), + b4.into(), + b5.into(), + ]; + + let mut so_far = 0; + loop { + for buf in b.iter_mut() { + for byte in buf.as_mut_bytes() { + *byte = 0; + } + } + + poll.poll(&mut events, None).unwrap(); + + match s.read_bufs(&mut b) { + Ok(0) => { + assert_eq!(so_far, N); + break + } + Ok(mut n) => { + so_far += n; + for buf in b.iter() { + let buf = buf.as_bytes(); + for byte in buf[..cmp::min(n, buf.len())].iter() { + assert_eq!(*byte, 1); + } + n = n.saturating_sub(buf.len()); + if n == 0 { + break + } + } + assert_eq!(n, 0); + } + Err(e) => 
assert_eq!(e.kind(), io::ErrorKind::WouldBlock), + } + } + + t.join().unwrap(); +} + +#[test] +fn write() { + const N: usize = 16 * 1024 * 1024; + struct H { amt: usize, socket: TcpStream, shutdown: bool } + + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + let mut s = l.accept().unwrap().0; + let mut b = [0; 1024]; + let mut amt = 0; + while amt < N { + amt += s.read(&mut b).unwrap(); + } + }); + + let poll = Poll::new().unwrap(); + let s = TcpStream::connect(&addr).unwrap(); + + poll.register(&s, Token(1), Ready::writable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(128); + + let mut h = H { amt: 0, socket: s, shutdown: false }; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + assert_eq!(event.token(), Token(1)); + let b = [0; 1024]; + loop { + if let Some(amt) = h.socket.try_write(&b).unwrap() { + h.amt += amt; + } else { + break + } + if h.amt >= N { + h.shutdown = true; + break + } + } + } + } + t.join().unwrap(); +} + +#[test] +fn write_bufs() { + const N: usize = 16 * 1024 * 1024; + + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + let mut s = l.accept().unwrap().0; + let mut b = [0; 1024]; + let mut amt = 0; + while amt < N { + for byte in b.iter_mut() { + *byte = 0; + } + let n = s.read(&mut b).unwrap(); + amt += n; + for byte in b[..n].iter() { + assert_eq!(*byte, 1); + } + } + }); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(128); + let s = TcpStream::connect(&addr).unwrap(); + poll.register(&s, Token(1), Ready::writable(), PollOpt::level()).unwrap(); + + let b1 = &[1; 10][..]; + let b2 = &[1; 383][..]; + let b3 = &[1; 28][..]; + let b4 = &[1; 8][..]; + let b5 = &[1; 128][..]; + let b: [&IoVec; 5] = [ + b1.into(), + b2.into(), + b3.into(), + b4.into(), + b5.into(), + ]; + + let mut 
so_far = 0; + while so_far < N { + poll.poll(&mut events, None).unwrap(); + + match s.write_bufs(&b) { + Ok(n) => so_far += n, + Err(e) => assert_eq!(e.kind(), io::ErrorKind::WouldBlock), + } + } + + t.join().unwrap(); +} + +#[test] +fn connect_then_close() { + struct H { listener: TcpListener, shutdown: bool } + + let poll = Poll::new().unwrap(); + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let s = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + + poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + poll.register(&s, Token(2), Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(128); + + let mut h = H { listener: l, shutdown: false }; + while !h.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.token() == Token(1) { + let s = h.listener.accept().unwrap().0; + poll.register(&s, Token(3), Ready::readable() | Ready::writable(), + PollOpt::edge()).unwrap(); + drop(s); + } else if event.token() == Token(2) { + h.shutdown = true; + } + } + } +} + +#[test] +fn listen_then_close() { + let poll = Poll::new().unwrap(); + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + drop(l); + + let mut events = Events::with_capacity(128); + + poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); + + for event in &events { + if event.token() == Token(1) { + panic!("recieved ready() on a closed TcpListener") + } + } +} + +fn assert_send() { +} + +fn assert_sync() { +} + +#[test] +fn test_tcp_sockets_are_send() { + assert_send::(); + assert_send::(); + assert_sync::(); + assert_sync::(); +} + +#[test] +fn bind_twice_bad() { + let l1 = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = l1.local_addr().unwrap(); + assert!(TcpListener::bind(&addr).is_err()); +} + +#[test] +fn multiple_writes_immediate_success() { + const N: 
usize = 16; + let l = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = l.local_addr().unwrap(); + + let t = thread::spawn(move || { + let mut s = l.accept().unwrap().0; + let mut b = [0; 1024]; + let mut amt = 0; + while amt < 1024*N { + for byte in b.iter_mut() { + *byte = 0; + } + let n = s.read(&mut b).unwrap(); + amt += n; + for byte in b[..n].iter() { + assert_eq!(*byte, 1); + } + } + }); + + let poll = Poll::new().unwrap(); + let mut s = TcpStream::connect(&addr).unwrap(); + poll.register(&s, Token(1), Ready::writable(), PollOpt::level()).unwrap(); + let mut events = Events::with_capacity(16); + + // Wait for our TCP stream to connect + 'outer: loop { + poll.poll(&mut events, None).unwrap(); + for event in events.iter() { + if event.token() == Token(1) && event.readiness().is_writable() { + break 'outer + } + } + } + + for _ in 0..N { + s.write(&[1; 1024]).unwrap(); + } + + t.join().unwrap(); +} + +#[test] +fn connection_reset_by_peer() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(16); + let mut buf = [0u8; 16]; + + // Create listener + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = l.local_addr().unwrap(); + + // Connect client + let client = net2::TcpBuilder::new_v4().unwrap() + .to_tcp_stream().unwrap(); + + client.set_linger(Some(Duration::from_millis(0))).unwrap(); + client.connect(&addr).unwrap(); + + // Convert to Mio stream + let client = TcpStream::from_stream(client).unwrap(); + + // Register server + poll.register(&l, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + // Register interest in the client + poll.register(&client, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + // Wait for listener to be ready + let mut server; + 'outer: + loop { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.token() == Token(0) { + match l.accept() { + Ok((sock, _)) => { + server = sock; + break 'outer; + } + Err(ref 
e) if e.kind() == io::ErrorKind::WouldBlock => {} + Err(e) => panic!("unexpected error {:?}", e), + } + } + } + } + + // Close the connection + drop(client); + + // Wait a moment + thread::sleep(Duration::from_millis(100)); + + // Register interest in the server socket + poll.register(&server, Token(3), Ready::readable(), PollOpt::edge()).unwrap(); + + + loop { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.token() == Token(3) { + assert!(event.readiness().is_readable()); + + match server.read(&mut buf) { + Ok(0) | + Err(_) => {}, + + Ok(x) => panic!("expected empty buffer but read {} bytes", x), + } + return; + } + } + } + +} + +#[test] +#[cfg_attr(target_os = "fuchsia", ignore)] +fn connect_error() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(16); + + // Pick a "random" port that shouldn't be in use. + let l = match TcpStream::connect(&"127.0.0.1:38381".parse().unwrap()) { + Ok(l) => l, + Err(ref e) if e.kind() == io::ErrorKind::ConnectionRefused => { + // Connection failed synchronously. This is not a bug, but it + // unfortunately doesn't get us the code coverage we want. 
+ return; + }, + Err(e) => panic!("TcpStream::connect unexpected error {:?}", e) + }; + + poll.register(&l, Token(0), Ready::writable(), PollOpt::edge()).unwrap(); + + 'outer: + loop { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.token() == Token(0) { + assert!(event.readiness().is_writable()); + break 'outer + } + } + } + + assert!(l.take_error().unwrap().is_some()); +} + +#[test] +fn write_error() { + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(16); + let (tx, rx) = channel(); + + let listener = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + let t = thread::spawn(move || { + let (conn, _addr) = listener.accept().unwrap(); + rx.recv().unwrap(); + drop(conn); + }); + + let mut s = TcpStream::connect(&addr).unwrap(); + poll.register(&s, + Token(0), + Ready::readable() | Ready::writable(), + PollOpt::edge()).unwrap(); + + let mut wait_writable = || { + 'outer: + loop { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.token() == Token(0) && event.readiness().is_writable() { + break 'outer + } + } + } + }; + + wait_writable(); + + tx.send(()).unwrap(); + t.join().unwrap(); + + let buf = [0; 1024]; + loop { + match s.write(&buf) { + Ok(_) => {} + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + wait_writable() + } + Err(e) => { + println!("good error: {}", e); + break + } + } + } +} diff --git a/third_party/rust/mio/test/test_tcp_level.rs b/third_party/rust/mio/test/test_tcp_level.rs new file mode 100644 index 000000000000..e52385c3d7aa --- /dev/null +++ b/third_party/rust/mio/test/test_tcp_level.rs @@ -0,0 +1,142 @@ +use {expect_events, sleep_ms, TryRead}; +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::event::Event; +use mio::net::{TcpListener, TcpStream}; +use std::io::Write; +use std::time::Duration; + +const MS: u64 = 1_000; + +#[test] +pub fn test_tcp_listener_level_triggered() { + let poll = 
Poll::new().unwrap(); + let mut pevents = Events::with_capacity(1024); + + // Create the listener + let l = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Register the listener with `Poll` + poll.register(&l, Token(0), Ready::readable(), PollOpt::level()).unwrap(); + + let s1 = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + poll.register(&s1, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + while filter(&pevents, Token(0)).len() == 0 { + poll.poll(&mut pevents, Some(Duration::from_millis(MS))).unwrap(); + } + let events = filter(&pevents, Token(0)); + + assert_eq!(events.len(), 1); + assert_eq!(events[0], Event::new(Ready::readable(), Token(0))); + + poll.poll(&mut pevents, Some(Duration::from_millis(MS))).unwrap(); + let events = filter(&pevents, Token(0)); + assert_eq!(events.len(), 1); + assert_eq!(events[0], Event::new(Ready::readable(), Token(0))); + + // Accept the connection then test that the events stop + let _ = l.accept().unwrap(); + + poll.poll(&mut pevents, Some(Duration::from_millis(MS))).unwrap(); + let events = filter(&pevents, Token(0)); + assert!(events.is_empty(), "actual={:?}", events); + + let s3 = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + poll.register(&s3, Token(2), Ready::readable(), PollOpt::edge()).unwrap(); + + while filter(&pevents, Token(0)).len() == 0 { + poll.poll(&mut pevents, Some(Duration::from_millis(MS))).unwrap(); + } + let events = filter(&pevents, Token(0)); + + assert_eq!(events.len(), 1); + assert_eq!(events[0], Event::new(Ready::readable(), Token(0))); + + drop(l); + + poll.poll(&mut pevents, Some(Duration::from_millis(MS))).unwrap(); + let events = filter(&pevents, Token(0)); + assert!(events.is_empty()); +} + +#[test] +pub fn test_tcp_stream_level_triggered() { + drop(::env_logger::init()); + let poll = Poll::new().unwrap(); + let mut pevents = Events::with_capacity(1024); + + // Create the listener + let l = 
TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Register the listener with `Poll` + poll.register(&l, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + let mut s1 = TcpStream::connect(&l.local_addr().unwrap()).unwrap(); + poll.register(&s1, Token(1), Ready::readable() | Ready::writable(), PollOpt::level()).unwrap(); + + // Sleep a bit to ensure it arrives at dest + sleep_ms(250); + + expect_events(&poll, &mut pevents, 2, vec![ + Event::new(Ready::readable(), Token(0)), + Event::new(Ready::writable(), Token(1)), + ]); + + // Server side of socket + let (mut s1_tx, _) = l.accept().unwrap(); + + // Sleep a bit to ensure it arrives at dest + sleep_ms(250); + + expect_events(&poll, &mut pevents, 2, vec![ + Event::new(Ready::writable(), Token(1)) + ]); + + // Register the socket + poll.register(&s1_tx, Token(123), Ready::readable(), PollOpt::edge()).unwrap(); + + debug!("writing some data ----------"); + + // Write some data + let res = s1_tx.write(b"hello world!"); + assert!(res.unwrap() > 0); + + // Sleep a bit to ensure it arrives at dest + sleep_ms(250); + + debug!("looking at rx end ----------"); + + // Poll rx end + expect_events(&poll, &mut pevents, 2, vec![ + Event::new(Ready::readable(), Token(1)) + ]); + + debug!("reading ----------"); + + // Reading the data should clear it + let mut res = vec![]; + while s1.try_read_buf(&mut res).unwrap().is_some() { + } + + assert_eq!(res, b"hello world!"); + + debug!("checking just read ----------"); + + expect_events(&poll, &mut pevents, 1, vec![ + Event::new(Ready::writable(), Token(1))]); + + // Closing the socket clears all active level events + drop(s1); + + debug!("checking everything is gone ----------"); + + poll.poll(&mut pevents, Some(Duration::from_millis(MS))).unwrap(); + let events = filter(&pevents, Token(1)); + assert!(events.is_empty()); +} + +fn filter(events: &Events, token: Token) -> Vec { + (0..events.len()).map(|i| events.get(i).unwrap()) + .filter(|e| e.token() == token) + 
.collect() +} diff --git a/third_party/rust/mio/test/test_tick.rs b/third_party/rust/mio/test/test_tick.rs new file mode 100644 index 000000000000..76587eb3208e --- /dev/null +++ b/third_party/rust/mio/test/test_tick.rs @@ -0,0 +1,64 @@ +use mio::*; +use mio::deprecated::{EventLoop, Handler}; +use mio::net::{TcpListener, TcpStream}; +use {sleep_ms}; + +struct TestHandler { + tick: usize, + state: usize, +} + +impl TestHandler { + fn new() -> TestHandler { + TestHandler { + tick: 0, + state: 0, + } + } +} + +impl Handler for TestHandler { + type Timeout = usize; + type Message = String; + + fn tick(&mut self, _event_loop: &mut EventLoop) { + debug!("Handler::tick()"); + self.tick += 1; + + assert_eq!(self.state, 1); + self.state = 0; + } + + fn ready(&mut self, _event_loop: &mut EventLoop, token: Token, events: Ready) { + debug!("READY: {:?} - {:?}", token, events); + if events.is_readable() { + debug!("Handler::ready() readable event"); + assert_eq!(token, Token(0)); + assert_eq!(self.state, 0); + self.state = 1; + } + } +} + +#[test] +pub fn test_tick() { + debug!("Starting TEST_TICK"); + let mut event_loop = EventLoop::new().ok().expect("Couldn't make event loop"); + + let listener = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + event_loop.register(&listener, Token(0), Ready::readable(), PollOpt::level()).unwrap(); + + let client = TcpStream::connect(&listener.local_addr().unwrap()).unwrap(); + event_loop.register(&client, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); + + sleep_ms(250); + + let mut handler = TestHandler::new(); + + for _ in 0..2 { + event_loop.run_once(&mut handler, None).unwrap(); + } + + assert!(handler.tick == 2, "actual={}", handler.tick); + assert!(handler.state == 0, "actual={}", handler.state); +} diff --git a/third_party/rust/mio/test/test_timer.rs b/third_party/rust/mio/test/test_timer.rs new file mode 100644 index 000000000000..63d7fb53d291 --- /dev/null +++ b/third_party/rust/mio/test/test_timer.rs @@ -0,0 
+1,433 @@ +use {sleep_ms, TryRead, TryWrite}; +use mio::*; +use mio::deprecated::{EventLoop, Handler}; +use mio::timer::{Timer}; + +use mio::net::{TcpListener, TcpStream}; +use bytes::{Buf, ByteBuf, SliceBuf}; +use localhost; +use std::time::Duration; + +use self::TestState::{Initial, AfterRead}; + +#[test] +fn test_basic_timer_without_poll() { + let mut timer = Timer::default(); + + // Set the timeout + timer.set_timeout(Duration::from_millis(200), "hello").unwrap(); + + // Nothing when polled immediately + assert!(timer.poll().is_none()); + + // Wait for the timeout + sleep_ms(200); + + assert_eq!(Some("hello"), timer.poll()); + assert!(timer.poll().is_none()); +} + +#[test] +fn test_basic_timer_with_poll_edge_set_timeout_after_register() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = Timer::default(); + + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + timer.set_timeout(Duration::from_millis(200), "hello").unwrap(); + + let elapsed = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(200, elapsed), "actual={:?}", elapsed); + assert_eq!("hello", timer.poll().unwrap()); + assert_eq!(None, timer.poll()); +} + +#[test] +fn test_basic_timer_with_poll_edge_set_timeout_before_register() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = Timer::default(); + + timer.set_timeout(Duration::from_millis(200), "hello").unwrap(); + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + let elapsed = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + 
assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(200, elapsed), "actual={:?}", elapsed); + assert_eq!("hello", timer.poll().unwrap()); + assert_eq!(None, timer.poll()); +} + +#[test] +fn test_setting_later_timeout_then_earlier_one() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = Timer::default(); + + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + timer.set_timeout(Duration::from_millis(600), "hello").unwrap(); + timer.set_timeout(Duration::from_millis(200), "world").unwrap(); + + let elapsed = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(200, elapsed), "actual={:?}", elapsed); + assert_eq!("world", timer.poll().unwrap()); + assert_eq!(None, timer.poll()); + + let elapsed = self::elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(400, elapsed), "actual={:?}", elapsed); + assert_eq!("hello", timer.poll().unwrap()); + assert_eq!(None, timer.poll()); +} + +#[test] +fn test_timer_with_looping_wheel() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = timer::Builder::default() + .num_slots(2) + .build(); + + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + const TOKENS: &'static [ &'static str ] = &[ "hello", "world", "some", "thing" ]; + + for (i, msg) in TOKENS.iter().enumerate() { + timer.set_timeout(Duration::from_millis(500 * (i as u64 + 1)), msg).unwrap(); + } + + for msg in TOKENS { + let elapsed = 
elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(500, elapsed), "actual={:?}; msg={:?}", elapsed, msg); + assert_eq!(Some(msg), timer.poll()); + assert_eq!(None, timer.poll()); + + } +} + +#[test] +fn test_edge_without_polling() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = Timer::default(); + + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + timer.set_timeout(Duration::from_millis(400), "hello").unwrap(); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(400, ms), "actual={:?}", ms); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(num, 0); + }); + + assert!(is_about(300, ms), "actual={:?}", ms); +} + +#[test] +fn test_level_triggered() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = Timer::default(); + + poll.register(&timer, Token(0), Ready::readable(), PollOpt::level()).unwrap(); + + timer.set_timeout(Duration::from_millis(400), "hello").unwrap(); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(400, ms), "actual={:?}", ms); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + assert_eq!(num, 1); + assert_eq!(Token(0), events.get(0).unwrap().token()); + 
assert_eq!(Ready::readable(), events.get(0).unwrap().readiness()); + }); + + assert!(is_about(0, ms), "actual={:?}", ms); +} + +#[test] +fn test_edge_oneshot_triggered() { + let _ = ::env_logger::init(); + + let poll = Poll::new().unwrap(); + let mut events = Events::with_capacity(1024); + let mut timer = Timer::default(); + + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + timer.set_timeout(Duration::from_millis(200), "hello").unwrap(); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + assert_eq!(num, 1); + }); + + assert!(is_about(200, ms), "actual={:?}", ms); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, Some(Duration::from_millis(300))).unwrap(); + assert_eq!(num, 0); + }); + + assert!(is_about(300, ms), "actual={:?}", ms); + + poll.reregister(&timer, Token(0), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let ms = elapsed(|| { + let num = poll.poll(&mut events, None).unwrap(); + assert_eq!(num, 1); + }); + + assert!(is_about(0, ms)); +} + +#[test] +fn test_cancel_timeout() { + use std::time::Instant; + + let _ = ::env_logger::init(); + + let mut timer: Timer = Default::default(); + let timeout = timer.set_timeout(Duration::from_millis(200), 1).unwrap(); + timer.cancel_timeout(&timeout); + + let poll = Poll::new().unwrap(); + poll.register(&timer, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(16); + + let now = Instant::now(); + let dur = Duration::from_millis(500); + let mut i = 0; + + while Instant::now() - now < dur { + if i > 10 { + panic!("iterated too many times"); + } + + i += 1; + + let elapsed = Instant::now() - now; + + poll.poll(&mut events, Some(dur - elapsed)).unwrap(); + + while let Some(_) = timer.poll() { + panic!("did not expect to receive timeout"); + } + } +} + +fn elapsed(mut f: F) -> u64 { + use std::time::Instant; + + let now = Instant::now(); + + f(); + + let 
elapsed = now.elapsed(); + elapsed.as_secs() * 1000 + (elapsed.subsec_nanos() / 1_000_000) as u64 +} + +fn is_about(expect: u64, val: u64) -> bool { + const WINDOW: i64 = 200; + + ((expect as i64) - (val as i64)).abs() <= WINDOW +} + +/* + * + * ===== OLD TIMER ===== + * + */ + +const SERVER: Token = Token(0); +const CLIENT: Token = Token(1); +const CONN: Token = Token(2); + +#[derive(Debug, PartialEq)] +enum TestState { + Initial, + AfterRead, +} + +struct TestHandler { + srv: TcpListener, + cli: TcpStream, + state: TestState +} + +impl TestHandler { + fn new(srv: TcpListener, cli: TcpStream) -> TestHandler { + TestHandler { + srv: srv, + cli: cli, + state: Initial + } + } + + fn handle_read(&mut self, event_loop: &mut EventLoop, + tok: Token, _events: Ready) { + match tok { + SERVER => { + debug!("server connection ready for accept"); + let conn = self.srv.accept().unwrap().0; + event_loop.register(&conn, CONN, Ready::readable() | Ready::writable(), + PollOpt::edge()).unwrap(); + event_loop.timeout(conn, Duration::from_millis(200)).unwrap(); + + event_loop.reregister(&self.srv, SERVER, Ready::readable(), + PollOpt::edge()).unwrap(); + } + CLIENT => { + debug!("client readable"); + + match self.state { + Initial => self.state = AfterRead, + AfterRead => {} + } + + let mut buf = ByteBuf::mut_with_capacity(2048); + + match self.cli.try_read_buf(&mut buf) { + Ok(Some(0)) => return event_loop.shutdown(), + Ok(n) => { + debug!("read {:?} bytes", n); + assert!(b"zomg" == buf.flip().bytes()); + } + Err(e) => { + debug!("client sock failed to read; err={:?}", e.kind()); + } + } + + event_loop.reregister(&self.cli, CLIENT, + Ready::readable() | Ready::hup(), + PollOpt::edge()).unwrap(); + } + CONN => {} + _ => panic!("received unknown token {:?}", tok), + } + } + + fn handle_write(&mut self, event_loop: &mut EventLoop, + tok: Token, _: Ready) { + match tok { + SERVER => panic!("received writable for token 0"), + CLIENT => debug!("client connected"), + CONN => {} + _ => 
panic!("received unknown token {:?}", tok), + } + + event_loop.reregister(&self.cli, CLIENT, Ready::readable(), + PollOpt::edge()).unwrap(); + } +} + +impl Handler for TestHandler { + type Timeout = TcpStream; + type Message = (); + + fn ready(&mut self, event_loop: &mut EventLoop, tok: Token, events: Ready) { + if events.is_readable() { + self.handle_read(event_loop, tok, events); + } + + if events.is_writable() { + self.handle_write(event_loop, tok, events); + } + } + + fn timeout(&mut self, _event_loop: &mut EventLoop, mut sock: TcpStream) { + debug!("timeout handler : writing to socket"); + sock.try_write_buf(&mut SliceBuf::wrap(b"zomg")).unwrap().unwrap(); + } +} + +#[test] +pub fn test_old_timer() { + let _ = ::env_logger::init(); + + debug!("Starting TEST_TIMER"); + let mut event_loop = EventLoop::new().unwrap(); + + let addr = localhost(); + + let srv = TcpListener::bind(&addr).unwrap(); + + info!("listening for connections"); + + event_loop.register(&srv, SERVER, Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + let sock = TcpStream::connect(&addr).unwrap(); + + // Connect to the server + event_loop.register(&sock, CLIENT, Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); + + // Init the handler + let mut handler = TestHandler::new(srv, sock); + // Start the event loop + event_loop.run(&mut handler).unwrap(); + + assert!(handler.state == AfterRead, "actual={:?}", handler.state); +} diff --git a/third_party/rust/mio/test/test_udp_level.rs b/third_party/rust/mio/test/test_udp_level.rs new file mode 100644 index 000000000000..7e19d54b3e3f --- /dev/null +++ b/third_party/rust/mio/test/test_udp_level.rs @@ -0,0 +1,52 @@ +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::event::Event; +use mio::net::UdpSocket; +use {expect_events, sleep_ms}; + +#[test] +pub fn test_udp_level_triggered() { + let poll = Poll::new().unwrap(); + let poll = &poll; + let mut events = Events::with_capacity(1024); + let events = &mut events; + 
+ // Create the listener + let tx = UdpSocket::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let rx = UdpSocket::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + + poll.register(&tx, Token(0), Ready::readable() | Ready::writable(), PollOpt::level()).unwrap(); + poll.register(&rx, Token(1), Ready::readable() | Ready::writable(), PollOpt::level()).unwrap(); + + + for _ in 0..2 { + expect_events(poll, events, 2, vec![ + Event::new(Ready::writable(), Token(0)), + Event::new(Ready::writable(), Token(1)), + ]); + } + + tx.send_to(b"hello world!", &rx.local_addr().unwrap()).unwrap(); + + sleep_ms(250); + + for _ in 0..2 { + expect_events(poll, events, 2, vec![ + Event::new(Ready::readable() | Ready::writable(), Token(1)) + ]); + } + + let mut buf = [0; 200]; + while rx.recv_from(&mut buf).is_ok() {} + + for _ in 0..2 { + expect_events(poll, events, 4, vec![Event::new(Ready::writable(), Token(1))]); + } + + tx.send_to(b"hello world!", &rx.local_addr().unwrap()).unwrap(); + sleep_ms(250); + + expect_events(poll, events, 10, + vec![Event::new(Ready::readable() | Ready::writable(), Token(1))]); + + drop(rx); +} diff --git a/third_party/rust/mio/test/test_udp_socket.rs b/third_party/rust/mio/test/test_udp_socket.rs new file mode 100644 index 000000000000..dcb2e6cb950c --- /dev/null +++ b/third_party/rust/mio/test/test_udp_socket.rs @@ -0,0 +1,175 @@ +use mio::{Events, Poll, PollOpt, Ready, Token}; +use mio::net::UdpSocket; +use bytes::{Buf, RingBuf, SliceBuf, MutBuf}; +use std::io::ErrorKind; +use std::str; +use std::time; +use localhost; + +const LISTENER: Token = Token(0); +const SENDER: Token = Token(1); + +pub struct UdpHandlerSendRecv { + tx: UdpSocket, + rx: UdpSocket, + msg: &'static str, + buf: SliceBuf<'static>, + rx_buf: RingBuf, + connected: bool, + shutdown: bool, +} + +impl UdpHandlerSendRecv { + fn new(tx: UdpSocket, rx: UdpSocket, connected: bool, msg : &'static str) -> UdpHandlerSendRecv { + UdpHandlerSendRecv { + tx: tx, + rx: rx, + msg: msg, + buf: 
SliceBuf::wrap(msg.as_bytes()), + rx_buf: RingBuf::new(1024), + connected: connected, + shutdown: false, + } + } +} + +fn assert_send() { +} + +fn assert_sync() { +} + +#[cfg(test)] +fn test_send_recv_udp(tx: UdpSocket, rx: UdpSocket, connected: bool) { + debug!("Starting TEST_UDP_SOCKETS"); + let poll = Poll::new().unwrap(); + + assert_send::(); + assert_sync::(); + + // ensure that the sockets are non-blocking + let mut buf = [0; 128]; + assert_eq!(ErrorKind::WouldBlock, rx.recv_from(&mut buf).unwrap_err().kind()); + + info!("Registering SENDER"); + poll.register(&tx, SENDER, Ready::writable(), PollOpt::edge()).unwrap(); + + info!("Registering LISTENER"); + poll.register(&rx, LISTENER, Ready::readable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + + info!("Starting event loop to test with..."); + let mut handler = UdpHandlerSendRecv::new(tx, rx, connected, "hello world"); + + while !handler.shutdown { + poll.poll(&mut events, None).unwrap(); + + for event in &events { + if event.readiness().is_readable() { + match event.token() { + LISTENER => { + debug!("We are receiving a datagram now..."); + let cnt = unsafe { + if !handler.connected { + handler.rx.recv_from(handler.rx_buf.mut_bytes()).unwrap().0 + } else { + handler.rx.recv(handler.rx_buf.mut_bytes()).unwrap() + } + }; + + unsafe { MutBuf::advance(&mut handler.rx_buf, cnt); } + assert!(str::from_utf8(handler.rx_buf.bytes()).unwrap() == handler.msg); + handler.shutdown = true; + }, + _ => () + } + } + + if event.readiness().is_writable() { + match event.token() { + SENDER => { + let cnt = if !handler.connected { + let addr = handler.rx.local_addr().unwrap(); + handler.tx.send_to(handler.buf.bytes(), &addr).unwrap() + } else { + handler.tx.send(handler.buf.bytes()).unwrap() + }; + + handler.buf.advance(cnt); + }, + _ => {} + } + } + } + } +} + +#[test] +pub fn test_udp_socket() { + let addr = localhost(); + let any = localhost(); + + let tx = UdpSocket::bind(&any).unwrap(); + 
let rx = UdpSocket::bind(&addr).unwrap(); + + test_send_recv_udp(tx, rx, false); +} + +#[test] +pub fn test_udp_socket_send_recv() { + let addr = localhost(); + let any = localhost(); + + let tx = UdpSocket::bind(&any).unwrap(); + let rx = UdpSocket::bind(&addr).unwrap(); + + let tx_addr = tx.local_addr().unwrap(); + let rx_addr = rx.local_addr().unwrap(); + + assert!(tx.connect(rx_addr).is_ok()); + assert!(rx.connect(tx_addr).is_ok()); + + test_send_recv_udp(tx, rx, true); +} + +#[test] +pub fn test_udp_socket_discard() { + let addr = localhost(); + let any = localhost(); + let outside = localhost(); + + let tx = UdpSocket::bind(&any).unwrap(); + let rx = UdpSocket::bind(&addr).unwrap(); + let udp_outside = UdpSocket::bind(&outside).unwrap(); + + let tx_addr = tx.local_addr().unwrap(); + let rx_addr = rx.local_addr().unwrap(); + + assert!(tx.connect(rx_addr).is_ok()); + assert!(udp_outside.connect(rx_addr).is_ok()); + assert!(rx.connect(tx_addr).is_ok()); + + let poll = Poll::new().unwrap(); + + let r = udp_outside.send("hello world".as_bytes()); + assert!(r.is_ok() || r.unwrap_err().kind() == ErrorKind::WouldBlock); + + poll.register(&rx, LISTENER, Ready::readable(), PollOpt::edge()).unwrap(); + poll.register(&tx, SENDER, Ready::writable(), PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + + poll.poll(&mut events, Some(time::Duration::from_secs(5))).unwrap(); + + for event in &events { + if event.readiness().is_readable() { + match event.token() { + LISTENER => { + assert!(false, "Expected to no receive a packet but got something") + }, + _ => () + } + } + } +} diff --git a/third_party/rust/mio/test/test_uds_shutdown.rs b/third_party/rust/mio/test/test_uds_shutdown.rs new file mode 100644 index 000000000000..58d2431e8ed6 --- /dev/null +++ b/third_party/rust/mio/test/test_uds_shutdown.rs @@ -0,0 +1,300 @@ +use {TryRead, TryWrite}; +use mio::*; +use mio::deprecated::{EventLoop, Handler}; +use mio::deprecated::unix::*; +use bytes::{Buf, 
ByteBuf, MutByteBuf, SliceBuf}; +use slab::Slab; +use std::io; +use std::path::PathBuf; +use tempdir::TempDir; + +const SERVER: Token = Token(10_000_000); +const CLIENT: Token = Token(10_000_001); + +struct EchoConn { + sock: UnixStream, + buf: Option, + mut_buf: Option, + token: Option, + interest: Ready +} + +impl EchoConn { + fn new(sock: UnixStream) -> EchoConn { + EchoConn { + sock: sock, + buf: None, + mut_buf: Some(ByteBuf::mut_with_capacity(2048)), + token: None, + interest: Ready::hup() + } + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut buf = self.buf.take().unwrap(); + + match self.sock.try_write_buf(&mut buf) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + + self.buf = Some(buf); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CONN : we wrote {} bytes!", r); + + self.mut_buf = Some(buf.flip()); + + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + match self.sock.shutdown(Shutdown::Write) { + Err(e) => panic!(e), + _ => {}, + } + } + Err(e) => debug!("not implemented; client err={:?}", e), + } + + event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, + PollOpt::edge() | PollOpt::oneshot()) + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut buf = self.mut_buf.take().unwrap(); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + debug!("CONN : spurious read wakeup"); + self.mut_buf = Some(buf); + } + Ok(Some(r)) => { + debug!("CONN : we read {} bytes!", r); + + // prepare to provide this to writable + self.buf = Some(buf.flip()); + + self.interest.remove(Ready::readable()); + self.interest.insert(Ready::writable()); + } + Err(e) => { + debug!("not implemented; client err={:?}", e); + self.interest.remove(Ready::readable()); + } + + }; + + event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, + PollOpt::edge()) + } +} + +struct EchoServer { + sock: 
UnixListener, + conns: Slab +} + +impl EchoServer { + fn accept(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("server accepting socket"); + + let sock = self.sock.accept().unwrap(); + let conn = EchoConn::new(sock,); + let tok = self.conns.insert(conn); + + // Register the connection + self.conns[tok].token = Some(Token(tok)); + event_loop.register(&self.conns[tok].sock, Token(tok), Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()) + .ok().expect("could not register socket with event loop"); + + Ok(()) + } + + fn conn_readable(&mut self, event_loop: &mut EventLoop, + tok: Token) -> io::Result<()> { + debug!("server conn readable; tok={:?}", tok); + self.conn(tok).readable(event_loop) + } + + fn conn_writable(&mut self, event_loop: &mut EventLoop, + tok: Token) -> io::Result<()> { + debug!("server conn writable; tok={:?}", tok); + self.conn(tok).writable(event_loop) + } + + fn conn<'a>(&'a mut self, tok: Token) -> &'a mut EchoConn { + &mut self.conns[tok.into()] + } +} + +struct EchoClient { + sock: UnixStream, + msgs: Vec<&'static str>, + tx: SliceBuf<'static>, + rx: SliceBuf<'static>, + mut_buf: Option, + token: Token, + interest: Ready +} + + +// Sends a message and expects to receive the same exact message, one at a time +impl EchoClient { + fn new(sock: UnixStream, tok: Token, mut msgs: Vec<&'static str>) -> EchoClient { + let curr = msgs.remove(0); + + EchoClient { + sock: sock, + msgs: msgs, + tx: SliceBuf::wrap(curr.as_bytes()), + rx: SliceBuf::wrap(curr.as_bytes()), + mut_buf: Some(ByteBuf::mut_with_capacity(2048)), + token: tok, + interest: Ready::none() + } + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket readable"); + + let mut buf = self.mut_buf.take().unwrap(); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + debug!("CLIENT : spurious read wakeup"); + self.mut_buf = Some(buf); + } + Ok(Some(r)) => { + if r == 0 { + self.interest.remove(Ready::readable()); 
+ event_loop.shutdown(); + } else { + debug!("CLIENT : We read {} bytes!", r); + + // prepare for reading + let mut buf = buf.flip(); + + while buf.has_remaining() { + let actual = buf.read_byte().unwrap(); + let expect = self.rx.read_byte().unwrap(); + + assert!(actual == expect, "actual={}; expect={}", actual, expect); + } + + self.mut_buf = Some(buf.flip()); + if !self.rx.has_remaining() { + self.next_msg(event_loop).unwrap(); + } + } + } + Err(e) => { + panic!("not implemented; client err={:?}", e); + } + }; + + event_loop.reregister(&self.sock, self.token, self.interest, + PollOpt::edge() | PollOpt::oneshot()) + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket writable"); + + match self.sock.try_write_buf(&mut self.tx) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CLIENT : we wrote {} bytes!", r); + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + } + Err(e) => debug!("not implemented; client err={:?}", e) + } + + event_loop.reregister(&self.sock, self.token, self.interest, + PollOpt::edge() | PollOpt::oneshot()) + } + + fn next_msg(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + if self.msgs.is_empty() { + + return Ok(()); + } + + let curr = self.msgs.remove(0); + + debug!("client prepping next message"); + self.tx = SliceBuf::wrap(curr.as_bytes()); + self.rx = SliceBuf::wrap(curr.as_bytes()); + + self.interest.insert(Ready::writable()); + event_loop.reregister(&self.sock, self.token, self.interest, + PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct Echo { + server: EchoServer, + client: EchoClient, +} + +impl Echo { + fn new(srv: UnixListener, client: UnixStream, msgs: Vec<&'static str>) -> Echo { + Echo { + server: EchoServer { + sock: srv, + conns: Slab::with_capacity(128) + }, + client: EchoClient::new(client, CLIENT, msgs) + } + } +} + +impl Handler for 
Echo { + type Timeout = usize; + type Message = (); + + fn ready(&mut self, event_loop: &mut EventLoop, token: Token, + events: Ready) { + debug!("ready {:?} {:?}", token, events); + if events.is_readable() { + match token { + SERVER => self.server.accept(event_loop).unwrap(), + CLIENT => self.client.readable(event_loop).unwrap(), + i => self.server.conn_readable(event_loop, i).unwrap() + } + } + + if events.is_writable() { + match token { + SERVER => panic!("received writable for token 0"), + CLIENT => self.client.writable(event_loop).unwrap(), + _ => self.server.conn_writable(event_loop, token).unwrap() + }; + } + } +} + +#[test] +pub fn test_echo_server() { + debug!("Starting TEST_ECHO_SERVER"); + let mut event_loop = EventLoop::new().unwrap(); + + let tmp_dir = TempDir::new("mio").unwrap(); + let addr = tmp_dir.path().join(&PathBuf::from("sock")); + + let srv = UnixListener::bind(&addr).unwrap(); + + event_loop.register(&srv, SERVER, Ready::readable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let sock = UnixStream::connect(&addr).unwrap(); + + // Connect to the server + event_loop.register(&sock, CLIENT, Ready::writable(), + PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Start the event loop + event_loop.run(&mut Echo::new(srv, sock, vec!["foo", "bar"])).unwrap(); +} diff --git a/third_party/rust/mio/test/test_unix_echo_server.rs b/third_party/rust/mio/test/test_unix_echo_server.rs new file mode 100644 index 000000000000..6f3dd4b69d2f --- /dev/null +++ b/third_party/rust/mio/test/test_unix_echo_server.rs @@ -0,0 +1,292 @@ +use {TryRead, TryWrite}; +use mio::*; +use mio::deprecated::{EventLoop, Handler}; +use mio::deprecated::unix::*; +use bytes::{Buf, ByteBuf, MutByteBuf, SliceBuf}; +use slab::Slab; +use std::path::PathBuf; +use std::io; +use tempdir::TempDir; + +const SERVER: Token = Token(10_000_000); +const CLIENT: Token = Token(10_000_001); + +struct EchoConn { + sock: UnixStream, + buf: Option, + mut_buf: Option, + token: Option, + 
interest: Ready, +} + +impl EchoConn { + fn new(sock: UnixStream) -> EchoConn { + EchoConn { + sock: sock, + buf: None, + mut_buf: Some(ByteBuf::mut_with_capacity(2048)), + token: None, + interest: Ready::hup(), + } + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut buf = self.buf.take().unwrap(); + + match self.sock.try_write_buf(&mut buf) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + + self.buf = Some(buf); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CONN : we wrote {} bytes!", r); + + self.mut_buf = Some(buf.flip()); + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + } + Err(e) => debug!("not implemented; client err={:?}", e), + } + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, PollOpt::edge() | PollOpt::oneshot()) + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut buf = self.mut_buf.take().unwrap(); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + debug!("CONN : spurious read wakeup"); + self.mut_buf = Some(buf); + } + Ok(Some(r)) => { + debug!("CONN : we read {} bytes!", r); + + // prepare to provide this to writable + self.buf = Some(buf.flip()); + + self.interest.remove(Ready::readable()); + self.interest.insert(Ready::writable()); + } + Err(e) => { + debug!("not implemented; client err={:?}", e); + self.interest.remove(Ready::readable()); + } + + }; + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct EchoServer { + sock: UnixListener, + conns: Slab +} + +impl EchoServer { + fn accept(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("server accepting socket"); + + 
let sock = self.sock.accept().unwrap(); + let conn = EchoConn::new(sock); + let tok = self.conns.insert(conn); + + // Register the connection + self.conns[tok].token = Some(Token(tok)); + event_loop.register(&self.conns[tok].sock, Token(tok), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()) + .ok().expect("could not register socket with event loop"); + + Ok(()) + } + + fn conn_readable(&mut self, event_loop: &mut EventLoop, tok: Token) -> io::Result<()> { + debug!("server conn readable; tok={:?}", tok); + self.conn(tok).readable(event_loop) + } + + fn conn_writable(&mut self, event_loop: &mut EventLoop, tok: Token) -> io::Result<()> { + debug!("server conn writable; tok={:?}", tok); + self.conn(tok).writable(event_loop) + } + + fn conn<'a>(&'a mut self, tok: Token) -> &'a mut EchoConn { + &mut self.conns[tok.into()] + } +} + +struct EchoClient { + sock: UnixStream, + msgs: Vec<&'static str>, + tx: SliceBuf<'static>, + rx: SliceBuf<'static>, + mut_buf: Option, + token: Token, + interest: Ready, +} + + +// Sends a message and expects to receive the same exact message, one at a time +impl EchoClient { + fn new(sock: UnixStream, tok: Token, mut msgs: Vec<&'static str>) -> EchoClient { + let curr = msgs.remove(0); + + EchoClient { + sock: sock, + msgs: msgs, + tx: SliceBuf::wrap(curr.as_bytes()), + rx: SliceBuf::wrap(curr.as_bytes()), + mut_buf: Some(ByteBuf::mut_with_capacity(2048)), + token: tok, + interest: Ready::none(), + } + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket readable"); + + let mut buf = self.mut_buf.take().unwrap(); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + debug!("CLIENT : spurious read wakeup"); + self.mut_buf = Some(buf); + } + Ok(Some(r)) => { + debug!("CLIENT : We read {} bytes!", r); + + // prepare for reading + let mut buf = buf.flip(); + + debug!("CLIENT : buf = {:?} -- rx = {:?}", buf.bytes(), self.rx.bytes()); + while buf.has_remaining() { + let actual = 
buf.read_byte().unwrap(); + let expect = self.rx.read_byte().unwrap(); + + assert!(actual == expect, "actual={}; expect={}", actual, expect); + } + + self.mut_buf = Some(buf.flip()); + + self.interest.remove(Ready::readable()); + + if !self.rx.has_remaining() { + self.next_msg(event_loop).unwrap(); + } + } + Err(e) => { + panic!("not implemented; client err={:?}", e); + } + }; + + if !self.interest.is_none() { + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot())?; + } + + Ok(()) + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket writable"); + + match self.sock.try_write_buf(&mut self.tx) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CLIENT : we wrote {} bytes!", r); + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + } + Err(e) => debug!("not implemented; client err={:?}", e) + } + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) + } + + fn next_msg(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + if self.msgs.is_empty() { + event_loop.shutdown(); + return Ok(()); + } + + let curr = self.msgs.remove(0); + + debug!("client prepping next message"); + self.tx = SliceBuf::wrap(curr.as_bytes()); + self.rx = SliceBuf::wrap(curr.as_bytes()); + + self.interest.insert(Ready::writable()); + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct Echo { + server: EchoServer, + client: EchoClient, +} + +impl Echo { + fn new(srv: 
UnixListener, client: UnixStream, msgs: Vec<&'static str>) -> Echo { + Echo { + server: EchoServer { + sock: srv, + conns: Slab::with_capacity(128) + }, + client: EchoClient::new(client, CLIENT, msgs) + } + } +} + +impl Handler for Echo { + type Timeout = usize; + type Message = (); + + fn ready(&mut self, event_loop: &mut EventLoop, token: Token, events: Ready) { + if events.is_readable() { + match token { + SERVER => self.server.accept(event_loop).unwrap(), + CLIENT => self.client.readable(event_loop).unwrap(), + i => self.server.conn_readable(event_loop, i).unwrap() + }; + } + + if events.is_writable() { + match token { + SERVER => panic!("received writable for token 0"), + CLIENT => self.client.writable(event_loop).unwrap(), + _ => self.server.conn_writable(event_loop, token).unwrap() + }; + } + } +} + +#[test] +pub fn test_unix_echo_server() { + debug!("Starting TEST_UNIX_ECHO_SERVER"); + let mut event_loop = EventLoop::new().unwrap(); + + let tmp_dir = TempDir::new("mio").unwrap(); + let addr = tmp_dir.path().join(&PathBuf::from("sock")); + + let srv = UnixListener::bind(&addr).unwrap(); + + info!("listen for connections"); + event_loop.register(&srv, SERVER, Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let sock = UnixStream::connect(&addr).unwrap(); + + // Connect to the server + event_loop.register(&sock, CLIENT, Ready::writable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Start the event loop + event_loop.run(&mut Echo::new(srv, sock, vec!["foo", "bar"])).unwrap(); +} diff --git a/third_party/rust/mio/test/test_unix_pass_fd.rs b/third_party/rust/mio/test/test_unix_pass_fd.rs new file mode 100644 index 000000000000..f43ec2273c6a --- /dev/null +++ b/third_party/rust/mio/test/test_unix_pass_fd.rs @@ -0,0 +1,306 @@ +use {TryRead, TryWrite}; +use mio::*; +use mio::deprecated::{EventLoop, Handler}; +use mio::deprecated::unix::*; +use bytes::{Buf, ByteBuf, SliceBuf}; +use slab::Slab; +use std::path::PathBuf; +use 
std::io::{self, Read}; +use std::os::unix::io::{AsRawFd, FromRawFd}; +use tempdir::TempDir; + +const SERVER: Token = Token(10_000_000); +const CLIENT: Token = Token(10_000_001); + +struct EchoConn { + sock: UnixStream, + pipe_fd: Option, + token: Option, + interest: Ready, +} + +impl EchoConn { + fn new(sock: UnixStream) -> EchoConn { + EchoConn { + sock: sock, + pipe_fd: None, + token: None, + interest: Ready::hup(), + } + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let fd = self.pipe_fd.take().unwrap(); + + match self.sock.try_write_send_fd(b"x", fd.as_raw_fd()) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + + self.pipe_fd = Some(fd); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CONN : we wrote {} bytes!", r); + + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + } + Err(e) => debug!("not implemented; client err={:?}", e), + } + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, PollOpt::edge() | PollOpt::oneshot()) + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + let mut buf = ByteBuf::mut_with_capacity(2048); + + match self.sock.try_read_buf(&mut buf) { + Ok(None) => { + panic!("We just got readable, but were unable to read from the socket?"); + } + Ok(Some(r)) => { + debug!("CONN : we read {} bytes!", r); + self.interest.remove(Ready::readable()); + self.interest.insert(Ready::writable()); + } + Err(e) => { + debug!("not implemented; client err={:?}", e); + self.interest.remove(Ready::readable()); + } + + }; + + // create fd to pass back. 
Assume that the write will work + // without blocking, for simplicity -- we're only testing that + // the FD makes it through somehow + let (rd, mut wr) = pipe().unwrap(); + let mut buf = buf.flip(); + match wr.try_write_buf(&mut buf) { + Ok(None) => { + panic!("writing to our own pipe blocked :("); + } + Ok(Some(r)) => { + debug!("CONN: we wrote {} bytes to the FD", r); + } + Err(e) => { + panic!("not implemented; client err={:?}", e); + } + } + self.pipe_fd = Some(rd); + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token.unwrap(), self.interest, PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct EchoServer { + sock: UnixListener, + conns: Slab +} + +impl EchoServer { + fn accept(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("server accepting socket"); + + let sock = self.sock.accept().unwrap(); + let conn = EchoConn::new(sock); + let tok = self.conns.insert(conn); + + // Register the connection + self.conns[tok].token = Some(Token(tok)); + event_loop.register(&self.conns[tok].sock, Token(tok), Ready::readable(), PollOpt::edge() | PollOpt::oneshot()) + .ok().expect("could not register socket with event loop"); + + Ok(()) + } + + fn conn_readable(&mut self, event_loop: &mut EventLoop, tok: Token) -> io::Result<()> { + debug!("server conn readable; tok={:?}", tok); + self.conn(tok).readable(event_loop) + } + + fn conn_writable(&mut self, event_loop: &mut EventLoop, tok: Token) -> io::Result<()> { + debug!("server conn writable; tok={:?}", tok); + self.conn(tok).writable(event_loop) + } + + fn conn<'a>(&'a mut self, tok: Token) -> &'a mut EchoConn { + &mut self.conns[tok.into()] + } +} + +struct EchoClient { + sock: UnixStream, + msgs: Vec<&'static str>, + tx: SliceBuf<'static>, + rx: SliceBuf<'static>, + token: Token, + interest: Ready, +} + + +// Sends a message and expects to receive the same exact message, one at a time +impl EchoClient { + 
fn new(sock: UnixStream, tok: Token, mut msgs: Vec<&'static str>) -> EchoClient { + let curr = msgs.remove(0); + + EchoClient { + sock: sock, + msgs: msgs, + tx: SliceBuf::wrap(curr.as_bytes()), + rx: SliceBuf::wrap(curr.as_bytes()), + token: tok, + interest: Ready::none(), + } + } + + fn readable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket readable"); + + let mut pipe: PipeReader; + let mut buf = [0; 256]; + + match self.sock.read_recv_fd(&mut buf) { + Ok((_, None)) => { + panic!("Did not receive passed file descriptor"); + } + Ok((r, Some(fd))) => { + assert_eq!(r, 1); + assert_eq!(b'x', buf[0]); + debug!("CLIENT : We read {} bytes!", r); + pipe = From::::from(unsafe { Io::from_raw_fd(fd) }); + } + Err(e) => { + panic!("not implemented; client err={:?}", e); + } + }; + + // read the data out of the FD itself + let n = match pipe.read(&mut buf) { + Ok(r) => { + debug!("CLIENT : We read {} bytes from the FD", r); + r + } + Err(e) => { + panic!("not implemented, client err={:?}", e); + } + }; + + for &actual in buf[0..n].iter() { + let expect = self.rx.read_byte().unwrap(); + assert!(actual == expect, "actual={}; expect={}", actual, expect); + } + + self.interest.remove(Ready::readable()); + + if !self.rx.has_remaining() { + self.next_msg(event_loop).unwrap(); + } + + if !self.interest.is_none() { + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot())?; + } + + Ok(()) + } + + fn writable(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + debug!("client socket writable"); + + match self.sock.try_write_buf(&mut self.tx) { + Ok(None) => { + debug!("client flushing buf; WOULDBLOCK"); + self.interest.insert(Ready::writable()); + } + Ok(Some(r)) => { + debug!("CLIENT : we wrote {} bytes!", r); + self.interest.insert(Ready::readable()); + self.interest.remove(Ready::writable()); + 
} + Err(e) => debug!("not implemented; client err={:?}", e) + } + + assert!(self.interest.is_readable() || self.interest.is_writable(), "actual={:?}", self.interest); + event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) + } + + fn next_msg(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { + if self.msgs.is_empty() { + event_loop.shutdown(); + return Ok(()); + } + + let curr = self.msgs.remove(0); + + debug!("client prepping next message"); + self.tx = SliceBuf::wrap(curr.as_bytes()); + self.rx = SliceBuf::wrap(curr.as_bytes()); + + self.interest.insert(Ready::writable()); + event_loop.reregister(&self.sock, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) + } +} + +struct Echo { + server: EchoServer, + client: EchoClient, +} + +impl Echo { + fn new(srv: UnixListener, client: UnixStream, msgs: Vec<&'static str>) -> Echo { + Echo { + server: EchoServer { + sock: srv, + conns: Slab::with_capacity(128) + }, + client: EchoClient::new(client, CLIENT, msgs) + } + } +} + +impl Handler for Echo { + type Timeout = usize; + type Message = (); + + fn ready(&mut self, event_loop: &mut EventLoop, token: Token, events: Ready) { + if events.is_readable() { + match token { + SERVER => self.server.accept(event_loop).unwrap(), + CLIENT => self.client.readable(event_loop).unwrap(), + i => self.server.conn_readable(event_loop, i).unwrap() + }; + } + + if events.is_writable() { + match token { + SERVER => panic!("received writable for token 0"), + CLIENT => self.client.writable(event_loop).unwrap(), + _ => self.server.conn_writable(event_loop, token).unwrap() + }; + } + } +} + +#[test] +pub fn test_unix_pass_fd() { + debug!("Starting TEST_UNIX_PASS_FD"); + let mut event_loop = EventLoop::new().unwrap(); + + let tmp_dir = TempDir::new("mio").unwrap(); + let addr = tmp_dir.path().join(&PathBuf::from("sock")); + + let srv = UnixListener::bind(&addr).unwrap(); + + info!("listen for connections"); + 
event_loop.register(&srv, SERVER, Ready::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + let sock = UnixStream::connect(&addr).unwrap(); + + // Connect to the server + event_loop.register(&sock, CLIENT, Ready::writable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); + + // Start the event loop + event_loop.run(&mut Echo::new(srv, sock, vec!["foo", "bar"])).unwrap(); +} diff --git a/third_party/rust/mio/test/test_write_then_drop.rs b/third_party/rust/mio/test/test_write_then_drop.rs new file mode 100644 index 000000000000..aa478eaf4a0c --- /dev/null +++ b/third_party/rust/mio/test/test_write_then_drop.rs @@ -0,0 +1,123 @@ +use std::io::{Write, Read}; + +use mio::event::Evented; +use mio::net::{TcpListener, TcpStream}; +use mio::{Poll, Events, Ready, PollOpt, Token}; + +#[test] +fn write_then_drop() { + drop(::env_logger::init()); + + let a = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = a.local_addr().unwrap(); + let mut s = TcpStream::connect(&addr).unwrap(); + + let poll = Poll::new().unwrap(); + + a.register(&poll, + Token(1), + Ready::readable(), + PollOpt::edge()).unwrap(); + s.register(&poll, + Token(3), + Ready::empty(), + PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + while events.len() == 0 { + poll.poll(&mut events, None).unwrap(); + } + assert_eq!(events.len(), 1); + assert_eq!(events.get(0).unwrap().token(), Token(1)); + + let mut s2 = a.accept().unwrap().0; + + s2.register(&poll, + Token(2), + Ready::writable(), + PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + while events.len() == 0 { + poll.poll(&mut events, None).unwrap(); + } + assert_eq!(events.len(), 1); + assert_eq!(events.get(0).unwrap().token(), Token(2)); + + s2.write(&[1, 2, 3, 4]).unwrap(); + drop(s2); + + s.reregister(&poll, + Token(3), + Ready::readable(), + PollOpt::edge()).unwrap(); + let mut events = Events::with_capacity(1024); + while events.len() == 0 { + poll.poll(&mut 
events, None).unwrap(); + } + assert_eq!(events.len(), 1); + assert_eq!(events.get(0).unwrap().token(), Token(3)); + + let mut buf = [0; 10]; + assert_eq!(s.read(&mut buf).unwrap(), 4); + assert_eq!(&buf[0..4], &[1, 2, 3, 4]); +} + +#[test] +fn write_then_deregister() { + drop(::env_logger::init()); + + let a = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = a.local_addr().unwrap(); + let mut s = TcpStream::connect(&addr).unwrap(); + + let poll = Poll::new().unwrap(); + + a.register(&poll, + Token(1), + Ready::readable(), + PollOpt::edge()).unwrap(); + s.register(&poll, + Token(3), + Ready::empty(), + PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + while events.len() == 0 { + poll.poll(&mut events, None).unwrap(); + } + assert_eq!(events.len(), 1); + assert_eq!(events.get(0).unwrap().token(), Token(1)); + + let mut s2 = a.accept().unwrap().0; + + s2.register(&poll, + Token(2), + Ready::writable(), + PollOpt::edge()).unwrap(); + + let mut events = Events::with_capacity(1024); + while events.len() == 0 { + poll.poll(&mut events, None).unwrap(); + } + assert_eq!(events.len(), 1); + assert_eq!(events.get(0).unwrap().token(), Token(2)); + + s2.write(&[1, 2, 3, 4]).unwrap(); + s2.deregister(&poll).unwrap(); + + s.reregister(&poll, + Token(3), + Ready::readable(), + PollOpt::edge()).unwrap(); + let mut events = Events::with_capacity(1024); + while events.len() == 0 { + poll.poll(&mut events, None).unwrap(); + } + assert_eq!(events.len(), 1); + assert_eq!(events.get(0).unwrap().token(), Token(3)); + + let mut buf = [0; 10]; + assert_eq!(s.read(&mut buf).unwrap(), 4); + assert_eq!(&buf[0..4], &[1, 2, 3, 4]); +} diff --git a/third_party/rust/net2/.cargo-checksum.json b/third_party/rust/net2/.cargo-checksum.json index 6cd1eedcf0b6..9aafa5de95ff 100644 --- a/third_party/rust/net2/.cargo-checksum.json +++ b/third_party/rust/net2/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".travis.yml":"4c3bd4917e81d86c9d1b9783e5bfaf690f2f87ac8a53d4518fab5c57851d74a5","Cargo.toml":"71b916e2e9121e4d1ddd49750b04ed92cf292eb94872c5576deed51fe1a540b6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"c367926adcb26dd48a679ba370b127efd37fc7a51354b3c9176f0576f2efb17b","appveyor.yml":"6e3173d907ccfa65e289c99042cb29d4a23b9d1f4ec5bf7afa6c0d65365bab54","src/ext.rs":"0b5d073592fc0720b3ca9f8821c79f3f7ca0fbb62e5ca618a5c5d6daac506568","src/lib.rs":"c8eedd4215f5a71f7faf4117c6c65b766bfecb7480013937b548d2310eda7d42","src/socket.rs":"a76f72198e33de37b7cf46e7ecf03b1f5c29a20174fd189e0cb97a60975d15a9","src/sys/unix/impls.rs":"05f123226e8fe7559317d50864021650b2455d25d01a9aff1c65c01ae26cf4ef","src/sys/unix/mod.rs":"1ac3a75714fd7a5ad11d9b97a25e2dbb6a0fa6db529f2752a0d83ff0fc212eaf","src/sys/windows/impls.rs":"5e8824f5477184a57e79809a0ca8c00db75ba230648d321aec44019cc9c1a362","src/sys/windows/mod.rs":"de6896d64217816719c8b974fd2c7ba78379edcd6e33ae33ea8abe2d19b6e68d","src/tcp.rs":"0bebf5cca75714151de30c8f2d7697ca519c57da065e93ba81796bce04673f8d","src/udp.rs":"8af5a55a4ae5e4120ffe18dcc4dc24072e18da34bf3591a02b18653e5d3e8ac8","src/unix.rs":"fe9cdbd75ef2e1fafc128d1abb5f32500eaf0b674aa618d837e06ab1d0dc6687","src/utils.rs":"d31de5333a6fa2f5c99b64cc937be596888d9863264632e6bc6b36b30197fa5b","tests/all.rs":"12cb4616c842f655ece1b477664d41821f95b3051053da641b1d85026ee18274"},"package":"3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"} \ No newline at end of file 
+{"files":{".travis.yml":"662ec1f762f251e9bee762b82386240948c2973d2a985cf5a6a765e7c28c9590","Cargo.toml":"b545fbbaf6d6a66d31e0c47edf28489e160c29bff3f8e1df01c43efeebc9cfe6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"07bdbd3752d286b8993d0954657485521f2a9da96cc54c7c298965a65c3d2b3e","appveyor.yml":"6e3173d907ccfa65e289c99042cb29d4a23b9d1f4ec5bf7afa6c0d65365bab54","src/ext.rs":"8b58f79463ab528b7c28734d27feb7e1bb9893a120881a567d7ed709afe0360d","src/lib.rs":"db08d8dcd2dcf0323b7f6bdd597827982ed2ccf377572a0db722de376597a7e9","src/socket.rs":"541c8f2d92074758d9c30802963870e0dc813795141a466c039061af14652dba","src/sys/unix/impls.rs":"05f123226e8fe7559317d50864021650b2455d25d01a9aff1c65c01ae26cf4ef","src/sys/unix/mod.rs":"bb28a7275eac021132b01206d134802902d616c0be3e82100b45a3ff1eba4c02","src/sys/windows/impls.rs":"bee70b7cd45055c4eaa1967f7aad7ec46639de458c71ed6f3e97a2f7b2c49281","src/sys/windows/mod.rs":"0706f1587af0f693f75e8597e4196075a3f673c89727dd865447da306974bf35","src/tcp.rs":"0bebf5cca75714151de30c8f2d7697ca519c57da065e93ba81796bce04673f8d","src/udp.rs":"8af5a55a4ae5e4120ffe18dcc4dc24072e18da34bf3591a02b18653e5d3e8ac8","src/unix.rs":"fe9cdbd75ef2e1fafc128d1abb5f32500eaf0b674aa618d837e06ab1d0dc6687","src/utils.rs":"d31de5333a6fa2f5c99b64cc937be596888d9863264632e6bc6b36b30197fa5b","tests/all.rs":"12cb4616c842f655ece1b477664d41821f95b3051053da641b1d85026ee18274"},"package":"9044faf1413a1057267be51b5afba8eb1090bd2231c693664aa1db716fe1eae0"} \ No newline at end of file diff --git a/third_party/rust/net2/.travis.yml b/third_party/rust/net2/.travis.yml index b6073dadd04a..b21f2723f8d9 100644 --- a/third_party/rust/net2/.travis.yml +++ b/third_party/rust/net2/.travis.yml @@ -1,31 +1,32 @@ language: rust -rust: - - stable - - beta - - nightly sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export 
PATH=$HOME/.local/bin:$PATH + +matrix: + include: + - rust: 1.21.0 + - rust: stable + - os: osx + - rust: beta + - rust: nightly + script: + - cargo test + - cargo test --feature snightly + + - rust: nightly + before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH + script: + - cargo doc --no-deps --all-features + after_success: + - travis-cargo doc-upload + script: - - if [ "$TRAVIS_RUST_VERSION" = "1.1.0" ]; then - cargo test --no-default-features; - else - cargo test; - fi - - if [ "$TRAVIS_RUST_VERSION" = "nightly" ]; then - cargo test --features nightly; - cargo doc --features nightly --no-deps; - fi - - rustdoc --test README.md -L target/debug -L target/debug/deps -after_success: - - travis-cargo --only nightly doc-upload -notifications: - email: - on_success: never + - cargo test + env: global: secure: "n7C4SSdi9Q1WcPxc9BKQi/vhPhhxCfK/ljqG0l8PpNhkJ1BzFgb/0O4zA2W1/JzHxp0VB7rGwRCTSZkePvH1ycZuNtIezFc2HktElpRGwmo8X2OHp2+GLkTKozjhV0qZho+XoQnB0zgZRAdTq+MSN2EpTUKsaNvZwrTK90MprUPKU06Hvq93oEWmDh0jyKee0LlMezS6ihTgNk43zIa6bNumIWoaUM9ePnN7IvKSnoiynKjrBU52GPF5cWKih35mTXNxXW89Ht2h1NhIAHwmUpNfyOsBb2LOfvathVitfqk81R6+1qWzFyWSHdFoDAM0HHs8sySFK3P2YVcAp4tNIBw29oAtCpSGK6XeDyxmEU9VAq2H7DzEaBnkZM5A4oNnJWsValBmY+8m21OwV/XRed+eiqg5WUfnjeEoBn/5BJxMsc+kkVztS1Yos1meHZazTIaSpICxJ8fieHnzTOKD3wKgHwXSQaCAQHAErM301DRlChkXj61txDCmLVrU4qVRSMrAQQFPUBeploNaQtvCr/JI7huOhw5A6DphnGH8bbNivwATuUnbvQRJF+VGU3yOkJieJAQzArjGQ1A+qMds+DKlfFH/mPMxQcKv7bEE7cTZ3DY8ZzJfMzsh6YIa/YP0hpDZ5z4tJaeEXRyNVPEAwMGk4pCITP949WaTs97XOpM=" -os: - - linux - - osx +notifications: + email: + on_success: never diff --git a/third_party/rust/net2/Cargo.toml b/third_party/rust/net2/Cargo.toml index 3f2ae0becce7..1b0578f51964 100644 --- a/third_party/rust/net2/Cargo.toml +++ b/third_party/rust/net2/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "net2" -version = "0.2.31" +version = "0.2.32" authors = ["Alex Crichton "] description = "Extensions to the standard library's networking types as 
proposed in RFC 1158.\n" homepage = "https://github.com/rust-lang-nursery/net2-rs" @@ -24,24 +24,19 @@ repository = "https://github.com/rust-lang-nursery/net2-rs" version = "0.1" [features] -nightly = [] default = ["duration"] duration = [] -[target.i686-unknown-linux-gnu.dependencies.libc] -version = "0.2.16" -[target.x86_64-unknown-linux-gnu.dependencies.libc] -version = "0.2.16" -[target.i686-apple-darwin.dependencies.libc] -version = "0.2.16" -[target.x86_64-apple-darwin.dependencies.libc] -version = "0.2.16" -[target."cfg(windows)".dependencies.kernel32-sys] -version = "0.2" - -[target."cfg(windows)".dependencies.winapi] -version = "0.2" - -[target."cfg(windows)".dependencies.ws2_32-sys] -version = "0.2" +nightly = [] [target."cfg(unix)".dependencies.libc] -version = "0.2.16" +version = "0.2.37" +[target."cfg(windows)".dependencies.winapi] +version = "0.3" +features = ["handleapi", "winsock2", "ws2def", "ws2ipdef", "ws2tcpip"] +[target.i686-apple-darwin.dependencies.libc] +version = "0.2.37" +[target.i686-unknown-linux-gnu.dependencies.libc] +version = "0.2.37" +[target.x86_64-apple-darwin.dependencies.libc] +version = "0.2.37" +[target.x86_64-unknown-linux-gnu.dependencies.libc] +version = "0.2.37" diff --git a/third_party/rust/net2/README.md b/third_party/rust/net2/README.md index dffad8fc2a2f..081c0ca23df6 100644 --- a/third_party/rust/net2/README.md +++ b/third_party/rust/net2/README.md @@ -19,8 +19,17 @@ net2 = "0.2" # License -`net2-rs` is primarily distributed under the terms of both the MIT license and -the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. +This project is licensed under either of -See LICENSE-APACHE, and LICENSE-MIT for details. + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. 
+ +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/net2/src/ext.rs b/third_party/rust/net2/src/ext.rs index 32444d45b32e..50a85cf40cc0 100644 --- a/third_party/rust/net2/src/ext.rs +++ b/third_party/rust/net2/src/ext.rs @@ -17,6 +17,7 @@ use std::net::ToSocketAddrs; use {TcpBuilder, UdpBuilder, FromInner}; use sys; +use sys::c; use socket; cfg_if! { @@ -42,8 +43,7 @@ use std::time::Duration; #[cfg(unix)] use libc::*; #[cfg(windows)] pub type Socket = SOCKET; #[cfg(windows)] use std::os::windows::prelude::*; -#[cfg(windows)] use ws2_32::*; -#[cfg(windows)] use winapi::*; +#[cfg(windows)] use sys::c::*; #[cfg(windows)] const SIO_KEEPALIVE_VALS: DWORD = 0x98000004; #[cfg(windows)] @@ -54,7 +54,7 @@ struct tcp_keepalive { keepaliveinterval: c_ulong, } -#[cfg(windows)] fn v(opt: IPPROTO) -> c_int { opt.0 as c_int } +#[cfg(windows)] fn v(opt: IPPROTO) -> c_int { opt as c_int } #[cfg(unix)] fn v(opt: c_int) -> c_int { opt } pub fn set_opt(sock: Socket, opt: c_int, val: c_int, @@ -418,6 +418,12 @@ pub trait UdpSocketExt { /// [link]: #tymethod.set_multicast_ttl_v4 fn multicast_ttl_v4(&self) -> io::Result; + /// Sets the value of the `IPV6_MULTICAST_HOPS` option for this socket + fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()>; + + /// Gets the value of the `IPV6_MULTICAST_HOPS` option for this socket + fn multicast_hops_v6(&self) -> io::Result; + /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. /// /// Controls whether this socket sees the multicast packets it sends itself. @@ -432,6 +438,27 @@ pub trait UdpSocketExt { /// [link]: #tymethod.set_multicast_loop_v6 fn multicast_loop_v6(&self) -> io::Result; + /// Sets the value of the `IP_MULTICAST_IF` option for this socket. 
+ /// + /// Specifies the interface to use for routing multicast packets. + fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()>; + + /// Gets the value of the `IP_MULTICAST_IF` option for this socket. + /// + /// Returns the interface to use for routing multicast packets. + fn multicast_if_v4(&self) -> io::Result; + + + /// Sets the value of the `IPV6_MULTICAST_IF` option for this socket. + /// + /// Specifies the interface to use for routing multicast packets. + fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()>; + + /// Gets the value of the `IPV6_MULTICAST_IF` option for this socket. + /// + /// Returns the interface to use for routing multicast packets. + fn multicast_if_v6(&self) -> io::Result; + /// Sets the value for the `IP_TTL` option on this socket. /// /// This is the same as [`TcpStreamExt::set_ttl`][other]. @@ -447,6 +474,16 @@ pub trait UdpSocketExt { /// [link]: trait.TcpStreamExt.html#tymethod.set_ttl fn ttl(&self) -> io::Result; + /// Sets the value for the `IPV6_UNICAST_HOPS` option on this socket. + /// + /// Specifies the hop limit for ipv6 unicast packets + fn set_unicast_hops_v6(&self, ttl: u32) -> io::Result<()>; + + /// Gets the value of the `IPV6_UNICAST_HOPS` option for this socket. + /// + /// Specifies the hop limit for ipv6 unicast packets + fn unicast_hops_v6(&self) -> io::Result; + /// Sets the value for the `IPV6_V6ONLY` option on this socket. /// /// For more information about this option, see @@ -604,7 +641,7 @@ impl AsSock for T { } #[cfg(windows)] impl AsSock for T { - fn as_sock(&self) -> Socket { self.as_raw_socket() } + fn as_sock(&self) -> Socket { self.as_raw_socket() as Socket } } cfg_if! 
{ @@ -929,14 +966,27 @@ impl UdpSocketExt for UdpSocket { get_opt(self.as_sock(), IPPROTO_IP, IP_MULTICAST_LOOP) .map(int2bool) } + fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> { set_opt(self.as_sock(), IPPROTO_IP, IP_MULTICAST_TTL, multicast_ttl_v4 as c_int) } + fn multicast_ttl_v4(&self) -> io::Result { get_opt::(self.as_sock(), IPPROTO_IP, IP_MULTICAST_TTL) .map(|b| b as u32) } + + fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> { + set_opt(self.as_sock(), v(IPPROTO_IPV6), IPV6_MULTICAST_HOPS, + hops as c_int) + } + + fn multicast_hops_v6(&self) -> io::Result { + get_opt::(self.as_sock(), v(IPPROTO_IPV6), IPV6_MULTICAST_HOPS) + .map(|b| b as u32) + } + fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> { set_opt(self.as_sock(), v(IPPROTO_IPV6), IPV6_MULTICAST_LOOP, multicast_loop_v6 as c_int) @@ -946,6 +996,22 @@ impl UdpSocketExt for UdpSocket { .map(int2bool) } + fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> { + set_opt(self.as_sock(), IPPROTO_IP, IP_MULTICAST_IF, ip2in_addr(interface)) + } + + fn multicast_if_v4(&self) -> io::Result { + get_opt(self.as_sock(), IPPROTO_IP, IP_MULTICAST_IF).map(in_addr2ip) + } + + fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> { + set_opt(self.as_sock(), v(IPPROTO_IPV6), IPV6_MULTICAST_IF, to_ipv6mr_interface(interface)) + } + + fn multicast_if_v6(&self) -> io::Result { + get_opt::(self.as_sock(), v(IPPROTO_IPV6), IPV6_MULTICAST_IF).map(|b| b as u32) + } + fn set_ttl(&self, ttl: u32) -> io::Result<()> { set_opt(self.as_sock(), IPPROTO_IP, IP_TTL, ttl as c_int) } @@ -955,6 +1021,15 @@ impl UdpSocketExt for UdpSocket { .map(|b| b as u32) } + fn set_unicast_hops_v6(&self, ttl: u32) -> io::Result<()> { + set_opt(self.as_sock(), v(IPPROTO_IPV6), IPV6_UNICAST_HOPS, ttl as c_int) + } + + fn unicast_hops_v6(&self) -> io::Result { + get_opt::(self.as_sock(), IPPROTO_IP, IPV6_UNICAST_HOPS) + .map(|b| b as u32) + } + fn 
set_only_v6(&self, only_v6: bool) -> io::Result<()> { set_opt(self.as_sock(), v(IPPROTO_IPV6), IPV6_V6ONLY, only_v6 as c_int) } @@ -1128,14 +1203,29 @@ fn ip2in_addr(ip: &Ipv4Addr) -> in_addr { #[cfg(windows)] fn ip2in_addr(ip: &Ipv4Addr) -> in_addr { let oct = ip.octets(); - in_addr { - S_un: ::hton(((oct[0] as u32) << 24) | - ((oct[1] as u32) << 16) | - ((oct[2] as u32) << 8) | - ((oct[3] as u32) << 0)), + unsafe { + let mut S_un: in_addr_S_un = mem::zeroed(); + *S_un.S_addr_mut() = ::hton(((oct[0] as u32) << 24) | + ((oct[1] as u32) << 16) | + ((oct[2] as u32) << 8) | + ((oct[3] as u32) << 0)); + in_addr { + S_un: S_un, + } } } +fn in_addr2ip(ip: &in_addr) -> Ipv4Addr { + let h_addr = c::in_addr_to_u32(ip); + + let a: u8 = (h_addr >> 24) as u8; + let b: u8 = (h_addr >> 16) as u8; + let c: u8 = (h_addr >> 8) as u8; + let d: u8 = (h_addr >> 0) as u8; + + Ipv4Addr::new(a,b,c,d) +} + #[cfg(target_os = "android")] fn to_ipv6mr_interface(value: u32) -> c_int { value as c_int @@ -1149,7 +1239,7 @@ fn to_ipv6mr_interface(value: u32) -> c_uint { fn ip2in6_addr(ip: &Ipv6Addr) -> in6_addr { let mut ret: in6_addr = unsafe { mem::zeroed() }; let seg = ip.segments(); - ret.s6_addr = [ + let bytes = [ (seg[0] >> 8) as u8, (seg[0] >> 0) as u8, (seg[1] >> 8) as u8, @@ -1167,6 +1257,9 @@ fn ip2in6_addr(ip: &Ipv6Addr) -> in6_addr { (seg[7] >> 8) as u8, (seg[7] >> 0) as u8, ]; + #[cfg(windows)] unsafe { *ret.u.Byte_mut() = bytes; } + #[cfg(not(windows))] { ret.s6_addr = bytes; } + return ret } diff --git a/third_party/rust/net2/src/lib.rs b/third_party/rust/net2/src/lib.rs index 4cd38fa5a1fa..e96530b2b01c 100644 --- a/third_party/rust/net2/src/lib.rs +++ b/third_party/rust/net2/src/lib.rs @@ -44,9 +44,7 @@ #[cfg(unix)] extern crate libc; -#[cfg(windows)] extern crate kernel32; #[cfg(windows)] extern crate winapi; -#[cfg(windows)] extern crate ws2_32; #[macro_use] extern crate cfg_if; diff --git a/third_party/rust/net2/src/socket.rs b/third_party/rust/net2/src/socket.rs index 
a23713c2628d..3886eabf627a 100644 --- a/third_party/rust/net2/src/socket.rs +++ b/third_party/rust/net2/src/socket.rs @@ -15,7 +15,7 @@ use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; #[cfg(unix)] use libc::c_int; #[cfg(windows)] -use winapi::c_int; +use winapi::ctypes::c_int; use sys; use sys::c; @@ -119,7 +119,8 @@ fn raw2addr(storage: &c::sockaddr_storage, len: c::socklen_t) -> io::Result= mem::size_of::()); let sa = storage as *const _ as *const c::sockaddr_in6; - let arr = (*sa).sin6_addr.s6_addr; + #[cfg(windows)] let arr = (*sa).sin6_addr.u.Byte(); + #[cfg(not(windows))] let arr = (*sa).sin6_addr.s6_addr; let ip = Ipv6Addr::new( (arr[0] as u16) << 8 | (arr[1] as u16), @@ -132,10 +133,13 @@ fn raw2addr(storage: &c::sockaddr_storage, len: c::socklen_t) -> io::Result Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid argument")), diff --git a/third_party/rust/net2/src/sys/unix/mod.rs b/third_party/rust/net2/src/sys/unix/mod.rs index 3aa997e4f3ce..e13f97c1a348 100644 --- a/third_party/rust/net2/src/sys/unix/mod.rs +++ b/third_party/rust/net2/src/sys/unix/mod.rs @@ -25,6 +25,10 @@ pub mod c { pub fn sockaddr_in_u32(sa: &sockaddr_in) -> u32 { ::ntoh((*sa).sin_addr.s_addr) } + + pub fn in_addr_to_u32(addr: &in_addr) -> u32 { + ::ntoh(addr.s_addr) + } } pub struct Socket { diff --git a/third_party/rust/net2/src/sys/windows/impls.rs b/third_party/rust/net2/src/sys/windows/impls.rs index 59c120716f08..48e787ca20b3 100644 --- a/third_party/rust/net2/src/sys/windows/impls.rs +++ b/third_party/rust/net2/src/sys/windows/impls.rs @@ -8,37 +8,37 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::os::windows::io::{FromRawSocket, AsRawSocket}; -use winapi::SOCKET; +use std::os::windows::io::{FromRawSocket, RawSocket, AsRawSocket}; +use winapi::um::winsock2::SOCKET; use {TcpBuilder, UdpBuilder, FromInner, AsInner}; use socket::Socket; use sys; impl FromRawSocket for TcpBuilder { - unsafe fn from_raw_socket(fd: SOCKET) -> TcpBuilder { - let sock = sys::Socket::from_inner(fd); + unsafe fn from_raw_socket(fd: RawSocket) -> TcpBuilder { + let sock = sys::Socket::from_inner(fd as SOCKET); TcpBuilder::from_inner(Socket::from_inner(sock)) } } impl AsRawSocket for TcpBuilder { - fn as_raw_socket(&self) -> SOCKET { + fn as_raw_socket(&self) -> RawSocket { // TODO: this unwrap() is very bad - self.as_inner().borrow().as_ref().unwrap().as_inner().raw() + self.as_inner().borrow().as_ref().unwrap().as_inner().raw() as RawSocket } } impl FromRawSocket for UdpBuilder { - unsafe fn from_raw_socket(fd: SOCKET) -> UdpBuilder { - let sock = sys::Socket::from_inner(fd); + unsafe fn from_raw_socket(fd: RawSocket) -> UdpBuilder { + let sock = sys::Socket::from_inner(fd as SOCKET); UdpBuilder::from_inner(Socket::from_inner(sock)) } } impl AsRawSocket for UdpBuilder { - fn as_raw_socket(&self) -> SOCKET { + fn as_raw_socket(&self) -> RawSocket { // TODO: this unwrap() is very bad - self.as_inner().borrow().as_ref().unwrap().as_inner().raw() + self.as_inner().borrow().as_ref().unwrap().as_inner().raw() as RawSocket } } diff --git a/third_party/rust/net2/src/sys/windows/mod.rs b/third_party/rust/net2/src/sys/windows/mod.rs index 6399466a3872..b2da96d2f91b 100644 --- a/third_party/rust/net2/src/sys/windows/mod.rs +++ b/third_party/rust/net2/src/sys/windows/mod.rs @@ -13,29 +13,43 @@ use std::io; use std::mem; use std::net::{TcpListener, TcpStream, UdpSocket}; -use std::os::windows::io::FromRawSocket; +use std::os::windows::io::{RawSocket, FromRawSocket}; use std::sync::{Once, ONCE_INIT}; -use winapi::*; -use ws2_32::*; -use kernel32::*; - -const WSA_FLAG_OVERLAPPED: DWORD = 
0x01; const HANDLE_FLAG_INHERIT: DWORD = 0x00000001; pub mod c { - pub use winapi::*; - pub use ws2_32::*; - - pub use winapi::SOCKADDR as sockaddr; - pub use winapi::SOCKADDR_STORAGE as sockaddr_storage; - pub use winapi::SOCKADDR_IN as sockaddr_in; + pub use winapi::ctypes::*; + pub use winapi::um::handleapi::*; + pub use winapi::um::winbase::*; + pub use winapi::um::winsock2::*; + pub use winapi::um::ws2tcpip::*; + + pub use winapi::shared::inaddr::*; + pub use winapi::shared::in6addr::*; + pub use winapi::shared::minwindef::*; + pub use winapi::shared::ntdef::*; + pub use winapi::shared::ws2def::*; + pub use winapi::shared::ws2def::{SOCK_STREAM, SOCK_DGRAM}; + pub use winapi::shared::ws2def::SOCKADDR as sockaddr; + pub use winapi::shared::ws2def::SOCKADDR_STORAGE as sockaddr_storage; + pub use winapi::shared::ws2def::SOCKADDR_IN as sockaddr_in; + pub use winapi::shared::ws2ipdef::*; + pub use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH as sockaddr_in6; + pub use winapi::shared::ws2ipdef::IP_MREQ as ip_mreq; + pub use winapi::shared::ws2ipdef::IPV6_MREQ as ipv6_mreq; pub fn sockaddr_in_u32(sa: &sockaddr_in) -> u32 { - ::ntoh(sa.sin_addr.S_un) + ::ntoh(unsafe { *sa.sin_addr.S_un.S_addr() }) + } + + pub fn in_addr_to_u32(addr: &in_addr) -> u32 { + ::ntoh(unsafe { *addr.S_un.S_addr() }) } } +use self::c::*; + mod impls; fn init() { @@ -76,15 +90,15 @@ impl Socket { } pub fn into_tcp_listener(self) -> TcpListener { - unsafe { TcpListener::from_raw_socket(self.into_socket()) } + unsafe { TcpListener::from_raw_socket(self.into_socket() as RawSocket) } } pub fn into_tcp_stream(self) -> TcpStream { - unsafe { TcpStream::from_raw_socket(self.into_socket()) } + unsafe { TcpStream::from_raw_socket(self.into_socket() as RawSocket) } } pub fn into_udp_socket(self) -> UdpSocket { - unsafe { UdpSocket::from_raw_socket(self.into_socket()) } + unsafe { UdpSocket::from_raw_socket(self.into_socket() as RawSocket) } } fn set_no_inherit(&self) -> io::Result<()> { diff --git 
a/third_party/rust/rand-0.3.22/.cargo-checksum.json b/third_party/rust/rand-0.3.22/.cargo-checksum.json new file mode 100644 index 000000000000..ee98b9a871a2 --- /dev/null +++ b/third_party/rust/rand-0.3.22/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".travis.yml":"d6038b33878806f8a5aea752262338d5bcb4d7cc17cc7fdd6dd1fd7aa1884cf8","Cargo.toml":"8728561c8ff137b00ca07c2d2040b81bb5bc6360a17d1a7d371420ccb2fc2fe7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"73faa8e1032381321d2f8746976d3780d26e122d43f755080d9a67acdaa4ada2","appveyor.yml":"6bf2f0c8f17d2dc4957dd39aba9a88ced3130200cf847a76d47b6c8fdcc2cbd8","src/distributions/mod.rs":"7d75162ba315a9c5ccb6277bc949e5ffb1c9522cd380a0c31078b573ac885137","src/lib.rs":"15f68f5b31fc79db77ad1931ac85df15828382ad611b1684e4c1b56f2af093c9","src/rand_impls.rs":"958d0056a2c3804b187fc8e1d825a8335361fba2b129c165bd88709d5cdf595d"},"package":"15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1"} \ No newline at end of file diff --git a/third_party/rust/rand-0.3.22/.travis.yml b/third_party/rust/rand-0.3.22/.travis.yml new file mode 100644 index 000000000000..f4be3790ab32 --- /dev/null +++ b/third_party/rust/rand-0.3.22/.travis.yml @@ -0,0 +1,31 @@ +language: rust +sudo: false + +matrix: + include: + - rust: 1.15.0 + - rust: stable + - rust: stable + os: osx + - rust: beta + - rust: nightly + before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH + script: + - cargo doc --no-deps --all-features + - cargo bench + - cargo test --features nightly + after_success: + - travis-cargo --only nightly doc-upload + +script: + - cargo test + - cargo test --manifest-path rand-derive/Cargo.toml + +env: + global: + secure: 
"BdDntVHSompN+Qxz5Rz45VI4ZqhD72r6aPl166FADlnkIwS6N6FLWdqs51O7G5CpoMXEDvyYrjmRMZe/GYLIG9cmqmn/wUrWPO+PauGiIuG/D2dmfuUNvSTRcIe7UQLXrfP3yyfZPgqsH6pSnNEVopquQKy3KjzqepgriOJtbyY=" + +notifications: + email: + on_success: never diff --git a/third_party/rust/rand-0.3.22/Cargo.toml b/third_party/rust/rand-0.3.22/Cargo.toml new file mode 100644 index 000000000000..31b6f526031a --- /dev/null +++ b/third_party/rust/rand-0.3.22/Cargo.toml @@ -0,0 +1,38 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "rand" +version = "0.3.22" +authors = ["The Rust Project Developers"] +description = "Random number generators and other randomness functionality.\n" +homepage = "https://github.com/rust-lang-nursery/rand" +documentation = "https://docs.rs/rand" +readme = "README.md" +keywords = ["random", "rng"] +categories = ["algorithms"] +license = "MIT/Apache-2.0" +repository = "https://github.com/rust-lang-nursery/rand" + +[lib] +doctest = false +[dependencies.libc] +version = "0.2" + +[dependencies.rand] +version = "0.4" + +[features] +i128_support = [] +nightly = ["i128_support"] +[target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon] +version = "0.3.2" diff --git a/third_party/rust/rand-0.3.22/LICENSE-APACHE b/third_party/rust/rand-0.3.22/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/rand-0.3.22/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/bitflags-0.7.0/LICENSE-MIT b/third_party/rust/rand-0.3.22/LICENSE-MIT similarity index 100% rename from third_party/rust/bitflags-0.7.0/LICENSE-MIT rename to third_party/rust/rand-0.3.22/LICENSE-MIT diff --git a/third_party/rust/rand-0.3.22/README.md b/third_party/rust/rand-0.3.22/README.md new file mode 100644 index 000000000000..1dec3f1cceaf --- /dev/null +++ b/third_party/rust/rand-0.3.22/README.md @@ -0,0 +1,92 @@ +rand +==== + +A Rust library for random number generators and other randomness functionality. 
+ +[![Build Status](https://travis-ci.org/rust-lang-nursery/rand.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/rand) +[![Build status](https://ci.appveyor.com/api/projects/status/rm5c9o33k3jhchbw?svg=true)](https://ci.appveyor.com/project/alexcrichton/rand) + +[Documentation](https://docs.rs/rand) + +## Compatibility upgrade + +Version 0.3 has been replaced by a compatibility wrapper around `rand` 0.4. It +is recommended to update to 0.4. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +rand = "0.4" +``` + +and this to your crate root: + +```rust +extern crate rand; +``` + +## Examples + +There is built-in support for a random number generator (RNG) associated with each thread stored in thread-local storage. This RNG can be accessed via thread_rng, or used implicitly via random. This RNG is normally randomly seeded from an operating-system source of randomness, e.g. /dev/urandom on Unix systems, and will automatically reseed itself from this source after generating 32 KiB of random data. + +```rust +let tuple = rand::random::<(f64, char)>(); +println!("{:?}", tuple) +``` + +```rust +use rand::Rng; + +let mut rng = rand::thread_rng(); +if rng.gen() { // random bool + println!("i32: {}, u32: {}", rng.gen::(), rng.gen::()) +} +``` + +It is also possible to use other RNG types, which have a similar interface. The following uses the "ChaCha" algorithm instead of the default. + +```rust +use rand::{Rng, ChaChaRng}; + +let mut rng = rand::ChaChaRng::new_unseeded(); +println!("i32: {}, u32: {}", rng.gen::(), rng.gen::()) +``` + +# `derive(Rand)` + +You can derive the `Rand` trait for your custom type via the `#[derive(Rand)]` +directive. 
To use this first add this to your Cargo.toml: + +```toml +rand = "0.4" +rand_derive = "0.3" +``` + +Next in your crate: + +```rust +extern crate rand; +#[macro_use] +extern crate rand_derive; + +#[derive(Rand, Debug)] +struct MyStruct { + a: i32, + b: u32, +} + +fn main() { + println!("{:?}", rand::random::()); +} +``` + + +# License + +`rand` is primarily distributed under the terms of both the MIT +license and the Apache License (Version 2.0). + +See LICENSE-APACHE, and LICENSE-MIT for details. diff --git a/third_party/rust/rand-0.3.22/appveyor.yml b/third_party/rust/rand-0.3.22/appveyor.yml new file mode 100644 index 000000000000..39c6a180be82 --- /dev/null +++ b/third_party/rust/rand-0.3.22/appveyor.yml @@ -0,0 +1,37 @@ +environment: + + # At the time this was added AppVeyor was having troubles with checking + # revocation of SSL certificates of sites like static.rust-lang.org and what + # we think is crates.io. The libcurl HTTP client by default checks for + # revocation on Windows and according to a mailing list [1] this can be + # disabled. + # + # The `CARGO_HTTP_CHECK_REVOKE` env var here tells cargo to disable SSL + # revocation checking on Windows in libcurl. Note, though, that rustup, which + # we're using to download Rust here, also uses libcurl as the default backend. + # Unlike Cargo, however, rustup doesn't have a mechanism to disable revocation + # checking. To get rustup working we set `RUSTUP_USE_HYPER` which forces it to + # use the Hyper instead of libcurl backend. Both Hyper and libcurl use + # schannel on Windows but it appears that Hyper configures it slightly + # differently such that revocation checking isn't turned on by default. 
+ # + # [1]: https://curl.haxx.se/mail/lib-2016-03/0202.html + RUSTUP_USE_HYPER: 1 + CARGO_HTTP_CHECK_REVOKE: false + + matrix: + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc +install: + - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host %TARGET% --default-toolchain nightly + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test + - cargo test --features nightly + - cargo test --manifest-path rand-derive/Cargo.toml diff --git a/third_party/rust/rand-0.3.22/src/distributions/mod.rs b/third_party/rust/rand-0.3.22/src/distributions/mod.rs new file mode 100644 index 000000000000..bdb92c4cc350 --- /dev/null +++ b/third_party/rust/rand-0.3.22/src/distributions/mod.rs @@ -0,0 +1,159 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Sampling from random distributions. +//! +//! This is a generalization of `Rand` to allow parameters to control the +//! exact properties of the generated values, e.g. the mean and standard +//! deviation of a normal distribution. The `Sample` trait is the most +//! general, and allows for generating values that change some state +//! internally. The `IndependentSample` trait is for generating values +//! that do not need to record state. 
+ +pub use rand4::distributions::Range; +pub use rand4::distributions::{Gamma, ChiSquared, FisherF, StudentT}; +pub use rand4::distributions::{Normal, LogNormal}; +pub use rand4::distributions::Exp; + +pub use rand4::distributions::{range, gamma, normal, exponential}; + +pub use rand4::distributions::{Sample, IndependentSample, RandSample}; +pub use rand4::distributions::{Weighted, WeightedChoice}; + +#[cfg(test)] +mod tests { + + use {Rng, Rand}; + use super::{RandSample, WeightedChoice, Weighted, Sample, IndependentSample}; + + #[derive(PartialEq, Debug)] + struct ConstRand(usize); + impl Rand for ConstRand { + fn rand(_: &mut R) -> ConstRand { + ConstRand(0) + } + } + + // 0, 1, 2, 3, ... + struct CountingRng { i: u32 } + impl Rng for CountingRng { + fn next_u32(&mut self) -> u32 { + self.i += 1; + self.i - 1 + } + fn next_u64(&mut self) -> u64 { + self.next_u32() as u64 + } + } + + #[test] + fn test_rand_sample() { + let mut rand_sample = RandSample::::new(); + + assert_eq!(rand_sample.sample(&mut ::test::rng()), ConstRand(0)); + assert_eq!(rand_sample.ind_sample(&mut ::test::rng()), ConstRand(0)); + } + #[test] + fn test_weighted_choice() { + // this makes assumptions about the internal implementation of + // WeightedChoice, specifically: it doesn't reorder the items, + // it doesn't do weird things to the RNG (so 0 maps to 0, 1 to + // 1, internally; modulo a modulo operation). + + macro_rules! 
t { + ($items:expr, $expected:expr) => {{ + let mut items = $items; + let wc = WeightedChoice::new(&mut items); + let expected = $expected; + + let mut rng = CountingRng { i: 0 }; + + for &val in expected.iter() { + assert_eq!(wc.ind_sample(&mut rng), val) + } + }} + } + + t!(vec!(Weighted { weight: 1, item: 10}), [10]); + + // skip some + t!(vec!(Weighted { weight: 0, item: 20}, + Weighted { weight: 2, item: 21}, + Weighted { weight: 0, item: 22}, + Weighted { weight: 1, item: 23}), + [21,21, 23]); + + // different weights + t!(vec!(Weighted { weight: 4, item: 30}, + Weighted { weight: 3, item: 31}), + [30,30,30,30, 31,31,31]); + + // check that we're binary searching + // correctly with some vectors of odd + // length. + t!(vec!(Weighted { weight: 1, item: 40}, + Weighted { weight: 1, item: 41}, + Weighted { weight: 1, item: 42}, + Weighted { weight: 1, item: 43}, + Weighted { weight: 1, item: 44}), + [40, 41, 42, 43, 44]); + t!(vec!(Weighted { weight: 1, item: 50}, + Weighted { weight: 1, item: 51}, + Weighted { weight: 1, item: 52}, + Weighted { weight: 1, item: 53}, + Weighted { weight: 1, item: 54}, + Weighted { weight: 1, item: 55}, + Weighted { weight: 1, item: 56}), + [50, 51, 52, 53, 54, 55, 56]); + } + + #[test] + fn test_weighted_clone_initialization() { + let initial : Weighted = Weighted {weight: 1, item: 1}; + let clone = initial.clone(); + assert_eq!(initial.weight, clone.weight); + assert_eq!(initial.item, clone.item); + } + + #[test] #[should_panic] + fn test_weighted_clone_change_weight() { + let initial : Weighted = Weighted {weight: 1, item: 1}; + let mut clone = initial.clone(); + clone.weight = 5; + assert_eq!(initial.weight, clone.weight); + } + + #[test] #[should_panic] + fn test_weighted_clone_change_item() { + let initial : Weighted = Weighted {weight: 1, item: 1}; + let mut clone = initial.clone(); + clone.item = 5; + assert_eq!(initial.item, clone.item); + + } + + #[test] #[should_panic] + fn test_weighted_choice_no_items() { + 
WeightedChoice::::new(&mut []); + } + #[test] #[should_panic] + fn test_weighted_choice_zero_weight() { + WeightedChoice::new(&mut [Weighted { weight: 0, item: 0}, + Weighted { weight: 0, item: 1}]); + } + #[test] #[should_panic] + fn test_weighted_choice_weight_overflows() { + let x = ::std::u32::MAX / 2; // x + x + 2 is the overflow + WeightedChoice::new(&mut [Weighted { weight: x, item: 0 }, + Weighted { weight: 1, item: 1 }, + Weighted { weight: x, item: 2 }, + Weighted { weight: 1, item: 3 }]); + } +} diff --git a/third_party/rust/rand-0.3.22/src/lib.rs b/third_party/rust/rand-0.3.22/src/lib.rs new file mode 100644 index 000000000000..f4657410e62c --- /dev/null +++ b/third_party/rust/rand-0.3.22/src/lib.rs @@ -0,0 +1,297 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Utilities for random number generation +//! +//! This release is a compatibility wrapper around `rand` version 0.4. Please +//! upgrade. 
+ +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://docs.rs/rand/0.3")] + +#![deny(missing_debug_implementations)] + +#![cfg_attr(feature = "i128_support", feature(i128_type))] + +extern crate rand as rand4; + +pub use rand4::OsRng; + +pub use rand4::{IsaacRng, Isaac64Rng}; +pub use rand4::ChaChaRng; + +pub mod distributions; +pub use rand4::{isaac, chacha, reseeding, os, read}; +mod rand_impls; + +pub use rand4::Rng; +pub use rand4::Rand; +pub use rand4::SeedableRng; + +pub use rand4::{Generator, AsciiGenerator}; +pub use rand4::XorShiftRng; +pub use rand4::{Open01, Closed01}; +pub use rand4::StdRng; +pub use rand4::{weak_rng, ThreadRng, thread_rng, random}; + +#[allow(deprecated)] +pub use rand4::sample; + +#[allow(deprecated)] +#[cfg(test)] +mod test { + use super::{Rng, thread_rng, random, SeedableRng, StdRng, sample, + weak_rng}; + use std::iter::repeat; + + pub struct MyRng { inner: R } + + impl Rng for MyRng { + fn next_u32(&mut self) -> u32 { + fn next(t: &mut T) -> u32 { + t.next_u32() + } + next(&mut self.inner) + } + } + + pub fn rng() -> MyRng<::ThreadRng> { + MyRng { inner: ::thread_rng() } + } + + struct ConstRng { i: u64 } + impl Rng for ConstRng { + fn next_u32(&mut self) -> u32 { self.i as u32 } + fn next_u64(&mut self) -> u64 { self.i } + + // no fill_bytes on purpose + } + + pub fn iter_eq(i: I, j: J) -> bool + where I: IntoIterator, + J: IntoIterator, + I::Item: Eq + { + // make sure the iterators have equal length + let mut i = i.into_iter(); + let mut j = j.into_iter(); + loop { + match (i.next(), j.next()) { + (Some(ref ei), Some(ref ej)) if ei == ej => { } + (None, None) => return true, + _ => return false, + } + } + } + + #[test] + fn test_fill_bytes_default() { + let mut r = ConstRng { i: 0x11_22_33_44_55_66_77_88 }; + + // check every remainder mod 8, both in small and big vectors. 
+ let lengths = [0, 1, 2, 3, 4, 5, 6, 7, + 80, 81, 82, 83, 84, 85, 86, 87]; + for &n in lengths.iter() { + let mut v = repeat(0u8).take(n).collect::>(); + r.fill_bytes(&mut v); + + // use this to get nicer error messages. + for (i, &byte) in v.iter().enumerate() { + if byte == 0 { + panic!("byte {} of {} is zero", i, n) + } + } + } + } + + #[test] + fn test_gen_range() { + let mut r = thread_rng(); + for _ in 0..1000 { + let a = r.gen_range(-3, 42); + assert!(a >= -3 && a < 42); + assert_eq!(r.gen_range(0, 1), 0); + assert_eq!(r.gen_range(-12, -11), -12); + } + + for _ in 0..1000 { + let a = r.gen_range(10, 42); + assert!(a >= 10 && a < 42); + assert_eq!(r.gen_range(0, 1), 0); + assert_eq!(r.gen_range(3_000_000, 3_000_001), 3_000_000); + } + + } + + #[test] + #[should_panic] + fn test_gen_range_panic_int() { + let mut r = thread_rng(); + r.gen_range(5, -2); + } + + #[test] + #[should_panic] + fn test_gen_range_panic_usize() { + let mut r = thread_rng(); + r.gen_range(5, 2); + } + + #[test] + fn test_gen_weighted_bool() { + let mut r = thread_rng(); + assert_eq!(r.gen_weighted_bool(0), true); + assert_eq!(r.gen_weighted_bool(1), true); + } + + #[test] + fn test_gen_ascii_str() { + let mut r = thread_rng(); + assert_eq!(r.gen_ascii_chars().take(0).count(), 0); + assert_eq!(r.gen_ascii_chars().take(10).count(), 10); + assert_eq!(r.gen_ascii_chars().take(16).count(), 16); + } + + #[test] + fn test_gen_vec() { + let mut r = thread_rng(); + assert_eq!(r.gen_iter::().take(0).count(), 0); + assert_eq!(r.gen_iter::().take(10).count(), 10); + assert_eq!(r.gen_iter::().take(16).count(), 16); + } + + #[test] + fn test_choose() { + let mut r = thread_rng(); + assert_eq!(r.choose(&[1, 1, 1]).map(|&x|x), Some(1)); + + let v: &[isize] = &[]; + assert_eq!(r.choose(v), None); + } + + #[test] + fn test_shuffle() { + let mut r = thread_rng(); + let empty: &mut [isize] = &mut []; + r.shuffle(empty); + let mut one = [1]; + r.shuffle(&mut one); + let b: &[_] = &[1]; + assert_eq!(one, b); 
+ + let mut two = [1, 2]; + r.shuffle(&mut two); + assert!(two == [1, 2] || two == [2, 1]); + + let mut x = [1, 1, 1]; + r.shuffle(&mut x); + let b: &[_] = &[1, 1, 1]; + assert_eq!(x, b); + } + + #[test] + fn test_thread_rng() { + let mut r = thread_rng(); + r.gen::(); + let mut v = [1, 1, 1]; + r.shuffle(&mut v); + let b: &[_] = &[1, 1, 1]; + assert_eq!(v, b); + assert_eq!(r.gen_range(0, 1), 0); + } + + #[test] + fn test_rng_trait_object() { + let mut rng = thread_rng(); + { + let mut r = &mut rng as &mut Rng; + r.next_u32(); + (&mut r).gen::(); + let mut v = [1, 1, 1]; + (&mut r).shuffle(&mut v); + let b: &[_] = &[1, 1, 1]; + assert_eq!(v, b); + assert_eq!((&mut r).gen_range(0, 1), 0); + } + { + let mut r = Box::new(rng) as Box; + r.next_u32(); + r.gen::(); + let mut v = [1, 1, 1]; + r.shuffle(&mut v); + let b: &[_] = &[1, 1, 1]; + assert_eq!(v, b); + assert_eq!(r.gen_range(0, 1), 0); + } + } + + #[test] + fn test_random() { + // not sure how to test this aside from just getting some values + let _n : usize = random(); + let _f : f32 = random(); + let _o : Option> = random(); + let _many : ((), + (usize, + isize, + Option<(u32, (bool,))>), + (u8, i8, u16, i16, u32, i32, u64, i64), + (f32, (f64, (f64,)))) = random(); + } + + #[test] + fn test_sample() { + let min_val = 1; + let max_val = 100; + + let mut r = thread_rng(); + let vals = (min_val..max_val).collect::>(); + let small_sample = sample(&mut r, vals.iter(), 5); + let large_sample = sample(&mut r, vals.iter(), vals.len() + 5); + + assert_eq!(small_sample.len(), 5); + assert_eq!(large_sample.len(), vals.len()); + + assert!(small_sample.iter().all(|e| { + **e >= min_val && **e <= max_val + })); + } + + #[test] + fn test_std_rng_seeded() { + let s = thread_rng().gen_iter::().take(256).collect::>(); + let mut ra: StdRng = SeedableRng::from_seed(&s[..]); + let mut rb: StdRng = SeedableRng::from_seed(&s[..]); + assert!(iter_eq(ra.gen_ascii_chars().take(100), + rb.gen_ascii_chars().take(100))); + } + + #[test] + 
fn test_std_rng_reseed() { + let s = thread_rng().gen_iter::().take(256).collect::>(); + let mut r: StdRng = SeedableRng::from_seed(&s[..]); + let string1 = r.gen_ascii_chars().take(100).collect::(); + + r.reseed(&s); + + let string2 = r.gen_ascii_chars().take(100).collect::(); + assert_eq!(string1, string2); + } + + #[test] + fn test_weak_rng() { + let s = weak_rng().gen_iter::().take(256).collect::>(); + let mut ra: StdRng = SeedableRng::from_seed(&s[..]); + let mut rb: StdRng = SeedableRng::from_seed(&s[..]); + assert!(iter_eq(ra.gen_ascii_chars().take(100), + rb.gen_ascii_chars().take(100))); + } +} diff --git a/third_party/rust/rand-0.3.22/src/rand_impls.rs b/third_party/rust/rand-0.3.22/src/rand_impls.rs new file mode 100644 index 000000000000..ccffaf6d92f3 --- /dev/null +++ b/third_party/rust/rand-0.3.22/src/rand_impls.rs @@ -0,0 +1,63 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The implementations of `Rand` for the built-in types. + +#[cfg(test)] +mod tests { + use {Rng, thread_rng, Open01, Closed01}; + + struct ConstantRng(u64); + impl Rng for ConstantRng { + fn next_u32(&mut self) -> u32 { + let ConstantRng(v) = *self; + v as u32 + } + fn next_u64(&mut self) -> u64 { + let ConstantRng(v) = *self; + v + } + } + + #[test] + fn floating_point_edge_cases() { + // the test for exact equality is correct here. + assert!(ConstantRng(0xffff_ffff).gen::() != 1.0); + assert!(ConstantRng(0xffff_ffff_ffff_ffff).gen::() != 1.0); + } + + #[test] + fn rand_open() { + // this is unlikely to catch an incorrect implementation that + // generates exactly 0 or 1, but it keeps it sane. 
+ let mut rng = thread_rng(); + for _ in 0..1_000 { + // strict inequalities + let Open01(f) = rng.gen::>(); + assert!(0.0 < f && f < 1.0); + + let Open01(f) = rng.gen::>(); + assert!(0.0 < f && f < 1.0); + } + } + + #[test] + fn rand_closed() { + let mut rng = thread_rng(); + for _ in 0..1_000 { + // strict inequalities + let Closed01(f) = rng.gen::>(); + assert!(0.0 <= f && f <= 1.0); + + let Closed01(f) = rng.gen::>(); + assert!(0.0 <= f && f <= 1.0); + } + } +} diff --git a/third_party/rust/rand/.cargo-checksum.json b/third_party/rust/rand/.cargo-checksum.json index 6107bd43914d..026a745d0fa7 100644 --- a/third_party/rust/rand/.cargo-checksum.json +++ b/third_party/rust/rand/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"c5edd03cb5679918a6d85f64c0a634ed83022ff85ea78f440f39fd281bd29c02","Cargo.toml":"3dbce5caad1d0265badcfe385d578ac29d4e9b755f1909b1c2b67dc498d39d71","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"51831128477b9c9db0ec632ed6b6164f4e70e2d5f21eb5b3a391ecb9ab35e727","appveyor.yml":"6bf2f0c8f17d2dc4957dd39aba9a88ced3130200cf847a76d47b6c8fdcc2cbd8","benches/bench.rs":"2d3481c524841c532b9b9705073b223fd4b308c86ed7c9188b7fcd8e595ad459","benches/distributions/exponential.rs":"99cb59c013a0b6bb390d34c5649b341fc3b88ea7df0caf2470bdda8798f9fe3d","benches/distributions/gamma.rs":"3533f311e4b55d743c5b01a7eb6529c94fd97726ef6702a6372f914f5f33666b","benches/distributions/mod.rs":"0028f1cb96f61152ed5b49a4fe91227d809ef6d19035592c36032a538af7f95e","benches/distributions/normal.rs":"4e10c18cb583ccb96301ea953c8e0aa9ee3b6662060271d1b8d19ca23364dc6b","src/chacha.rs":"529c20ca1eff845da4cdca9ac995bcb8e698e48a61fbae91f09e3b4600ac57c3","src/distributions/exponential.rs":"103c8412c8a581b71835f1c00e40f6370e7702adf9d499243933a793d132d4e7","src/distributions/gamma.rs":"7a3f85c8daad4e56e334586ddb9fc9d83df3b0699738ed681a6c41e4ed455be9","src/dist
ributions/mod.rs":"c3188ec234261ceba8b0231450f1bcfa486bdb7ad6a5aa9e1880aca4f4e02c74","src/distributions/normal.rs":"1562b43f80e4d5f83a8deb5af18de5a18dfeeeeda11fefc577da26672b14c949","src/distributions/range.rs":"c0ac6858d6a3979de7996feca22d190fde0bfb6f758d43030efa04a1a0fdcc17","src/distributions/ziggurat_tables.rs":"4eacf94fc352c91c455a6623de6a721e53842e1690f13a5662b6a79c7fbb73de","src/isaac.rs":"1725114b2d63c6fe4c0f4f7e0c36fc993a47f0322350d13abc631b0806bb71ed","src/lib.rs":"60ecdc3088993488df0a1914bd090fe00e4ea13272d1f3d51fd887e29e4cda3e","src/os.rs":"a27abd65bc29296e447961977c0ce5b44469d6231990783751a84dba36fe1044","src/rand_impls.rs":"cf411028341f67fd196ccde6200eea563c993f59d360a030b3d7d3ee15447a7d","src/read.rs":"bd0eb508a6b659dc578d546fc2f231484aed80c73cfe8c475e0d65c8d699a769","src/reseeding.rs":"73b2539b86b4cb8068e54716c7fd53e0d70b6c0de787a0749431b17019c9d826"},"package":"6475140dfd8655aeb72e1fd4b7a1cc1c202be65d71669476e392fe62532b9edd"} \ No newline at end of file +{"files":{".travis.yml":"12868a81c3590d8f2c08d53ca51e6b4b9be39d854f81bd4a57f82466cbe8d79a","CHANGELOG.md":"460c7dbfdf227b4c725f7da8131fd51a54d0355b3c5804c0d388c70d3c1c49ec","Cargo.toml":"7d29da51fe4bf73964b3b3dea0af88040514569e2d184c9e8eb2f1746d540fb7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"fb8071c3bc1013107b16ebcb303f31ef614e81440f2d58a46bfb9ff1e311b792","appveyor.yml":"8796156caf7041ef2a43f7a313df21ea639de3f2563b6181bba1096b1c489f1b","benches/bench.rs":"35c4ab609f2a5f5aab6c52c257415258dc0780621b492b5a82bb12d048cab6db","benches/distributions/exponential.rs":"99cb59c013a0b6bb390d34c5649b341fc3b88ea7df0caf2470bdda8798f9fe3d","benches/distributions/gamma.rs":"3533f311e4b55d743c5b01a7eb6529c94fd97726ef6702a6372f914f5f33666b","benches/distributions/mod.rs":"0028f1cb96f61152ed5b49a4fe91227d809ef6d19035592c36032a538af7f95e","benches/distributions/normal.rs":"4e10c18cb
583ccb96301ea953c8e0aa9ee3b6662060271d1b8d19ca23364dc6b","benches/generators.rs":"aaa2f1dbfb399df8323d8a5796b92add6210cd5f0f1d916895ffdd81d60f812b","benches/misc.rs":"bd2f7c5a16f0fcb59022d5aeef66ed3c94e89ebf6c06667851dd23d0b1595504","src/distributions/exponential.rs":"103c8412c8a581b71835f1c00e40f6370e7702adf9d499243933a793d132d4e7","src/distributions/gamma.rs":"7a3f85c8daad4e56e334586ddb9fc9d83df3b0699738ed681a6c41e4ed455be9","src/distributions/mod.rs":"7943c4f83721bac816f831cca3b1574b6136932f7b4927aa6101130080ba62c5","src/distributions/normal.rs":"1562b43f80e4d5f83a8deb5af18de5a18dfeeeeda11fefc577da26672b14c949","src/distributions/range.rs":"a72a538d3ec4ed23f8d632aa55fd4793c464f24a5872d04ce8095ddd5db92115","src/distributions/ziggurat_tables.rs":"4eacf94fc352c91c455a6623de6a721e53842e1690f13a5662b6a79c7fbb73de","src/jitter.rs":"befd4b84bf753c107370b5b9498ad49611c220bdae2e4be9ee4398e9fa497042","src/lib.rs":"fbdc5f56ce1a52b15c85b0aa70a555c64be8f65d9f6f90fa0a3555d7862666b4","src/os.rs":"4860f165f68b7c978b0488c75d264cd9aaf54e7e4484036736ee5c4f5b6bd78d","src/prng/chacha.rs":"558007276f9c22933d39e5b8e853f4dd9533e823ed66df8dc1f23ad6925b1d51","src/prng/isaac.rs":"a8a2ee8b38d312663308e3bdf03376e342fd91330655f39144e5bba7392b2a8e","src/prng/isaac64.rs":"f28f7596ccab910db265b42671116abb9d2039fa8a421cbc75312bd0e7715d3a","src/prng/mod.rs":"c1a73450f49e819a20942a5b591f84a08ebb5ac33aa0f65b18ac1dc9a19a3084","src/prng/xorshift.rs":"606c308747293652c868b46dc3cad847d0c3717629c04ba75681c887c7634114","src/rand_impls.rs":"e1f27077fc13d5855bb66235f8ccfb216e116337eb38424d9c30c090e112215c","src/read.rs":"bd0eb508a6b659dc578d546fc2f231484aed80c73cfe8c475e0d65c8d699a769","src/reseeding.rs":"a97b86387b87ea1adc5262ddea480fe735c9c2a86762abaace29119022ac9f6e","src/seq.rs":"76dd58af0f580aed2721c393a5c036322186dc7cb3b4abed33436620c7c49288","utils/ziggurat_tables.py":"a9fc0a2fdae9b5c798c238788f94b720c156e13fd96f2356c409aa533191eb94"},"package":"8356f47b32624fef5b3301c1be97e5944ecdd595409cc5da11d05f2
11db6cfbd"} \ No newline at end of file diff --git a/third_party/rust/rand/.travis.yml b/third_party/rust/rand/.travis.yml index 1cb2e68cbb88..f3d768871bfb 100644 --- a/third_party/rust/rand/.travis.yml +++ b/third_party/rust/rand/.travis.yml @@ -1,7 +1,5 @@ language: rust sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH matrix: include: @@ -11,16 +9,21 @@ matrix: os: osx - rust: beta - rust: nightly + + - rust: nightly + before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH script: - - cargo test + - cargo doc --no-deps --all-features + - cargo test --benches - cargo test --features nightly - - cargo test --manifest-path rand-derive/Cargo.toml - - cargo doc --no-deps --features nightly + after_success: + - travis-cargo --only nightly doc-upload + script: - cargo test - cargo test --manifest-path rand-derive/Cargo.toml -after_success: - - travis-cargo --only nightly doc-upload + env: global: secure: "BdDntVHSompN+Qxz5Rz45VI4ZqhD72r6aPl166FADlnkIwS6N6FLWdqs51O7G5CpoMXEDvyYrjmRMZe/GYLIG9cmqmn/wUrWPO+PauGiIuG/D2dmfuUNvSTRcIe7UQLXrfP3yyfZPgqsH6pSnNEVopquQKy3KjzqepgriOJtbyY=" diff --git a/third_party/rust/rand/CHANGELOG.md b/third_party/rust/rand/CHANGELOG.md new file mode 100644 index 000000000000..1811b458fb7f --- /dev/null +++ b/third_party/rust/rand/CHANGELOG.md @@ -0,0 +1,269 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+ +## [0.4.3] - 2018-08-16 +### Fixed +- Use correct syscall number for PowerPC (#589) + +## [0.4.2] - 2018-01-05 +### Changed +- Use winapi on Windows +- Update for Fuchsia OS +- Remove dev-dependency on `log` + +## [0.4.1] - 2017-12-17 +### Added +- `no_std` support + +## [0.4.0-pre.0] - 2017-12-11 +### Added +- `JitterRng` added as a high-quality alternative entropy source using the + system timer +- new `seq` module with `sample_iter`, `sample_slice`, etc. +- WASM support via dummy implementations (fail at run-time) +- Additional benchmarks, covering generators and new seq code + +### Changed +- `thread_rng` uses `JitterRng` if seeding from system time fails + (slower but more secure than previous method) + +### Deprecated + - `sample` function deprecated (replaced by `sample_iter`) + +## [0.3.18] - 2017-11-06 +### Changed +- `thread_rng` is seeded from the system time if `OsRng` fails +- `weak_rng` now uses `thread_rng` internally + + +## [0.3.17] - 2017-10-07 +### Changed + - Fuchsia: Magenta was renamed Zircon + +## [0.3.16] - 2017-07-27 +### Added +- Implement Debug for mote non-public types +- implement `Rand` for (i|u)i128 +- Support for Fuchsia + +### Changed +- Add inline attribute to SampleRange::construct_range. + This improves the benchmark for sample in 11% and for shuffle in 16%. +- Use `RtlGenRandom` instead of `CryptGenRandom` + + +## [0.3.15] - 2016-11-26 +### Added +- Add `Rng` trait method `choose_mut` +- Redox support + +### Changed +- Use `arc4rand` for `OsRng` on FreeBSD. +- Use `arc4random(3)` for `OsRng` on OpenBSD. 
+ +### Fixed +- Fix filling buffers 4 GiB or larger with `OsRng::fill_bytes` on Windows + + +## [0.3.14] - 2016-02-13 +### Fixed +- Inline definitions from winapi/advapi32, wich decreases build times + + +## [0.3.13] - 2016-01-09 +### Fixed +- Compatible with Rust 1.7.0-nightly (needed some extra type annotations) + + +## [0.3.12] - 2015-11-09 +### Changed +- Replaced the methods in `next_f32` and `next_f64` with the technique described + Saito & Matsumoto at MCQMC'08. The new method should exhibit a slightly more + uniform distribution. +- Depend on libc 0.2 + +### Fixed +- Fix iterator protocol issue in `rand::sample` + + +## [0.3.11] - 2015-08-31 +### Added +- Implement `Rand` for arrays with n <= 32 + + +## [0.3.10] - 2015-08-17 +### Added +- Support for NaCl platforms + +### Changed +- Allow `Rng` to be `?Sized`, impl for `&mut R` and `Box` where `R: ?Sized + Rng` + + +## [0.3.9] - 2015-06-18 +### Changed +- Use `winapi` for Windows API things + +### Fixed +- Fixed test on stable/nightly +- Fix `getrandom` syscall number for aarch64-unknown-linux-gnu + + +## [0.3.8] - 2015-04-23 +### Changed +- `log` is a dev dependency + +### Fixed +- Fix race condition of atomics in `is_getrandom_available` + + +## [0.3.7] - 2015-04-03 +### Fixed +- Derive Copy/Clone changes + + +## [0.3.6] - 2015-04-02 +### Changed +- Move to stable Rust! 
+ + +## [0.3.5] - 2015-04-01 +### Fixed +- Compatible with Rust master + + +## [0.3.4] - 2015-03-31 +### Added +- Implement Clone for `Weighted` + +### Fixed +- Compatible with Rust master + + +## [0.3.3] - 2015-03-26 +### Fixed +- Fix compile on Windows + + +## [0.3.2] - 2015-03-26 + + +## [0.3.1] - 2015-03-26 +### Fixed +- Fix compile on Windows + + +## [0.3.0] - 2015-03-25 +### Changed +- Update to use log version 0.3.x + + +## [0.2.1] - 2015-03-22 +### Fixed +- Compatible with Rust master +- Fixed iOS compilation + + +## [0.2.0] - 2015-03-06 +### Fixed +- Compatible with Rust master (move from `old_io` to `std::io`) + + +## [0.1.4] - 2015-03-04 +### Fixed +- Compatible with Rust master (use wrapping ops) + + +## [0.1.3] - 2015-02-20 +### Fixed +- Compatible with Rust master + +### Removed +- Removed Copy inplementaions from RNGs + + +## [0.1.2] - 2015-02-03 +### Added +- Imported functionality from `std::rand`, including: + - `StdRng`, `SeedableRng`, `TreadRng`, `weak_rng()` + - `ReaderRng`: A wrapper around any Reader to treat it as an RNG. +- Imported documentation from `std::rand` +- Imported tests from `std::rand` + + +## [0.1.1] - 2015-02-03 +### Added +- Migrate to a cargo-compatible directory structure. + +### Fixed +- Do not use entropy during `gen_weighted_bool(1)` + + +## [Rust 0.12.0] - 2014-10-09 +### Added +- Impl Rand for tuples of arity 11 and 12 +- Include ChaCha pseudorandom generator +- Add `next_f64` and `next_f32` to Rng +- Implement Clone for PRNGs + +### Changed +- Rename `TaskRng` to `ThreadRng` and `task_rng` to `thread_rng` (since a + runtime is removed from Rust). + +### Fixed +- Improved performance of ISAAC and ISAAC64 by 30% and 12 % respectively, by + informing the optimiser that indexing is never out-of-bounds. 
+ +### Removed +- Removed the Deprecated `choose_option` + + +## [Rust 0.11.0] - 2014-07-02 +### Added +- document when to use `OSRng` in cryptographic context, and explain why we use `/dev/urandom` instead of `/dev/random` +- `Rng::gen_iter()` which will return an infinite stream of random values +- `Rng::gen_ascii_chars()` which will return an infinite stream of random ascii characters + +### Changed +- Now only depends on libcore! 2adf5363f88ffe06f6d2ea5c338d1b186d47f4a1 +- Remove `Rng.choose()`, rename `Rng.choose_option()` to `.choose()` +- Rename OSRng to OsRng +- The WeightedChoice structure is no longer built with a `Vec>`, + but rather a `&mut [Weighted]`. This means that the WeightedChoice + structure now has a lifetime associated with it. +- The `sample` method on `Rng` has been moved to a top-level function in the + `rand` module due to its dependence on `Vec`. + +### Removed +- `Rng::gen_vec()` was removed. Previous behavior can be regained with + `rng.gen_iter().take(n).collect()` +- `Rng::gen_ascii_str()` was removed. Previous behavior can be regained with + `rng.gen_ascii_chars().take(n).collect()` +- {IsaacRng, Isaac64Rng, XorShiftRng}::new() have all been removed. These all + relied on being able to use an OSRng for seeding, but this is no longer + available in librand (where these types are defined). To retain the same + functionality, these types now implement the `Rand` trait so they can be + generated with a random seed from another random number generator. This allows + the stdlib to use an OSRng to create seeded instances of these RNGs. +- Rand implementations for `Box` and `@T` were removed. These seemed to be + pretty rare in the codebase, and it allows for librand to not depend on + liballoc. Additionally, other pointer types like Rc and Arc were not + supported. 
+- Remove a slew of old deprecated functions + + +## [Rust 0.10] - 2014-04-03 +### Changed +- replace `Rng.shuffle's` functionality with `.shuffle_mut` +- bubble up IO errors when creating an OSRng + +### Fixed +- Use `fill()` instead of `read()` +- Rewrite OsRng in Rust for windows + +## [0.10-pre] - 2014-03-02 +### Added +- Seperate `rand` out of the standard library + diff --git a/third_party/rust/rand/Cargo.toml b/third_party/rust/rand/Cargo.toml index 106ed81305cb..4f80003e44f3 100644 --- a/third_party/rust/rand/Cargo.toml +++ b/third_party/rust/rand/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "rand" -version = "0.3.18" +version = "0.4.3" authors = ["The Rust Project Developers"] description = "Random number generators and other randomness functionality.\n" homepage = "https://github.com/rust-lang-nursery/rand" @@ -22,13 +22,18 @@ keywords = ["random", "rng"] categories = ["algorithms"] license = "MIT/Apache-2.0" repository = "https://github.com/rust-lang-nursery/rand" -[dependencies.libc] -version = "0.2" -[dev-dependencies.log] -version = "0.3.0" [features] -nightly = ["i128_support"] +alloc = [] +default = ["std"] i128_support = [] +nightly = ["i128_support"] +std = ["libc"] [target."cfg(target_os = \"fuchsia\")".dependencies.fuchsia-zircon] -version = "^0.2.1" +version = "0.3.2" +[target."cfg(unix)".dependencies.libc] +version = "0.2" +optional = true +[target."cfg(windows)".dependencies.winapi] +version = "0.3" +features = ["minwindef", "ntsecapi", "profileapi", "winnt"] diff --git a/third_party/rust/rand/README.md b/third_party/rust/rand/README.md index cd4ee24983f5..f72bd51a3333 100644 --- a/third_party/rust/rand/README.md +++ b/third_party/rust/rand/README.md @@ -14,7 +14,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -rand = "0.3" +rand = "0.4" ``` and this to your crate root: @@ -23,6 +23,16 @@ and this to your crate root: extern crate rand; ``` +### Versions + +Version `0.4`was released in December 2017. 
It contains almost no breaking +changes since the `0.3` series, but nevertheless contains some significant +new code, including a new "external" entropy source (`JitterRng`) and `no_std` +support. + +Version `0.5` is in development and contains significant performance +improvements for the ISAAC random number generators. + ## Examples There is built-in support for a random number generator (RNG) associated with each thread stored in thread-local storage. This RNG can be accessed via thread_rng, or used implicitly via random. This RNG is normally randomly seeded from an operating-system source of randomness, e.g. /dev/urandom on Unix systems, and will automatically reseed itself from this source after generating 32 KiB of random data. @@ -50,13 +60,55 @@ let mut rng = rand::ChaChaRng::new_unseeded(); println!("i32: {}, u32: {}", rng.gen::(), rng.gen::()) ``` +## Features + +By default, `rand` is built with all stable features available. The following +optional features are available: + +- `i128_support` enables support for generating `u128` and `i128` values +- `nightly` enables all unstable features (`i128_support`) +- `std` enabled by default; by setting "default-features = false" `no_std` + mode is activated; this removes features depending on `std` functionality: + + - `OsRng` is entirely unavailable + - `JitterRng` code is still present, but a nanosecond timer must be + provided via `JitterRng::new_with_timer` + - Since no external entropy is available, it is not possible to create + generators with fresh seeds (user must provide entropy) + - `thread_rng`, `weak_rng` and `random` are all disabled + - exponential, normal and gamma type distributions are unavailable + since `exp` and `log` functions are not provided in `core` + - any code requiring `Vec` or `Box` +- `alloc` can be used instead of `std` to provide `Vec` and `Box` + +## Testing + +Unfortunately, `cargo test` does not test everything. 
The following tests are +recommended: + +``` +# Basic tests for rand and sub-crates +cargo test --all + +# Test no_std support (build only since nearly all tests require std) +cargo build --all --no-default-features + +# Test 128-bit support (requires nightly) +cargo test --all --features nightly + +# Benchmarks (requires nightly) +cargo bench +# or just to test the benchmark code: +cargo test --benches +``` + # `derive(Rand)` You can derive the `Rand` trait for your custom type via the `#[derive(Rand)]` directive. To use this first add this to your Cargo.toml: ```toml -rand = "0.3" +rand = "0.4" rand_derive = "0.3" ``` diff --git a/third_party/rust/rand/appveyor.yml b/third_party/rust/rand/appveyor.yml index 39c6a180be82..02e217fe4893 100644 --- a/third_party/rust/rand/appveyor.yml +++ b/third_party/rust/rand/appveyor.yml @@ -32,6 +32,7 @@ install: build: false test_script: + - cargo test --benches - cargo test - cargo test --features nightly - cargo test --manifest-path rand-derive/Cargo.toml diff --git a/third_party/rust/rand/benches/bench.rs b/third_party/rust/rand/benches/bench.rs index 5fa92bdbea02..d396f25b5e99 100644 --- a/third_party/rust/rand/benches/bench.rs +++ b/third_party/rust/rand/benches/bench.rs @@ -9,52 +9,7 @@ mod distributions; use std::mem::size_of; use test::{black_box, Bencher}; -use rand::{XorShiftRng, StdRng, IsaacRng, Isaac64Rng, Rng}; -use rand::{OsRng, sample, weak_rng}; - -#[bench] -fn rand_xorshift(b: &mut Bencher) { - let mut rng: XorShiftRng = OsRng::new().unwrap().gen(); - b.iter(|| { - for _ in 0..RAND_BENCH_N { - black_box(rng.gen::()); - } - }); - b.bytes = size_of::() as u64 * RAND_BENCH_N; -} - -#[bench] -fn rand_isaac(b: &mut Bencher) { - let mut rng: IsaacRng = OsRng::new().unwrap().gen(); - b.iter(|| { - for _ in 0..RAND_BENCH_N { - black_box(rng.gen::()); - } - }); - b.bytes = size_of::() as u64 * RAND_BENCH_N; -} - -#[bench] -fn rand_isaac64(b: &mut Bencher) { - let mut rng: Isaac64Rng = OsRng::new().unwrap().gen(); - 
b.iter(|| { - for _ in 0..RAND_BENCH_N { - black_box(rng.gen::()); - } - }); - b.bytes = size_of::() as u64 * RAND_BENCH_N; -} - -#[bench] -fn rand_std(b: &mut Bencher) { - let mut rng = StdRng::new().unwrap(); - b.iter(|| { - for _ in 0..RAND_BENCH_N { - black_box(rng.gen::()); - } - }); - b.bytes = size_of::() as u64 * RAND_BENCH_N; -} +use rand::{StdRng, Rng}; #[bench] fn rand_f32(b: &mut Bencher) { @@ -77,21 +32,3 @@ fn rand_f64(b: &mut Bencher) { }); b.bytes = size_of::() as u64 * RAND_BENCH_N; } - -#[bench] -fn rand_shuffle_100(b: &mut Bencher) { - let mut rng = weak_rng(); - let x : &mut [usize] = &mut [1; 100]; - b.iter(|| { - rng.shuffle(x); - }) -} - -#[bench] -fn rand_sample_10_of_100(b: &mut Bencher) { - let mut rng = weak_rng(); - let x : &[usize] = &[1; 100]; - b.iter(|| { - sample(&mut rng, x, 10); - }) -} diff --git a/third_party/rust/rand/benches/generators.rs b/third_party/rust/rand/benches/generators.rs new file mode 100644 index 000000000000..daee7c5fbb5a --- /dev/null +++ b/third_party/rust/rand/benches/generators.rs @@ -0,0 +1,133 @@ +#![feature(test)] + +extern crate test; +extern crate rand; + +const RAND_BENCH_N: u64 = 1000; +const BYTES_LEN: usize = 1024; + +use std::mem::size_of; +use test::{black_box, Bencher}; + +use rand::{Rng, StdRng, OsRng, JitterRng}; +use rand::{XorShiftRng, IsaacRng, Isaac64Rng, ChaChaRng}; + +macro_rules! gen_bytes { + ($fnn:ident, $gen:ident) => { + #[bench] + fn $fnn(b: &mut Bencher) { + let mut rng: $gen = OsRng::new().unwrap().gen(); + let mut buf = [0u8; BYTES_LEN]; + b.iter(|| { + for _ in 0..RAND_BENCH_N { + rng.fill_bytes(&mut buf); + black_box(buf); + } + }); + b.bytes = BYTES_LEN as u64 * RAND_BENCH_N; + } + } +} + +macro_rules! 
gen_bytes_new { + ($fnn:ident, $gen:ident) => { + #[bench] + fn $fnn(b: &mut Bencher) { + let mut rng = $gen::new().unwrap(); + let mut buf = [0u8; BYTES_LEN]; + b.iter(|| { + for _ in 0..RAND_BENCH_N { + rng.fill_bytes(&mut buf); + black_box(buf); + } + }); + b.bytes = BYTES_LEN as u64 * RAND_BENCH_N; + } + } +} + +gen_bytes!(gen_bytes_xorshift, XorShiftRng); +gen_bytes!(gen_bytes_isaac, IsaacRng); +gen_bytes!(gen_bytes_isaac64, Isaac64Rng); +gen_bytes!(gen_bytes_chacha, ChaChaRng); +gen_bytes_new!(gen_bytes_std, StdRng); +gen_bytes_new!(gen_bytes_os, OsRng); + + +macro_rules! gen_uint { + ($fnn:ident, $ty:ty, $gen:ident) => { + #[bench] + fn $fnn(b: &mut Bencher) { + let mut rng: $gen = OsRng::new().unwrap().gen(); + b.iter(|| { + for _ in 0..RAND_BENCH_N { + black_box(rng.gen::<$ty>()); + } + }); + b.bytes = size_of::<$ty>() as u64 * RAND_BENCH_N; + } + } +} + +macro_rules! gen_uint_new { + ($fnn:ident, $ty:ty, $gen:ident) => { + #[bench] + fn $fnn(b: &mut Bencher) { + let mut rng = $gen::new().unwrap(); + b.iter(|| { + for _ in 0..RAND_BENCH_N { + black_box(rng.gen::<$ty>()); + } + }); + b.bytes = size_of::<$ty>() as u64 * RAND_BENCH_N; + } + } +} + +gen_uint!(gen_u32_xorshift, u32, XorShiftRng); +gen_uint!(gen_u32_isaac, u32, IsaacRng); +gen_uint!(gen_u32_isaac64, u32, Isaac64Rng); +gen_uint!(gen_u32_chacha, u32, ChaChaRng); +gen_uint_new!(gen_u32_std, u32, StdRng); +gen_uint_new!(gen_u32_os, u32, OsRng); + +gen_uint!(gen_u64_xorshift, u64, XorShiftRng); +gen_uint!(gen_u64_isaac, u64, IsaacRng); +gen_uint!(gen_u64_isaac64, u64, Isaac64Rng); +gen_uint!(gen_u64_chacha, u64, ChaChaRng); +gen_uint_new!(gen_u64_std, u64, StdRng); +gen_uint_new!(gen_u64_os, u64, OsRng); + +#[bench] +fn gen_u64_jitter(b: &mut Bencher) { + let mut rng = JitterRng::new().unwrap(); + b.iter(|| { + black_box(rng.gen::()); + }); + b.bytes = size_of::() as u64; +} + +macro_rules! 
init_gen { + ($fnn:ident, $gen:ident) => { + #[bench] + fn $fnn(b: &mut Bencher) { + let mut rng: XorShiftRng = OsRng::new().unwrap().gen(); + b.iter(|| { + let r2: $gen = rng.gen(); + black_box(r2); + }); + } + } +} + +init_gen!(init_xorshift, XorShiftRng); +init_gen!(init_isaac, IsaacRng); +init_gen!(init_isaac64, Isaac64Rng); +init_gen!(init_chacha, ChaChaRng); + +#[bench] +fn init_jitter(b: &mut Bencher) { + b.iter(|| { + black_box(JitterRng::new().unwrap()); + }); +} diff --git a/third_party/rust/rand/benches/misc.rs b/third_party/rust/rand/benches/misc.rs new file mode 100644 index 000000000000..425176176626 --- /dev/null +++ b/third_party/rust/rand/benches/misc.rs @@ -0,0 +1,62 @@ +#![feature(test)] + +extern crate test; +extern crate rand; + +use test::{black_box, Bencher}; + +use rand::{Rng, weak_rng}; +use rand::seq::*; + +#[bench] +fn misc_shuffle_100(b: &mut Bencher) { + let mut rng = weak_rng(); + let x : &mut [usize] = &mut [1; 100]; + b.iter(|| { + rng.shuffle(x); + black_box(&x); + }) +} + +#[bench] +fn misc_sample_iter_10_of_100(b: &mut Bencher) { + let mut rng = weak_rng(); + let x : &[usize] = &[1; 100]; + b.iter(|| { + black_box(sample_iter(&mut rng, x, 10).unwrap_or_else(|e| e)); + }) +} + +#[bench] +fn misc_sample_slice_10_of_100(b: &mut Bencher) { + let mut rng = weak_rng(); + let x : &[usize] = &[1; 100]; + b.iter(|| { + black_box(sample_slice(&mut rng, x, 10)); + }) +} + +#[bench] +fn misc_sample_slice_ref_10_of_100(b: &mut Bencher) { + let mut rng = weak_rng(); + let x : &[usize] = &[1; 100]; + b.iter(|| { + black_box(sample_slice_ref(&mut rng, x, 10)); + }) +} + +macro_rules! 
sample_indices { + ($name:ident, $amount:expr, $length:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut rng = weak_rng(); + b.iter(|| { + black_box(sample_indices(&mut rng, $length, $amount)); + }) + } + } +} + +sample_indices!(misc_sample_indices_10_of_1k, 10, 1000); +sample_indices!(misc_sample_indices_50_of_1k, 50, 1000); +sample_indices!(misc_sample_indices_100_of_1k, 100, 1000); diff --git a/third_party/rust/rand/src/distributions/mod.rs b/third_party/rust/rand/src/distributions/mod.rs index 876735aae762..5de8efb9c472 100644 --- a/third_party/rust/rand/src/distributions/mod.rs +++ b/third_party/rust/rand/src/distributions/mod.rs @@ -17,20 +17,29 @@ //! internally. The `IndependentSample` trait is for generating values //! that do not need to record state. -use std::marker; +use core::marker; use {Rng, Rand}; pub use self::range::Range; +#[cfg(feature="std")] pub use self::gamma::{Gamma, ChiSquared, FisherF, StudentT}; +#[cfg(feature="std")] pub use self::normal::{Normal, LogNormal}; +#[cfg(feature="std")] pub use self::exponential::Exp; pub mod range; +#[cfg(feature="std")] pub mod gamma; +#[cfg(feature="std")] pub mod normal; +#[cfg(feature="std")] pub mod exponential; +#[cfg(feature="std")] +mod ziggurat_tables; + /// Types that can be used to create a random instance of `Support`. pub trait Sample { /// Generate a random value of `Support`, using `rng` as the @@ -124,7 +133,7 @@ impl<'a, T: Clone> WeightedChoice<'a, T> { /// /// Panics if: /// - /// - `v` is empty + /// - `items` is empty /// - the total weight is 0 /// - the total weight is larger than a `u32` can contain. pub fn new(items: &'a mut [Weighted]) -> WeightedChoice<'a, T> { @@ -203,8 +212,6 @@ impl<'a, T: Clone> IndependentSample for WeightedChoice<'a, T> { } } -mod ziggurat_tables; - /// Sample a random number using the Ziggurat method (specifically the /// ZIGNOR variant from Doornik 2005). 
Most of the arguments are /// directly from the paper: @@ -220,6 +227,7 @@ mod ziggurat_tables; // the perf improvement (25-50%) is definitely worth the extra code // size from force-inlining. +#[cfg(feature="std")] #[inline(always)] fn ziggurat( rng: &mut R, diff --git a/third_party/rust/rand/src/distributions/range.rs b/third_party/rust/rand/src/distributions/range.rs index 7206941d0dcb..935a00aac361 100644 --- a/third_party/rust/rand/src/distributions/range.rs +++ b/third_party/rust/rand/src/distributions/range.rs @@ -12,7 +12,7 @@ // this is surprisingly complicated to be both generic & correct -use std::num::Wrapping as w; +use core::num::Wrapping as w; use Rng; use distributions::{Sample, IndependentSample}; @@ -99,7 +99,7 @@ macro_rules! integer_impl { #[inline] fn construct_range(low: $ty, high: $ty) -> Range<$ty> { let range = (w(high as $unsigned) - w(low as $unsigned)).0; - let unsigned_max: $unsigned = ::std::$unsigned::MAX; + let unsigned_max: $unsigned = ::core::$unsigned::MAX; // this is the largest number that fits into $unsigned // that `range` divides evenly, so, if we've sampled @@ -136,11 +136,15 @@ integer_impl! { i8, u8 } integer_impl! { i16, u16 } integer_impl! { i32, u32 } integer_impl! { i64, u64 } +#[cfg(feature = "i128_support")] +integer_impl! { i128, u128 } integer_impl! { isize, usize } integer_impl! { u8, u8 } integer_impl! { u16, u16 } integer_impl! { u32, u32 } integer_impl! { u64, u64 } +#[cfg(feature = "i128_support")] +integer_impl! { u128, u128 } integer_impl! { usize, usize } macro_rules! 
float_impl { @@ -187,7 +191,7 @@ mod tests { $( let v: &[($ty, $ty)] = &[(0, 10), (10, 127), - (::std::$ty::MIN, ::std::$ty::MAX)]; + (::core::$ty::MIN, ::core::$ty::MAX)]; for &(low, high) in v.iter() { let mut sampler: Range<$ty> = Range::new(low, high); for _ in 0..1000 { @@ -200,8 +204,12 @@ mod tests { )* }} } + #[cfg(not(feature = "i128_support"))] t!(i8, i16, i32, i64, isize, - u8, u16, u32, u64, usize) + u8, u16, u32, u64, usize); + #[cfg(feature = "i128_support")] + t!(i8, i16, i32, i64, i128, isize, + u8, u16, u32, u64, u128, usize); } #[test] diff --git a/third_party/rust/rand/src/jitter.rs b/third_party/rust/rand/src/jitter.rs new file mode 100644 index 000000000000..369348110580 --- /dev/null +++ b/third_party/rust/rand/src/jitter.rs @@ -0,0 +1,754 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +// +// Based on jitterentropy-library, http://www.chronox.de/jent.html. +// Copyright Stephan Mueller , 2014 - 2017. +// +// With permission from Stephan Mueller to relicense the Rust translation under +// the MIT license. + +//! Non-physical true random number generator based on timing jitter. + +use Rng; + +use core::{fmt, mem, ptr}; +#[cfg(feature="std")] +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + +const MEMORY_BLOCKS: usize = 64; +const MEMORY_BLOCKSIZE: usize = 32; +const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE; + +/// A true random number generator based on jitter in the CPU execution time, +/// and jitter in memory access time. +/// +/// This is a true random number generator, as opposed to pseudo-random +/// generators. Random numbers generated by `JitterRng` can be seen as fresh +/// entropy. 
A consequence is that is orders of magnitude slower than `OsRng` +/// and PRNGs (about 10^3 .. 10^6 slower). +/// +/// There are very few situations where using this RNG is appropriate. Only very +/// few applications require true entropy. A normal PRNG can be statistically +/// indistinguishable, and a cryptographic PRNG should also be as impossible to +/// predict. +/// +/// Use of `JitterRng` is recommended for initializing cryptographic PRNGs when +/// `OsRng` is not available. +/// +/// This implementation is based on +/// [Jitterentropy](http://www.chronox.de/jent.html) version 2.1.0. +// +// Note: the C implementation relies on being compiled without optimizations. +// This implementation goes through lengths to make the compiler not optimise +// out what is technically dead code, but that does influence timing jitter. +pub struct JitterRng { + data: u64, // Actual random number + // Number of rounds to run the entropy collector per 64 bits + rounds: u32, + // Timer and previous time stamp, used by `measure_jitter` + timer: fn() -> u64, + prev_time: u64, + // Deltas used for the stuck test + last_delta: i64, + last_delta2: i64, + // Memory for the Memory Access noise source + mem_prev_index: usize, + mem: [u8; MEMORY_SIZE], + // Make `next_u32` not waste 32 bits + data_remaining: Option, +} + +// Custom Debug implementation that does not expose the internal state +impl fmt::Debug for JitterRng { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "JitterRng {{}}") + } +} + +/// An error that can occur when `test_timer` fails. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TimerError { + /// No timer available. + NoTimer, + /// Timer too coarse to use as an entropy source. + CoarseTimer, + /// Timer is not monotonically increasing. + NotMonotonic, + /// Variations of deltas of time too small. + TinyVariantions, + /// Too many stuck results (indicating no added entropy). 
+ TooManyStuck, + #[doc(hidden)] + __Nonexhaustive, +} + +impl TimerError { + fn description(&self) -> &'static str { + match *self { + TimerError::NoTimer => "no timer available", + TimerError::CoarseTimer => "coarse timer", + TimerError::NotMonotonic => "timer not monotonic", + TimerError::TinyVariantions => "time delta variations too small", + TimerError::TooManyStuck => "too many stuck results", + TimerError::__Nonexhaustive => unreachable!(), + } + } +} + +impl fmt::Display for TimerError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} + +#[cfg(feature="std")] +impl ::std::error::Error for TimerError { + fn description(&self) -> &str { + self.description() + } +} + +// Initialise to zero; must be positive +#[cfg(feature="std")] +static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT; + +impl JitterRng { + /// Create a new `JitterRng`. + /// Makes use of `std::time` for a timer. + /// + /// During initialization CPU execution timing jitter is measured a few + /// hundred times. If this does not pass basic quality tests, an error is + /// returned. The test result is cached to make subsequent calls faster. + #[cfg(feature="std")] + pub fn new() -> Result { + let mut ec = JitterRng::new_with_timer(platform::get_nstime); + let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u32; + if rounds == 0 { + // No result yet: run test. + // This allows the timer test to run multiple times; we don't care. + rounds = ec.test_timer()?; + JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed); + } + ec.set_rounds(rounds); + Ok(ec) + } + + /// Create a new `JitterRng`. + /// A custom timer can be supplied, making it possible to use `JitterRng` in + /// `no_std` environments. + /// + /// The timer must have nanosecond precision. + /// + /// This method is more low-level than `new()`. 
It is the responsibility of + /// the caller to run `test_timer` before using any numbers generated with + /// `JitterRng`, and optionally call `set_rounds()`. + pub fn new_with_timer(timer: fn() -> u64) -> JitterRng { + let mut ec = JitterRng { + data: 0, + rounds: 64, + timer: timer, + prev_time: 0, + last_delta: 0, + last_delta2: 0, + mem_prev_index: 0, + mem: [0; MEMORY_SIZE], + data_remaining: None, + }; + + // Fill `data`, `prev_time`, `last_delta` and `last_delta2` with + // non-zero values. + ec.prev_time = timer(); + ec.gen_entropy(); + + // Do a single read from `self.mem` to make sure the Memory Access noise + // source is not optimised out. + // Note: this read is important, it effects optimisations for the entire + // module! + black_box(ec.mem[0]); + + ec + } + + /// Configures how many rounds are used to generate each 64-bit value. + /// This must be greater than zero, and has a big impact on performance + /// and output quality. + /// + /// `new_with_timer` conservatively uses 64 rounds, but often less rounds + /// can be used. The `test_timer()` function returns the minimum number of + /// rounds required for full strength (platform dependent), so one may use + /// `rng.set_rounds(rng.test_timer()?);` or cache the value. + pub fn set_rounds(&mut self, rounds: u32) { + assert!(rounds > 0); + self.rounds = rounds; + } + + // Calculate a random loop count used for the next round of an entropy + // collection, based on bits from a fresh value from the timer. + // + // The timer is folded to produce a number that contains at most `n_bits` + // bits. + // + // Note: A constant should be added to the resulting random loop count to + // prevent loops that run 0 times. + #[inline(never)] + fn random_loop_cnt(&mut self, n_bits: u32) -> u32 { + let mut rounds = 0; + + let mut time = (self.timer)(); + // Mix with the current state of the random number balance the random + // loop counter a bit more. 
+ time ^= self.data; + + // We fold the time value as much as possible to ensure that as many + // bits of the time stamp are included as possible. + let folds = (64 + n_bits - 1) / n_bits; + let mask = (1 << n_bits) - 1; + for _ in 0..folds { + rounds ^= time & mask; + time = time >> n_bits; + } + + rounds as u32 + } + + // CPU jitter noise source + // Noise source based on the CPU execution time jitter + // + // This function injects the individual bits of the time value into the + // entropy pool using an LFSR. + // + // The code is deliberately inefficient with respect to the bit shifting. + // This function not only acts as folding operation, but this function's + // execution is used to measure the CPU execution time jitter. Any change to + // the loop in this function implies that careful retesting must be done. + #[inline(never)] + fn lfsr_time(&mut self, time: u64, var_rounds: bool) { + fn lfsr(mut data: u64, time: u64) -> u64{ + for i in 1..65 { + let mut tmp = time << (64 - i); + tmp = tmp >> (64 - 1); + + // Fibonacci LSFR with polynomial of + // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is + // primitive according to + // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf + // (the shift values are the polynomial values minus one + // due to counting bits from 0 to 63). As the current + // position is always the LSB, the polynomial only needs + // to shift data in from the left without wrap. + data ^= tmp; + data ^= (data >> 63) & 1; + data ^= (data >> 60) & 1; + data ^= (data >> 55) & 1; + data ^= (data >> 30) & 1; + data ^= (data >> 27) & 1; + data ^= (data >> 22) & 1; + data = data.rotate_left(1); + } + data + } + + // Note: in the reference implementation only the last round effects + // `self.data`, all the other results are ignored. To make sure the + // other rounds are not optimised out, we first run all but the last + // round on a throw-away value instead of the real `self.data`. 
+ let mut lfsr_loop_cnt = 0; + if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) }; + + let mut throw_away: u64 = 0; + for _ in 0..lfsr_loop_cnt { + throw_away = lfsr(throw_away, time); + } + black_box(throw_away); + + self.data = lfsr(self.data, time); + } + + // Memory Access noise source + // This is a noise source based on variations in memory access times + // + // This function performs memory accesses which will add to the timing + // variations due to an unknown amount of CPU wait states that need to be + // added when accessing memory. The memory size should be larger than the L1 + // caches as outlined in the documentation and the associated testing. + // + // The L1 cache has a very high bandwidth, albeit its access rate is usually + // slower than accessing CPU registers. Therefore, L1 accesses only add + // minimal variations as the CPU has hardly to wait. Starting with L2, + // significant variations are added because L2 typically does not belong to + // the CPU any more and therefore a wider range of CPU wait states is + // necessary for accesses. L3 and real memory accesses have even a wider + // range of wait states. However, to reliably access either L3 or memory, + // the `self.mem` memory must be quite large which is usually not desirable. + #[inline(never)] + fn memaccess(&mut self, var_rounds: bool) { + let mut acc_loop_cnt = 128; + if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) }; + + let mut index = self.mem_prev_index; + for _ in 0..acc_loop_cnt { + // Addition of memblocksize - 1 to index with wrap around logic to + // ensure that every memory location is hit evenly. + // The modulus also allows the compiler to remove the indexing + // bounds check. 
+ index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE; + + // memory access: just add 1 to one byte + // memory access implies read from and write to memory location + let tmp = self.mem[index]; + self.mem[index] = tmp.wrapping_add(1); + } + self.mem_prev_index = index; + } + + + // Stuck test by checking the: + // - 1st derivation of the jitter measurement (time delta) + // - 2nd derivation of the jitter measurement (delta of time deltas) + // - 3rd derivation of the jitter measurement (delta of delta of time + // deltas) + // + // All values must always be non-zero. + // This test is a heuristic to see whether the last measurement holds + // entropy. + fn stuck(&mut self, current_delta: i64) -> bool { + let delta2 = self.last_delta - current_delta; + let delta3 = delta2 - self.last_delta2; + + self.last_delta = current_delta; + self.last_delta2 = delta2; + + current_delta == 0 || delta2 == 0 || delta3 == 0 + } + + // This is the heart of the entropy generation: calculate time deltas and + // use the CPU jitter in the time deltas. The jitter is injected into the + // entropy pool. + // + // Ensure that `self.prev_time` is primed before using the output of this + // function. This can be done by calling this function and not using its + // result. + fn measure_jitter(&mut self) -> Option<()> { + // Invoke one noise source before time measurement to add variations + self.memaccess(true); + + // Get time stamp and calculate time delta to previous + // invocation to measure the timing variations + let time = (self.timer)(); + // Note: wrapping_sub combined with a cast to `i64` generates a correct + // delta, even in the unlikely case this is a timer that is not strictly + // monotonic. + let current_delta = time.wrapping_sub(self.prev_time) as i64; + self.prev_time = time; + + // Call the next noise source which also injects the data + self.lfsr_time(current_delta as u64, true); + + // Check whether we have a stuck measurement (i.e. 
does the last + // measurement holds entropy?). + if self.stuck(current_delta) { return None }; + + // Rotate the data buffer by a prime number (any odd number would + // do) to ensure that every bit position of the input time stamp + // has an even chance of being merged with a bit position in the + // entropy pool. We do not use one here as the adjacent bits in + // successive time deltas may have some form of dependency. The + // chosen value of 7 implies that the low 7 bits of the next + // time delta value is concatenated with the current time delta. + self.data = self.data.rotate_left(7); + + Some(()) + } + + // Shuffle the pool a bit by mixing some value with a bijective function + // (XOR) into the pool. + // + // The function generates a mixer value that depends on the bits set and + // the location of the set bits in the random number generated by the + // entropy source. Therefore, based on the generated random number, this + // mixer value can have 2^64 different values. That mixer value is + // initialized with the first two SHA-1 constants. After obtaining the + // mixer value, it is XORed into the random number. + // + // The mixer value is not assumed to contain any entropy. But due to the + // XOR operation, it can also not destroy any entropy present in the + // entropy pool. + #[inline(never)] + fn stir_pool(&mut self) { + // This constant is derived from the first two 32 bit initialization + // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 + // The order does not really matter as we do not rely on the specific + // numbers. We just pick the SHA-1 constants as they have a good mix of + // bit set and unset. 
+ const CONSTANT: u64 = 0x67452301efcdab89; + + // The start value of the mixer variable is derived from the third + // and fourth 32 bit initialization vector of SHA-1 as defined in + // FIPS 180-4 section 5.3.1 + let mut mixer = 0x98badcfe10325476; + + // This is a constant time function to prevent leaking timing + // information about the random number. + // The normal code is: + // ``` + // for i in 0..64 { + // if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; } + // } + // ``` + // This is a bit fragile, as LLVM really wants to use branches here, and + // we rely on it to not recognise the opportunity. + for i in 0..64 { + let apply = (self.data >> i) & 1; + let mask = !apply.wrapping_sub(1); + mixer ^= CONSTANT & mask; + mixer = mixer.rotate_left(1); + } + + self.data ^= mixer; + } + + fn gen_entropy(&mut self) -> u64 { + // Prime `self.prev_time`, and run the noice sources to make sure the + // first loop round collects the expected entropy. + let _ = self.measure_jitter(); + + for _ in 0..self.rounds { + // If a stuck measurement is received, repeat measurement + // Note: we do not guard against an infinite loop, that would mean + // the timer suddenly became broken. + while self.measure_jitter().is_none() {} + } + + self.stir_pool(); + self.data + } + + /// Basic quality tests on the timer, by measuring CPU timing jitter a few + /// hundred times. + /// + /// If succesful, this will return the estimated number of rounds necessary + /// to collect 64 bits of entropy. Otherwise a `TimerError` with the cause + /// of the failure will be returned. + pub fn test_timer(&mut self) -> Result { + // We could add a check for system capabilities such as `clock_getres` + // or check for `CONFIG_X86_TSC`, but it does not make much sense as the + // following sanity checks verify that we have a high-resolution timer. 
+ + #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] + return Err(TimerError::NoTimer); + + let mut delta_sum = 0; + let mut old_delta = 0; + + let mut time_backwards = 0; + let mut count_mod = 0; + let mut count_stuck = 0; + + // TESTLOOPCOUNT needs some loops to identify edge systems. + // 100 is definitely too little. + const TESTLOOPCOUNT: u64 = 300; + const CLEARCACHE: u64 = 100; + + for i in 0..(CLEARCACHE + TESTLOOPCOUNT) { + // Measure time delta of core entropy collection logic + let time = (self.timer)(); + self.memaccess(true); + self.lfsr_time(time, true); + let time2 = (self.timer)(); + + // Test whether timer works + if time == 0 || time2 == 0 { + return Err(TimerError::NoTimer); + } + let delta = time2.wrapping_sub(time) as i64; + + // Test whether timer is fine grained enough to provide delta even + // when called shortly after each other -- this implies that we also + // have a high resolution timer + if delta == 0 { + return Err(TimerError::CoarseTimer); + } + + // Up to here we did not modify any variable that will be + // evaluated later, but we already performed some work. Thus we + // already have had an impact on the caches, branch prediction, + // etc. with the goal to clear it to get the worst case + // measurements. + if i < CLEARCACHE { continue; } + + if self.stuck(delta) { count_stuck += 1; } + + // Test whether we have an increasing timer. + if !(time2 > time) { time_backwards += 1; } + + // Count the number of times the counter increases in steps of 100ns + // or greater. + if (delta % 100) == 0 { count_mod += 1; } + + // Ensure that we have a varying delta timer which is necessary for + // the calculation of entropy -- perform this check only after the + // first loop is executed as we need to prime the old_delta value + delta_sum += (delta - old_delta).abs() as u64; + old_delta = delta; + } + + // We allow the time to run backwards for up to three times. 
+ // This can happen if the clock is being adjusted by NTP operations. + // If such an operation just happens to interfere with our test, it + // should not fail. The value of 3 should cover the NTP case being + // performed during our test run. + if time_backwards > 3 { + return Err(TimerError::NotMonotonic); + } + + // Test that the available amount of entropy per round does not get to + // low. We expect 1 bit of entropy per round as a reasonable minimum + // (although less is possible, it means the collector loop has to run + // much more often). + // `assert!(delta_average >= log2(1))` + // `assert!(delta_sum / TESTLOOPCOUNT >= 1)` + // `assert!(delta_sum >= TESTLOOPCOUNT)` + if delta_sum < TESTLOOPCOUNT { + return Err(TimerError::TinyVariantions); + } + + // Ensure that we have variations in the time stamp below 100 for at + // least 10% of all checks -- on some platforms, the counter increments + // in multiples of 100, but not always + if count_mod > (TESTLOOPCOUNT * 9 / 10) { + return Err(TimerError::CoarseTimer); + } + + // If we have more than 90% stuck results, then this Jitter RNG is + // likely to not work well. + if count_stuck > (TESTLOOPCOUNT * 9 / 10) { + return Err(TimerError::TooManyStuck); + } + + // Estimate the number of `measure_jitter` rounds necessary for 64 bits + // of entropy. + // + // We don't try very hard to come up with a good estimate of the + // available bits of entropy per round here for two reasons: + // 1. Simple estimates of the available bits (like Shannon entropy) are + // too optimistic. + // 2) Unless we want to waste a lot of time during intialization, there + // only a small number of samples are available. + // + // Therefore we use a very simple and conservative estimate: + // `let bits_of_entropy = log2(delta_average) / 2`. + // + // The number of rounds `measure_jitter` should run to collect 64 bits + // of entropy is `64 / bits_of_entropy`. 
+ // + // To have smaller rounding errors, intermediate values are multiplied + // by `FACTOR`. To compensate for `log2` and division rounding down, + // add 1. + let delta_average = delta_sum / TESTLOOPCOUNT; + // println!("delta_average: {}", delta_average); + + const FACTOR: u32 = 3; + fn log2(x: u64) -> u32 { 64 - x.leading_zeros() } + + // pow(δ, FACTOR) must be representable; if you have overflow reduce FACTOR + Ok(64 * 2 * FACTOR / (log2(delta_average.pow(FACTOR)) + 1)) + } + + /// Statistical test: return the timer delta of one normal run of the + /// `JitterEntropy` entropy collector. + /// + /// Setting `var_rounds` to `true` will execute the memory access and the + /// CPU jitter noice sources a variable amount of times (just like a real + /// `JitterEntropy` round). + /// + /// Setting `var_rounds` to `false` will execute the noice sources the + /// minimal number of times. This can be used to measure the minimum amount + /// of entropy one round of entropy collector can collect in the worst case. + /// + /// # Example + /// + /// Use `timer_stats` to run the [NIST SP 800-90B Entropy Estimation Suite] + /// (https://github.com/usnistgov/SP800-90B_EntropyAssessment). + /// + /// This is the recommended way to test the quality of `JitterRng`. It + /// should be run before using the RNG on untested hardware, after changes + /// that could effect how the code is optimised, and after major compiler + /// compiler changes, like a new LLVM version. + /// + /// First generate two files `jitter_rng_var.bin` and `jitter_rng_var.min`. + /// + /// Execute `python noniid_main.py -v jitter_rng_var.bin 8`, and validate it + /// with `restart.py -v jitter_rng_var.bin 8 `. + /// This number is the expected amount of entropy that is at least available + /// for each round of the entropy collector. This number should be greater + /// than the amount estimated with `64 / test_timer()`. 
+ /// + /// Execute `python noniid_main.py -v -u 4 jitter_rng_var.bin 4`, and + /// validate it with `restart.py -v -u 4 jitter_rng_var.bin 4 `. + /// This number is the expected amount of entropy that is available in the + /// last 4 bits of the timer delta after running noice sources. Note that + /// a value of 3.70 is the minimum estimated entropy for true randomness. + /// + /// Execute `python noniid_main.py -v -u 4 jitter_rng_var.bin 4`, and + /// validate it with `restart.py -v -u 4 jitter_rng_var.bin 4 `. + /// This number is the expected amount of entropy that is available to the + /// entropy collecter if both noice sources only run their minimal number of + /// times. This measures the absolute worst-case, and gives a lower bound + /// for the available entropy. + /// + /// ```rust,no_run + /// use rand::JitterRng; + /// + /// # use std::error::Error; + /// # use std::fs::File; + /// # use std::io::Write; + /// # + /// # fn try_main() -> Result<(), Box> { + /// fn get_nstime() -> u64 { + /// use std::time::{SystemTime, UNIX_EPOCH}; + /// + /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + /// // The correct way to calculate the current time is + /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64` + /// // But this is faster, and the difference in terms of entropy is + /// // negligible (log2(10^9) == 29.9). + /// dur.as_secs() << 30 | dur.subsec_nanos() as u64 + /// } + /// + /// // Do not initialize with `JitterRng::new`, but with `new_with_timer`. + /// // 'new' always runst `test_timer`, and can therefore fail to + /// // initialize. We want to be able to get the statistics even when the + /// // timer test fails. 
+ /// let mut rng = JitterRng::new_with_timer(get_nstime); + /// + /// // 1_000_000 results are required for the NIST SP 800-90B Entropy + /// // Estimation Suite + /// // FIXME: this number is smaller here, otherwise the Doc-test is too slow + /// const ROUNDS: usize = 10_000; + /// let mut deltas_variable: Vec = Vec::with_capacity(ROUNDS); + /// let mut deltas_minimal: Vec = Vec::with_capacity(ROUNDS); + /// + /// for _ in 0..ROUNDS { + /// deltas_variable.push(rng.timer_stats(true) as u8); + /// deltas_minimal.push(rng.timer_stats(false) as u8); + /// } + /// + /// // Write out after the statistics collection loop, to not disturb the + /// // test results. + /// File::create("jitter_rng_var.bin")?.write(&deltas_variable)?; + /// File::create("jitter_rng_min.bin")?.write(&deltas_minimal)?; + /// # + /// # Ok(()) + /// # } + /// # + /// # fn main() { + /// # try_main().unwrap(); + /// # } + /// ``` + #[cfg(feature="std")] + pub fn timer_stats(&mut self, var_rounds: bool) -> i64 { + let time = platform::get_nstime(); + self.memaccess(var_rounds); + self.lfsr_time(time, var_rounds); + let time2 = platform::get_nstime(); + time2.wrapping_sub(time) as i64 + } +} + +#[cfg(feature="std")] +mod platform { + #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows", all(target_arch = "wasm32", not(target_os = "emscripten")))))] + pub fn get_nstime() -> u64 { + use std::time::{SystemTime, UNIX_EPOCH}; + + let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + // The correct way to calculate the current time is + // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64` + // But this is faster, and the difference in terms of entropy is negligible + // (log2(10^9) == 29.9). + dur.as_secs() << 30 | dur.subsec_nanos() as u64 + } + + #[cfg(any(target_os = "macos", target_os = "ios"))] + pub fn get_nstime() -> u64 { + extern crate libc; + // On Mac OS and iOS std::time::SystemTime only has 1000ns resolution. 
+ // We use `mach_absolute_time` instead. This provides a CPU dependent unit, + // to get real nanoseconds the result should by multiplied by numer/denom + // from `mach_timebase_info`. + // But we are not interested in the exact nanoseconds, just entropy. So we + // use the raw result. + unsafe { libc::mach_absolute_time() } + } + + #[cfg(target_os = "windows")] + pub fn get_nstime() -> u64 { + extern crate winapi; + unsafe { + let mut t = super::mem::zeroed(); + winapi::um::profileapi::QueryPerformanceCounter(&mut t); + *t.QuadPart() as u64 + } + } + + #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] + pub fn get_nstime() -> u64 { + unreachable!() + } +} + +// A function that is opaque to the optimizer to assist in avoiding dead-code +// elimination. Taken from `bencher`. +fn black_box(dummy: T) -> T { + unsafe { + let ret = ptr::read_volatile(&dummy); + mem::forget(dummy); + ret + } +} + +impl Rng for JitterRng { + fn next_u32(&mut self) -> u32 { + // We want to use both parts of the generated entropy + if let Some(high) = self.data_remaining.take() { + high + } else { + let data = self.next_u64(); + self.data_remaining = Some((data >> 32) as u32); + data as u32 + } + } + + fn next_u64(&mut self) -> u64 { + self.gen_entropy() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + let mut left = dest; + while left.len() >= 8 { + let (l, r) = {left}.split_at_mut(8); + left = r; + let chunk: [u8; 8] = unsafe { + mem::transmute(self.next_u64().to_le()) + }; + l.copy_from_slice(&chunk); + } + let n = left.len(); + if n > 0 { + let chunk: [u8; 8] = unsafe { + mem::transmute(self.next_u64().to_le()) + }; + left.copy_from_slice(&chunk[..n]); + } + } +} + +// There are no tests included because (1) this is an "external" RNG, so output +// is not reproducible and (2) `test_timer` *will* fail on some platforms. 
diff --git a/third_party/rust/rand/src/lib.rs b/third_party/rust/rand/src/lib.rs index 3c7d2cc51494..7b22dd45de7e 100644 --- a/third_party/rust/rand/src/lib.rs +++ b/third_party/rust/rand/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// Copyright 2013-2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -26,7 +26,7 @@ //! //! ```toml //! [dependencies] -//! rand = "0.3" +//! rand = "0.4" //! ``` //! //! and this to your crate root: @@ -184,7 +184,7 @@ //! // where the car is. The game host will never open the door with the car. //! fn game_host_open(car: u32, choice: u32, rng: &mut R) -> u32 { //! let choices = free_doors(&[car, choice]); -//! rand::sample(rng, choices.into_iter(), 1)[0] +//! rand::seq::sample_slice(rng, &choices, 1)[0] //! } //! //! // Returns the door we switch to, given our current choice and @@ -239,48 +239,63 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://docs.rs/rand/0.3")] + html_root_url = "https://docs.rs/rand/0.4")] #![deny(missing_debug_implementations)] -#![cfg_attr(feature = "i128_support", feature(i128_type))] +#![cfg_attr(not(feature="std"), no_std)] +#![cfg_attr(all(feature="alloc", not(feature="std")), feature(alloc))] +#![cfg_attr(feature = "i128_support", feature(i128_type, i128))] -#[cfg(test)] #[macro_use] extern crate log; +#[cfg(feature="std")] extern crate std as core; +#[cfg(all(feature = "alloc", not(feature="std")))] extern crate alloc; +use core::marker; +use core::mem; +#[cfg(feature="std")] use std::cell::RefCell; +#[cfg(feature="std")] use std::io; +#[cfg(feature="std")] use std::rc::Rc; -use std::cell::RefCell; -use std::marker; -use std::mem; -use std::io; -use std::rc::Rc; -use std::num::Wrapping as w; -use std::time; - -pub use 
os::OsRng; +// external rngs +pub use jitter::JitterRng; +#[cfg(feature="std")] pub use os::OsRng; +// pseudo rngs pub use isaac::{IsaacRng, Isaac64Rng}; pub use chacha::ChaChaRng; +pub use prng::XorShiftRng; +// local use declarations #[cfg(target_pointer_width = "32")] -use IsaacRng as IsaacWordRng; +use prng::IsaacRng as IsaacWordRng; #[cfg(target_pointer_width = "64")] -use Isaac64Rng as IsaacWordRng; +use prng::Isaac64Rng as IsaacWordRng; use distributions::{Range, IndependentSample}; use distributions::range::SampleRange; +// public modules pub mod distributions; -pub mod isaac; -pub mod chacha; +pub mod jitter; +#[cfg(feature="std")] pub mod os; +#[cfg(feature="std")] pub mod read; pub mod reseeding; -mod rand_impls; -pub mod os; -pub mod read; +#[cfg(any(feature="std", feature = "alloc"))] pub mod seq; + +// These tiny modules are here to avoid API breakage, probably only temporarily +pub mod chacha { + //! The ChaCha random number generator. + pub use prng::ChaChaRng; +} +pub mod isaac { + //! The ISAAC random number generator. + pub use prng::{IsaacRng, Isaac64Rng}; +} + +// private modules +mod rand_impls; +mod prng; -#[allow(bad_style)] -type w64 = w; -#[allow(bad_style)] -type w32 = w; /// A type that can be randomly generated using an `Rng`. /// @@ -304,8 +319,8 @@ type w32 = w; /// /// [`Open01`]: struct.Open01.html /// [`Closed01`]: struct.Closed01.html -/// [`Exp1`]: struct.Exp1.html -/// [`StandardNormal`]: struct.StandardNormal.html +/// [`Exp1`]: distributions/exponential/struct.Exp1.html +/// [`StandardNormal`]: distributions/normal/struct.StandardNormal.html /// /// The following aggregate types also implement `Rand` as long as their /// component types implement it: @@ -314,7 +329,6 @@ type w32 = w; /// independently, using its own `Rand` implementation. /// * `Option`: Returns `None` with probability 0.5; otherwise generates a /// random `T` and returns `Some(T)`. 
- pub trait Rand : Sized { /// Generates a random instance of this type using the specified source of /// randomness. @@ -618,6 +632,7 @@ impl<'a, R: ?Sized> Rng for &'a mut R where R: Rng { } } +#[cfg(feature="std")] impl Rng for Box where R: Rng { fn next_u32(&mut self) -> u32 { (**self).next_u32() @@ -715,93 +730,6 @@ pub trait SeedableRng: Rng { fn from_seed(seed: Seed) -> Self; } -/// An Xorshift[1] random number -/// generator. -/// -/// The Xorshift algorithm is not suitable for cryptographic purposes -/// but is very fast. If you do not know for sure that it fits your -/// requirements, use a more secure one such as `IsaacRng` or `OsRng`. -/// -/// [1]: Marsaglia, George (July 2003). ["Xorshift -/// RNGs"](http://www.jstatsoft.org/v08/i14/paper). *Journal of -/// Statistical Software*. Vol. 8 (Issue 14). -#[allow(missing_copy_implementations)] -#[derive(Clone, Debug)] -pub struct XorShiftRng { - x: w32, - y: w32, - z: w32, - w: w32, -} - -impl XorShiftRng { - /// Creates a new XorShiftRng instance which is not seeded. - /// - /// The initial values of this RNG are constants, so all generators created - /// by this function will yield the same stream of random numbers. It is - /// highly recommended that this is created through `SeedableRng` instead of - /// this function - pub fn new_unseeded() -> XorShiftRng { - XorShiftRng { - x: w(0x193a6754), - y: w(0xa8a7d469), - z: w(0x97830e05), - w: w(0x113ba7bb), - } - } -} - -impl Rng for XorShiftRng { - #[inline] - fn next_u32(&mut self) -> u32 { - let x = self.x; - let t = x ^ (x << 11); - self.x = self.y; - self.y = self.z; - self.z = self.w; - let w_ = self.w; - self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8)); - self.w.0 - } -} - -impl SeedableRng<[u32; 4]> for XorShiftRng { - /// Reseed an XorShiftRng. This will panic if `seed` is entirely 0. 
- fn reseed(&mut self, seed: [u32; 4]) { - assert!(!seed.iter().all(|&x| x == 0), - "XorShiftRng.reseed called with an all zero seed."); - - self.x = w(seed[0]); - self.y = w(seed[1]); - self.z = w(seed[2]); - self.w = w(seed[3]); - } - - /// Create a new XorShiftRng. This will panic if `seed` is entirely 0. - fn from_seed(seed: [u32; 4]) -> XorShiftRng { - assert!(!seed.iter().all(|&x| x == 0), - "XorShiftRng::from_seed called with an all zero seed."); - - XorShiftRng { - x: w(seed[0]), - y: w(seed[1]), - z: w(seed[2]), - w: w(seed[3]), - } - } -} - -impl Rand for XorShiftRng { - fn rand(rng: &mut R) -> XorShiftRng { - let mut tuple: (u32, u32, u32, u32) = rng.gen(); - while tuple == (0, 0, 0, 0) { - tuple = rng.gen(); - } - let (x, y, z, w_) = tuple; - XorShiftRng { x: w(x), y: w(y), z: w(z), w: w(w_) } - } -} - /// A wrapper for generating floating point numbers uniformly in the /// open interval `(0,1)` (not including either endpoint). /// @@ -856,8 +784,19 @@ impl StdRng { /// /// Reading the randomness from the OS may fail, and any error is /// propagated via the `io::Result` return value. + #[cfg(feature="std")] pub fn new() -> io::Result { - OsRng::new().map(|mut r| StdRng { rng: r.gen() }) + match OsRng::new() { + Ok(mut r) => Ok(StdRng { rng: r.gen() }), + Err(e1) => { + match JitterRng::new() { + Ok(mut r) => Ok(StdRng { rng: r.gen() }), + Err(_) => { + Err(e1) + } + } + } + } } } @@ -893,26 +832,32 @@ impl<'a> SeedableRng<&'a [usize]> for StdRng { /// create the `Rng` yourself. /// /// This will seed the generator with randomness from thread_rng. +#[cfg(feature="std")] pub fn weak_rng() -> XorShiftRng { thread_rng().gen() } /// Controls how the thread-local RNG is reseeded. 
+#[cfg(feature="std")] #[derive(Debug)] struct ThreadRngReseeder; +#[cfg(feature="std")] impl reseeding::Reseeder for ThreadRngReseeder { fn reseed(&mut self, rng: &mut StdRng) { match StdRng::new() { Ok(r) => *rng = r, - Err(_) => rng.reseed(&weak_seed()) + Err(e) => panic!("No entropy available: {}", e), } } } +#[cfg(feature="std")] const THREAD_RNG_RESEED_THRESHOLD: u64 = 32_768; +#[cfg(feature="std")] type ThreadRngInner = reseeding::ReseedingRng; /// The thread-local RNG. +#[cfg(feature="std")] #[derive(Clone, Debug)] pub struct ThreadRng { rng: Rc>, @@ -930,12 +875,13 @@ pub struct ThreadRng { /// if the operating system random number generator is rigged to give /// the same sequence always. If absolute consistency is required, /// explicitly select an RNG, e.g. `IsaacRng` or `Isaac64Rng`. +#[cfg(feature="std")] pub fn thread_rng() -> ThreadRng { // used to make space in TLS for a random number generator thread_local!(static THREAD_RNG_KEY: Rc> = { let r = match StdRng::new() { Ok(r) => r, - Err(_) => StdRng::from_seed(&weak_seed()) + Err(e) => panic!("No entropy available: {}", e), }; let rng = reseeding::ReseedingRng::new(r, THREAD_RNG_RESEED_THRESHOLD, @@ -946,14 +892,7 @@ pub fn thread_rng() -> ThreadRng { ThreadRng { rng: THREAD_RNG_KEY.with(|t| t.clone()) } } -fn weak_seed() -> [usize; 2] { - let now = time::SystemTime::now(); - let unix_time = now.duration_since(time::UNIX_EPOCH).unwrap(); - let seconds = unix_time.as_secs() as usize; - let nanoseconds = unix_time.subsec_nanos() as usize; - [seconds, nanoseconds] -} - +#[cfg(feature="std")] impl Rng for ThreadRng { fn next_u32(&mut self) -> u32 { self.rng.borrow_mut().next_u32() @@ -1003,7 +942,7 @@ impl Rng for ThreadRng { /// *x = rand::random() /// } /// -/// // would be faster as +/// // can be made faster by caching thread_rng /// /// let mut rng = rand::thread_rng(); /// @@ -1011,11 +950,14 @@ impl Rng for ThreadRng { /// *x = rng.gen(); /// } /// ``` +#[cfg(feature="std")] #[inline] pub fn 
random() -> T { thread_rng().gen() } +/// DEPRECATED: use `seq::sample_iter` instead. +/// /// Randomly sample up to `amount` elements from a finite iterator. /// The order of elements in the sample is not random. /// @@ -1028,28 +970,21 @@ pub fn random() -> T { /// let sample = sample(&mut rng, 1..100, 5); /// println!("{:?}", sample); /// ``` +#[cfg(feature="std")] +#[inline(always)] +#[deprecated(since="0.4.0", note="renamed to seq::sample_iter")] pub fn sample(rng: &mut R, iterable: I, amount: usize) -> Vec where I: IntoIterator, R: Rng, { - let mut iter = iterable.into_iter(); - let mut reservoir: Vec = iter.by_ref().take(amount).collect(); - // continue unless the iterator was exhausted - if reservoir.len() == amount { - for (i, elem) in iter.enumerate() { - let k = rng.gen_range(0, i + 1 + amount); - if let Some(spot) = reservoir.get_mut(k) { - *spot = elem; - } - } - } - reservoir + // the legacy sample didn't care whether amount was met + seq::sample_iter(rng, iterable, amount) + .unwrap_or_else(|e| e) } #[cfg(test)] mod test { - use super::{Rng, thread_rng, random, SeedableRng, StdRng, sample, - weak_rng}; + use super::{Rng, thread_rng, random, SeedableRng, StdRng, weak_rng}; use std::iter::repeat; pub struct MyRng { inner: R } @@ -1145,14 +1080,6 @@ mod test { r.gen_range(5, 2); } - #[test] - fn test_gen_f64() { - let mut r = thread_rng(); - let a = r.gen::(); - let b = r.gen::(); - debug!("{:?}", (a, b)); - } - #[test] fn test_gen_weighted_bool() { let mut r = thread_rng(); @@ -1255,24 +1182,6 @@ mod test { (f32, (f64, (f64,)))) = random(); } - #[test] - fn test_sample() { - let min_val = 1; - let max_val = 100; - - let mut r = thread_rng(); - let vals = (min_val..max_val).collect::>(); - let small_sample = sample(&mut r, vals.iter(), 5); - let large_sample = sample(&mut r, vals.iter(), vals.len() + 5); - - assert_eq!(small_sample.len(), 5); - assert_eq!(large_sample.len(), vals.len()); - - assert!(small_sample.iter().all(|e| { - **e >= min_val && **e 
<= max_val - })); - } - #[test] fn test_std_rng_seeded() { let s = thread_rng().gen_iter::().take(256).collect::>(); diff --git a/third_party/rust/rand/src/os.rs b/third_party/rust/rand/src/os.rs index 1eb903a22518..10022fbcd60c 100644 --- a/third_party/rust/rand/src/os.rs +++ b/third_party/rust/rand/src/os.rs @@ -53,13 +53,13 @@ impl fmt::Debug for OsRng { } } -fn next_u32(mut fill_buf: &mut FnMut(&mut [u8])) -> u32 { +fn next_u32(fill_buf: &mut FnMut(&mut [u8])) -> u32 { let mut buf: [u8; 4] = [0; 4]; fill_buf(&mut buf); unsafe { mem::transmute::<[u8; 4], u32>(buf) } } -fn next_u64(mut fill_buf: &mut FnMut(&mut [u8])) -> u64 { +fn next_u64(fill_buf: &mut FnMut(&mut [u8])) -> u64 { let mut buf: [u8; 8] = [0; 8]; fill_buf(&mut buf); unsafe { mem::transmute::<[u8; 8], u64>(buf) } @@ -102,7 +102,7 @@ mod imp { #[cfg(target_arch = "aarch64")] const NR_GETRANDOM: libc::c_long = 278; #[cfg(target_arch = "powerpc")] - const NR_GETRANDOM: libc::c_long = 384; + const NR_GETRANDOM: libc::c_long = 359; unsafe { syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), 0) @@ -407,7 +407,7 @@ mod imp { next_u64(&mut |v| self.fill_bytes(v)) } fn fill_bytes(&mut self, v: &mut [u8]) { - for s in v.chunks_mut(fuchsia_zircon::ZX_CPRNG_DRAW_MAX_LEN) { + for s in v.chunks_mut(fuchsia_zircon::sys::ZX_CPRNG_DRAW_MAX_LEN) { let mut filled = 0; while filled < s.len() { match fuchsia_zircon::cprng_draw(&mut s[filled..]) { @@ -422,19 +422,16 @@ mod imp { #[cfg(windows)] mod imp { + extern crate winapi; + use std::io; use Rng; use super::{next_u32, next_u64}; - type BOOLEAN = u8; - type ULONG = u32; - - #[link(name = "advapi32")] - extern "system" { - // This function's real name is `RtlGenRandom`. 
- fn SystemFunction036(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN; - } + use self::winapi::shared::minwindef::ULONG; + use self::winapi::um::ntsecapi::RtlGenRandom; + use self::winapi::um::winnt::PVOID; #[derive(Debug)] pub struct OsRng; @@ -457,7 +454,7 @@ mod imp { // split up the buffer. for slice in v.chunks_mut(::max_value() as usize) { let ret = unsafe { - SystemFunction036(slice.as_mut_ptr(), slice.len() as ULONG) + RtlGenRandom(slice.as_mut_ptr() as PVOID, slice.len() as ULONG) }; if ret == 0 { panic!("couldn't generate random bytes: {}", @@ -544,6 +541,26 @@ mod imp { } } +#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] +mod imp { + use std::io; + use Rng; + + #[derive(Debug)] + pub struct OsRng; + + impl OsRng { + pub fn new() -> io::Result { + Err(io::Error::new(io::ErrorKind::Other, "Not supported")) + } + } + + impl Rng for OsRng { + fn next_u32(&mut self) -> u32 { + panic!("Not supported") + } + } +} #[cfg(test)] mod test { diff --git a/third_party/rust/rand/src/chacha.rs b/third_party/rust/rand/src/prng/chacha.rs similarity index 99% rename from third_party/rust/rand/src/chacha.rs rename to third_party/rust/rand/src/prng/chacha.rs index 1acec5e9bf59..a73e8e78f466 100644 --- a/third_party/rust/rand/src/chacha.rs +++ b/third_party/rust/rand/src/prng/chacha.rs @@ -10,8 +10,11 @@ //! The ChaCha random number generator. -use std::num::Wrapping as w; -use {Rng, SeedableRng, Rand, w32}; +use core::num::Wrapping as w; +use {Rng, SeedableRng, Rand}; + +#[allow(bad_style)] +type w32 = w; const KEY_WORDS : usize = 8; // 8 words for the 256-bit key const STATE_WORDS : usize = 16; diff --git a/third_party/rust/rand/src/prng/isaac.rs b/third_party/rust/rand/src/prng/isaac.rs new file mode 100644 index 000000000000..cf5eb679c088 --- /dev/null +++ b/third_party/rust/rand/src/prng/isaac.rs @@ -0,0 +1,328 @@ +// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The ISAAC random number generator. + +#![allow(non_camel_case_types)] + +use core::slice; +use core::iter::repeat; +use core::num::Wrapping as w; +use core::fmt; + +use {Rng, SeedableRng, Rand}; + +#[allow(bad_style)] +type w32 = w; + +const RAND_SIZE_LEN: usize = 8; +const RAND_SIZE: u32 = 1 << RAND_SIZE_LEN; +const RAND_SIZE_USIZE: usize = 1 << RAND_SIZE_LEN; + +/// A random number generator that uses the ISAAC algorithm[1]. +/// +/// The ISAAC algorithm is generally accepted as suitable for +/// cryptographic purposes, but this implementation has not be +/// verified as such. Prefer a generator like `OsRng` that defers to +/// the operating system for cases that need high security. +/// +/// [1]: Bob Jenkins, [*ISAAC: A fast cryptographic random number +/// generator*](http://www.burtleburtle.net/bob/rand/isaacafa.html) +#[derive(Copy)] +pub struct IsaacRng { + cnt: u32, + rsl: [w32; RAND_SIZE_USIZE], + mem: [w32; RAND_SIZE_USIZE], + a: w32, + b: w32, + c: w32, +} + +static EMPTY: IsaacRng = IsaacRng { + cnt: 0, + rsl: [w(0); RAND_SIZE_USIZE], + mem: [w(0); RAND_SIZE_USIZE], + a: w(0), b: w(0), c: w(0), +}; + +impl IsaacRng { + + /// Create an ISAAC random number generator using the default + /// fixed seed. + pub fn new_unseeded() -> IsaacRng { + let mut rng = EMPTY; + rng.init(false); + rng + } + + /// Initialises `self`. If `use_rsl` is true, then use the current value + /// of `rsl` as a seed, otherwise construct one algorithmically (not + /// randomly). 
+ fn init(&mut self, use_rsl: bool) { + let mut a = w(0x9e3779b9); + let mut b = a; + let mut c = a; + let mut d = a; + let mut e = a; + let mut f = a; + let mut g = a; + let mut h = a; + + macro_rules! mix { + () => {{ + a=a^(b<<11); d=d+a; b=b+c; + b=b^(c>>2); e=e+b; c=c+d; + c=c^(d<<8); f=f+c; d=d+e; + d=d^(e>>16); g=g+d; e=e+f; + e=e^(f<<10); h=h+e; f=f+g; + f=f^(g>>4); a=a+f; g=g+h; + g=g^(h<<8); b=b+g; h=h+a; + h=h^(a>>9); c=c+h; a=a+b; + }} + } + + for _ in 0..4 { + mix!(); + } + + if use_rsl { + macro_rules! memloop { + ($arr:expr) => {{ + for i in (0..RAND_SIZE_USIZE/8).map(|i| i * 8) { + a=a+$arr[i ]; b=b+$arr[i+1]; + c=c+$arr[i+2]; d=d+$arr[i+3]; + e=e+$arr[i+4]; f=f+$arr[i+5]; + g=g+$arr[i+6]; h=h+$arr[i+7]; + mix!(); + self.mem[i ]=a; self.mem[i+1]=b; + self.mem[i+2]=c; self.mem[i+3]=d; + self.mem[i+4]=e; self.mem[i+5]=f; + self.mem[i+6]=g; self.mem[i+7]=h; + } + }} + } + + memloop!(self.rsl); + memloop!(self.mem); + } else { + for i in (0..RAND_SIZE_USIZE/8).map(|i| i * 8) { + mix!(); + self.mem[i ]=a; self.mem[i+1]=b; + self.mem[i+2]=c; self.mem[i+3]=d; + self.mem[i+4]=e; self.mem[i+5]=f; + self.mem[i+6]=g; self.mem[i+7]=h; + } + } + + self.isaac(); + } + + /// Refills the output buffer (`self.rsl`) + #[inline] + fn isaac(&mut self) { + self.c = self.c + w(1); + // abbreviations + let mut a = self.a; + let mut b = self.b + self.c; + + const MIDPOINT: usize = RAND_SIZE_USIZE / 2; + + macro_rules! ind { + ($x:expr) => ( self.mem[($x >> 2usize).0 as usize & (RAND_SIZE_USIZE - 1)] ) + } + + let r = [(0, MIDPOINT), (MIDPOINT, 0)]; + for &(mr_offset, m2_offset) in r.iter() { + + macro_rules! rngstepp { + ($j:expr, $shift:expr) => {{ + let base = $j; + let mix = a << $shift; + + let x = self.mem[base + mr_offset]; + a = (a ^ mix) + self.mem[base + m2_offset]; + let y = ind!(x) + a + b; + self.mem[base + mr_offset] = y; + + b = ind!(y >> RAND_SIZE_LEN) + x; + self.rsl[base + mr_offset] = b; + }} + } + + macro_rules! 
rngstepn { + ($j:expr, $shift:expr) => {{ + let base = $j; + let mix = a >> $shift; + + let x = self.mem[base + mr_offset]; + a = (a ^ mix) + self.mem[base + m2_offset]; + let y = ind!(x) + a + b; + self.mem[base + mr_offset] = y; + + b = ind!(y >> RAND_SIZE_LEN) + x; + self.rsl[base + mr_offset] = b; + }} + } + + for i in (0..MIDPOINT/4).map(|i| i * 4) { + rngstepp!(i + 0, 13); + rngstepn!(i + 1, 6); + rngstepp!(i + 2, 2); + rngstepn!(i + 3, 16); + } + } + + self.a = a; + self.b = b; + self.cnt = RAND_SIZE; + } +} + +// Cannot be derived because [u32; 256] does not implement Clone +impl Clone for IsaacRng { + fn clone(&self) -> IsaacRng { + *self + } +} + +impl Rng for IsaacRng { + #[inline] + fn next_u32(&mut self) -> u32 { + if self.cnt == 0 { + // make some more numbers + self.isaac(); + } + self.cnt -= 1; + + // self.cnt is at most RAND_SIZE, but that is before the + // subtraction above. We want to index without bounds + // checking, but this could lead to incorrect code if someone + // misrefactors, so we check, sometimes. + // + // (Changes here should be reflected in Isaac64Rng.next_u64.) + debug_assert!(self.cnt < RAND_SIZE); + + // (the % is cheaply telling the optimiser that we're always + // in bounds, without unsafe. NB. this is a power of two, so + // it optimises to a bitwise mask). + self.rsl[(self.cnt % RAND_SIZE) as usize].0 + } +} + +impl<'a> SeedableRng<&'a [u32]> for IsaacRng { + fn reseed(&mut self, seed: &'a [u32]) { + // make the seed into [seed[0], seed[1], ..., seed[seed.len() + // - 1], 0, 0, ...], to fill rng.rsl. + let seed_iter = seed.iter().map(|&x| x).chain(repeat(0u32)); + + for (rsl_elem, seed_elem) in self.rsl.iter_mut().zip(seed_iter) { + *rsl_elem = w(seed_elem); + } + self.cnt = 0; + self.a = w(0); + self.b = w(0); + self.c = w(0); + + self.init(true); + } + + /// Create an ISAAC random number generator with a seed. 
This can + /// be any length, although the maximum number of elements used is + /// 256 and any more will be silently ignored. A generator + /// constructed with a given seed will generate the same sequence + /// of values as all other generators constructed with that seed. + fn from_seed(seed: &'a [u32]) -> IsaacRng { + let mut rng = EMPTY; + rng.reseed(seed); + rng + } +} + +impl Rand for IsaacRng { + fn rand(other: &mut R) -> IsaacRng { + let mut ret = EMPTY; + unsafe { + let ptr = ret.rsl.as_mut_ptr() as *mut u8; + + let slice = slice::from_raw_parts_mut(ptr, RAND_SIZE_USIZE * 4); + other.fill_bytes(slice); + } + ret.cnt = 0; + ret.a = w(0); + ret.b = w(0); + ret.c = w(0); + + ret.init(true); + return ret; + } +} + +impl fmt::Debug for IsaacRng { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "IsaacRng {{}}") + } +} + +#[cfg(test)] +mod test { + use {Rng, SeedableRng}; + use super::IsaacRng; + + #[test] + fn test_rng_32_rand_seeded() { + let s = ::test::rng().gen_iter::().take(256).collect::>(); + let mut ra: IsaacRng = SeedableRng::from_seed(&s[..]); + let mut rb: IsaacRng = SeedableRng::from_seed(&s[..]); + assert!(::test::iter_eq(ra.gen_ascii_chars().take(100), + rb.gen_ascii_chars().take(100))); + } + + #[test] + fn test_rng_32_seeded() { + let seed: &[_] = &[1, 23, 456, 7890, 12345]; + let mut ra: IsaacRng = SeedableRng::from_seed(seed); + let mut rb: IsaacRng = SeedableRng::from_seed(seed); + assert!(::test::iter_eq(ra.gen_ascii_chars().take(100), + rb.gen_ascii_chars().take(100))); + } + + #[test] + fn test_rng_32_reseed() { + let s = ::test::rng().gen_iter::().take(256).collect::>(); + let mut r: IsaacRng = SeedableRng::from_seed(&s[..]); + let string1: String = r.gen_ascii_chars().take(100).collect(); + + r.reseed(&s[..]); + + let string2: String = r.gen_ascii_chars().take(100).collect(); + assert_eq!(string1, string2); + } + + #[test] + fn test_rng_32_true_values() { + let seed: &[_] = &[1, 23, 456, 7890, 12345]; + let mut ra: 
IsaacRng = SeedableRng::from_seed(seed); + // Regression test that isaac is actually using the above vector + let v = (0..10).map(|_| ra.next_u32()).collect::>(); + assert_eq!(v, + vec!(2558573138, 873787463, 263499565, 2103644246, 3595684709, + 4203127393, 264982119, 2765226902, 2737944514, 3900253796)); + + let seed: &[_] = &[12345, 67890, 54321, 9876]; + let mut rb: IsaacRng = SeedableRng::from_seed(seed); + // skip forward to the 10000th number + for _ in 0..10000 { rb.next_u32(); } + + let v = (0..10).map(|_| rb.next_u32()).collect::>(); + assert_eq!(v, + vec!(3676831399, 3183332890, 2834741178, 3854698763, 2717568474, + 1576568959, 3507990155, 179069555, 141456972, 2478885421)); + } +} diff --git a/third_party/rust/rand/src/isaac.rs b/third_party/rust/rand/src/prng/isaac64.rs similarity index 52% rename from third_party/rust/rand/src/isaac.rs rename to third_party/rust/rand/src/prng/isaac64.rs index b70a8e61d3f2..b98e3fec4f52 100644 --- a/third_party/rust/rand/src/isaac.rs +++ b/third_party/rust/rand/src/prng/isaac64.rs @@ -8,264 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! The ISAAC random number generator. +//! The ISAAC-64 random number generator. -#![allow(non_camel_case_types)] +use core::slice; +use core::iter::repeat; +use core::num::Wrapping as w; +use core::fmt; -use std::slice; -use std::iter::repeat; -use std::num::Wrapping as w; -use std::fmt; +use {Rng, SeedableRng, Rand}; -use {Rng, SeedableRng, Rand, w32, w64}; - -const RAND_SIZE_LEN: usize = 8; -const RAND_SIZE: u32 = 1 << RAND_SIZE_LEN; -const RAND_SIZE_USIZE: usize = 1 << RAND_SIZE_LEN; - -/// A random number generator that uses the ISAAC algorithm[1]. -/// -/// The ISAAC algorithm is generally accepted as suitable for -/// cryptographic purposes, but this implementation has not be -/// verified as such. Prefer a generator like `OsRng` that defers to -/// the operating system for cases that need high security. 
-/// -/// [1]: Bob Jenkins, [*ISAAC: A fast cryptographic random number -/// generator*](http://www.burtleburtle.net/bob/rand/isaacafa.html) -#[derive(Copy)] -pub struct IsaacRng { - cnt: u32, - rsl: [w32; RAND_SIZE_USIZE], - mem: [w32; RAND_SIZE_USIZE], - a: w32, - b: w32, - c: w32, -} - -static EMPTY: IsaacRng = IsaacRng { - cnt: 0, - rsl: [w(0); RAND_SIZE_USIZE], - mem: [w(0); RAND_SIZE_USIZE], - a: w(0), b: w(0), c: w(0), -}; - -impl IsaacRng { - - /// Create an ISAAC random number generator using the default - /// fixed seed. - pub fn new_unseeded() -> IsaacRng { - let mut rng = EMPTY; - rng.init(false); - rng - } - - /// Initialises `self`. If `use_rsl` is true, then use the current value - /// of `rsl` as a seed, otherwise construct one algorithmically (not - /// randomly). - fn init(&mut self, use_rsl: bool) { - let mut a = w(0x9e3779b9); - let mut b = a; - let mut c = a; - let mut d = a; - let mut e = a; - let mut f = a; - let mut g = a; - let mut h = a; - - macro_rules! mix { - () => {{ - a=a^(b<<11); d=d+a; b=b+c; - b=b^(c>>2); e=e+b; c=c+d; - c=c^(d<<8); f=f+c; d=d+e; - d=d^(e>>16); g=g+d; e=e+f; - e=e^(f<<10); h=h+e; f=f+g; - f=f^(g>>4); a=a+f; g=g+h; - g=g^(h<<8); b=b+g; h=h+a; - h=h^(a>>9); c=c+h; a=a+b; - }} - } - - for _ in 0..4 { - mix!(); - } - - if use_rsl { - macro_rules! 
memloop { - ($arr:expr) => {{ - for i in (0..RAND_SIZE_USIZE/8).map(|i| i * 8) { - a=a+$arr[i ]; b=b+$arr[i+1]; - c=c+$arr[i+2]; d=d+$arr[i+3]; - e=e+$arr[i+4]; f=f+$arr[i+5]; - g=g+$arr[i+6]; h=h+$arr[i+7]; - mix!(); - self.mem[i ]=a; self.mem[i+1]=b; - self.mem[i+2]=c; self.mem[i+3]=d; - self.mem[i+4]=e; self.mem[i+5]=f; - self.mem[i+6]=g; self.mem[i+7]=h; - } - }} - } - - memloop!(self.rsl); - memloop!(self.mem); - } else { - for i in (0..RAND_SIZE_USIZE/8).map(|i| i * 8) { - mix!(); - self.mem[i ]=a; self.mem[i+1]=b; - self.mem[i+2]=c; self.mem[i+3]=d; - self.mem[i+4]=e; self.mem[i+5]=f; - self.mem[i+6]=g; self.mem[i+7]=h; - } - } - - self.isaac(); - } - - /// Refills the output buffer (`self.rsl`) - #[inline] - fn isaac(&mut self) { - self.c = self.c + w(1); - // abbreviations - let mut a = self.a; - let mut b = self.b + self.c; - - const MIDPOINT: usize = RAND_SIZE_USIZE / 2; - - macro_rules! ind { - ($x:expr) => ( self.mem[($x >> 2usize).0 as usize & (RAND_SIZE_USIZE - 1)] ) - } - - let r = [(0, MIDPOINT), (MIDPOINT, 0)]; - for &(mr_offset, m2_offset) in r.iter() { - - macro_rules! rngstepp { - ($j:expr, $shift:expr) => {{ - let base = $j; - let mix = a << $shift; - - let x = self.mem[base + mr_offset]; - a = (a ^ mix) + self.mem[base + m2_offset]; - let y = ind!(x) + a + b; - self.mem[base + mr_offset] = y; - - b = ind!(y >> RAND_SIZE_LEN) + x; - self.rsl[base + mr_offset] = b; - }} - } - - macro_rules! 
rngstepn { - ($j:expr, $shift:expr) => {{ - let base = $j; - let mix = a >> $shift; - - let x = self.mem[base + mr_offset]; - a = (a ^ mix) + self.mem[base + m2_offset]; - let y = ind!(x) + a + b; - self.mem[base + mr_offset] = y; - - b = ind!(y >> RAND_SIZE_LEN) + x; - self.rsl[base + mr_offset] = b; - }} - } - - for i in (0..MIDPOINT/4).map(|i| i * 4) { - rngstepp!(i + 0, 13); - rngstepn!(i + 1, 6); - rngstepp!(i + 2, 2); - rngstepn!(i + 3, 16); - } - } - - self.a = a; - self.b = b; - self.cnt = RAND_SIZE; - } -} - -// Cannot be derived because [u32; 256] does not implement Clone -impl Clone for IsaacRng { - fn clone(&self) -> IsaacRng { - *self - } -} - -impl Rng for IsaacRng { - #[inline] - fn next_u32(&mut self) -> u32 { - if self.cnt == 0 { - // make some more numbers - self.isaac(); - } - self.cnt -= 1; - - // self.cnt is at most RAND_SIZE, but that is before the - // subtraction above. We want to index without bounds - // checking, but this could lead to incorrect code if someone - // misrefactors, so we check, sometimes. - // - // (Changes here should be reflected in Isaac64Rng.next_u64.) - debug_assert!(self.cnt < RAND_SIZE); - - // (the % is cheaply telling the optimiser that we're always - // in bounds, without unsafe. NB. this is a power of two, so - // it optimises to a bitwise mask). - self.rsl[(self.cnt % RAND_SIZE) as usize].0 - } -} - -impl<'a> SeedableRng<&'a [u32]> for IsaacRng { - fn reseed(&mut self, seed: &'a [u32]) { - // make the seed into [seed[0], seed[1], ..., seed[seed.len() - // - 1], 0, 0, ...], to fill rng.rsl. - let seed_iter = seed.iter().map(|&x| x).chain(repeat(0u32)); - - for (rsl_elem, seed_elem) in self.rsl.iter_mut().zip(seed_iter) { - *rsl_elem = w(seed_elem); - } - self.cnt = 0; - self.a = w(0); - self.b = w(0); - self.c = w(0); - - self.init(true); - } - - /// Create an ISAAC random number generator with a seed. 
This can - /// be any length, although the maximum number of elements used is - /// 256 and any more will be silently ignored. A generator - /// constructed with a given seed will generate the same sequence - /// of values as all other generators constructed with that seed. - fn from_seed(seed: &'a [u32]) -> IsaacRng { - let mut rng = EMPTY; - rng.reseed(seed); - rng - } -} - -impl Rand for IsaacRng { - fn rand(other: &mut R) -> IsaacRng { - let mut ret = EMPTY; - unsafe { - let ptr = ret.rsl.as_mut_ptr() as *mut u8; - - let slice = slice::from_raw_parts_mut(ptr, RAND_SIZE_USIZE * 4); - other.fill_bytes(slice); - } - ret.cnt = 0; - ret.a = w(0); - ret.b = w(0); - ret.c = w(0); - - ret.init(true); - return ret; - } -} - -impl fmt::Debug for IsaacRng { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "IsaacRng {{}}") - } -} +#[allow(bad_style)] +type w64 = w; const RAND_SIZE_64_LEN: usize = 8; const RAND_SIZE_64: usize = 1 << RAND_SIZE_64_LEN; @@ -441,7 +194,6 @@ impl Clone for Isaac64Rng { } impl Rng for Isaac64Rng { - // FIXME #7771: having next_u32 like this should be unnecessary #[inline] fn next_u32(&mut self) -> u32 { self.next_u64() as u32 @@ -519,16 +271,8 @@ impl fmt::Debug for Isaac64Rng { #[cfg(test)] mod test { use {Rng, SeedableRng}; - use super::{IsaacRng, Isaac64Rng}; + use super::Isaac64Rng; - #[test] - fn test_rng_32_rand_seeded() { - let s = ::test::rng().gen_iter::().take(256).collect::>(); - let mut ra: IsaacRng = SeedableRng::from_seed(&s[..]); - let mut rb: IsaacRng = SeedableRng::from_seed(&s[..]); - assert!(::test::iter_eq(ra.gen_ascii_chars().take(100), - rb.gen_ascii_chars().take(100))); - } #[test] fn test_rng_64_rand_seeded() { let s = ::test::rng().gen_iter::().take(256).collect::>(); @@ -538,14 +282,6 @@ mod test { rb.gen_ascii_chars().take(100))); } - #[test] - fn test_rng_32_seeded() { - let seed: &[_] = &[1, 23, 456, 7890, 12345]; - let mut ra: IsaacRng = SeedableRng::from_seed(seed); - let mut rb: IsaacRng = 
SeedableRng::from_seed(seed); - assert!(::test::iter_eq(ra.gen_ascii_chars().take(100), - rb.gen_ascii_chars().take(100))); - } #[test] fn test_rng_64_seeded() { let seed: &[_] = &[1, 23, 456, 7890, 12345]; @@ -555,17 +291,6 @@ mod test { rb.gen_ascii_chars().take(100))); } - #[test] - fn test_rng_32_reseed() { - let s = ::test::rng().gen_iter::().take(256).collect::>(); - let mut r: IsaacRng = SeedableRng::from_seed(&s[..]); - let string1: String = r.gen_ascii_chars().take(100).collect(); - - r.reseed(&s[..]); - - let string2: String = r.gen_ascii_chars().take(100).collect(); - assert_eq!(string1, string2); - } #[test] fn test_rng_64_reseed() { let s = ::test::rng().gen_iter::().take(256).collect::>(); @@ -578,26 +303,6 @@ mod test { assert_eq!(string1, string2); } - #[test] - fn test_rng_32_true_values() { - let seed: &[_] = &[1, 23, 456, 7890, 12345]; - let mut ra: IsaacRng = SeedableRng::from_seed(seed); - // Regression test that isaac is actually using the above vector - let v = (0..10).map(|_| ra.next_u32()).collect::>(); - assert_eq!(v, - vec!(2558573138, 873787463, 263499565, 2103644246, 3595684709, - 4203127393, 264982119, 2765226902, 2737944514, 3900253796)); - - let seed: &[_] = &[12345, 67890, 54321, 9876]; - let mut rb: IsaacRng = SeedableRng::from_seed(seed); - // skip forward to the 10000th number - for _ in 0..10000 { rb.next_u32(); } - - let v = (0..10).map(|_| rb.next_u32()).collect::>(); - assert_eq!(v, - vec!(3676831399, 3183332890, 2834741178, 3854698763, 2717568474, - 1576568959, 3507990155, 179069555, 141456972, 2478885421)); - } #[test] fn test_rng_64_true_values() { let seed: &[_] = &[1, 23, 456, 7890, 12345]; diff --git a/third_party/rust/rand/src/prng/mod.rs b/third_party/rust/rand/src/prng/mod.rs new file mode 100644 index 000000000000..ed3e01886495 --- /dev/null +++ b/third_party/rust/rand/src/prng/mod.rs @@ -0,0 +1,51 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Pseudo random number generators are algorithms to produce *apparently +//! random* numbers deterministically, and usually fairly quickly. +//! +//! So long as the algorithm is computationally secure, is initialised with +//! sufficient entropy (i.e. unknown by an attacker), and its internal state is +//! also protected (unknown to an attacker), the output will also be +//! *computationally secure*. Computationally Secure Pseudo Random Number +//! Generators (CSPRNGs) are thus suitable sources of random numbers for +//! cryptography. There are a couple of gotchas here, however. First, the seed +//! used for initialisation must be unknown. Usually this should be provided by +//! the operating system and should usually be secure, however this may not +//! always be the case (especially soon after startup). Second, user-space +//! memory may be vulnerable, for example when written to swap space, and after +//! forking a child process should reinitialise any user-space PRNGs. For this +//! reason it may be preferable to source random numbers directly from the OS +//! for cryptographic applications. +//! +//! PRNGs are also widely used for non-cryptographic uses: randomised +//! algorithms, simulations, games. In these applications it is usually not +//! important for numbers to be cryptographically *unguessable*, but even +//! distribution and independence from other samples (from the point of view +//! of someone unaware of the algorithm used, at least) may still be important. +//! Good PRNGs should satisfy these properties, but do not take them for +//! granted; Wikipedia's article on +//! 
[Pseudorandom number generators](https://en.wikipedia.org/wiki/Pseudorandom_number_generator) +//! provides some background on this topic. +//! +//! Care should be taken when seeding (initialising) PRNGs. Some PRNGs have +//! short periods for some seeds. If one PRNG is seeded from another using the +//! same algorithm, it is possible that both will yield the same sequence of +//! values (with some lag). + +mod chacha; +mod isaac; +mod isaac64; +mod xorshift; + +pub use self::chacha::ChaChaRng; +pub use self::isaac::IsaacRng; +pub use self::isaac64::Isaac64Rng; +pub use self::xorshift::XorShiftRng; diff --git a/third_party/rust/rand/src/prng/xorshift.rs b/third_party/rust/rand/src/prng/xorshift.rs new file mode 100644 index 000000000000..dd367e9bfe91 --- /dev/null +++ b/third_party/rust/rand/src/prng/xorshift.rs @@ -0,0 +1,101 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Xorshift generators + +use core::num::Wrapping as w; +use {Rng, SeedableRng, Rand}; + +/// An Xorshift[1] random number +/// generator. +/// +/// The Xorshift algorithm is not suitable for cryptographic purposes +/// but is very fast. If you do not know for sure that it fits your +/// requirements, use a more secure one such as `IsaacRng` or `OsRng`. +/// +/// [1]: Marsaglia, George (July 2003). ["Xorshift +/// RNGs"](http://www.jstatsoft.org/v08/i14/paper). *Journal of +/// Statistical Software*. Vol. 8 (Issue 14). +#[allow(missing_copy_implementations)] +#[derive(Clone, Debug)] +pub struct XorShiftRng { + x: w, + y: w, + z: w, + w: w, +} + +impl XorShiftRng { + /// Creates a new XorShiftRng instance which is not seeded. 
+ /// + /// The initial values of this RNG are constants, so all generators created + /// by this function will yield the same stream of random numbers. It is + /// highly recommended that this is created through `SeedableRng` instead of + /// this function + pub fn new_unseeded() -> XorShiftRng { + XorShiftRng { + x: w(0x193a6754), + y: w(0xa8a7d469), + z: w(0x97830e05), + w: w(0x113ba7bb), + } + } +} + +impl Rng for XorShiftRng { + #[inline] + fn next_u32(&mut self) -> u32 { + let x = self.x; + let t = x ^ (x << 11); + self.x = self.y; + self.y = self.z; + self.z = self.w; + let w_ = self.w; + self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8)); + self.w.0 + } +} + +impl SeedableRng<[u32; 4]> for XorShiftRng { + /// Reseed an XorShiftRng. This will panic if `seed` is entirely 0. + fn reseed(&mut self, seed: [u32; 4]) { + assert!(!seed.iter().all(|&x| x == 0), + "XorShiftRng.reseed called with an all zero seed."); + + self.x = w(seed[0]); + self.y = w(seed[1]); + self.z = w(seed[2]); + self.w = w(seed[3]); + } + + /// Create a new XorShiftRng. This will panic if `seed` is entirely 0. + fn from_seed(seed: [u32; 4]) -> XorShiftRng { + assert!(!seed.iter().all(|&x| x == 0), + "XorShiftRng::from_seed called with an all zero seed."); + + XorShiftRng { + x: w(seed[0]), + y: w(seed[1]), + z: w(seed[2]), + w: w(seed[3]), + } + } +} + +impl Rand for XorShiftRng { + fn rand(rng: &mut R) -> XorShiftRng { + let mut tuple: (u32, u32, u32, u32) = rng.gen(); + while tuple == (0, 0, 0, 0) { + tuple = rng.gen(); + } + let (x, y, z, w_) = tuple; + XorShiftRng { x: w(x), y: w(y), z: w(z), w: w(w_) } + } +} diff --git a/third_party/rust/rand/src/rand_impls.rs b/third_party/rust/rand/src/rand_impls.rs index a9cf5d9908bd..a865bb695826 100644 --- a/third_party/rust/rand/src/rand_impls.rs +++ b/third_party/rust/rand/src/rand_impls.rs @@ -10,8 +10,7 @@ //! The implementations of `Rand` for the built-in types. 
-use std::char; -use std::mem; +use core::{char, mem}; use {Rand,Rng}; diff --git a/third_party/rust/rand/src/reseeding.rs b/third_party/rust/rand/src/reseeding.rs index 2fba9f4ad606..1f24e2006fd2 100644 --- a/third_party/rust/rand/src/reseeding.rs +++ b/third_party/rust/rand/src/reseeding.rs @@ -11,7 +11,7 @@ //! A wrapper around another RNG that reseeds it after it //! generates a certain number of random bytes. -use std::default::Default; +use core::default::Default; use {Rng, SeedableRng}; diff --git a/third_party/rust/rand/src/seq.rs b/third_party/rust/rand/src/seq.rs new file mode 100644 index 000000000000..a7889fe34b23 --- /dev/null +++ b/third_party/rust/rand/src/seq.rs @@ -0,0 +1,337 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Functions for randomly accessing and sampling sequences. + +use super::Rng; + +// This crate is only enabled when either std or alloc is available. +// BTreeMap is not as fast in tests, but better than nothing. +#[cfg(feature="std")] use std::collections::HashMap; +#[cfg(not(feature="std"))] use alloc::btree_map::BTreeMap; + +#[cfg(not(feature="std"))] use alloc::Vec; + +/// Randomly sample `amount` elements from a finite iterator. +/// +/// The following can be returned: +/// - `Ok`: `Vec` of `amount` non-repeating randomly sampled elements. The order is not random. +/// - `Err`: `Vec` of all the elements from `iterable` in sequential order. This happens when the +/// length of `iterable` was less than `amount`. This is considered an error since exactly +/// `amount` elements is typically expected. +/// +/// This implementation uses `O(len(iterable))` time and `O(amount)` memory. 
+/// +/// # Example +/// +/// ```rust +/// use rand::{thread_rng, seq}; +/// +/// let mut rng = thread_rng(); +/// let sample = seq::sample_iter(&mut rng, 1..100, 5).unwrap(); +/// println!("{:?}", sample); +/// ``` +pub fn sample_iter(rng: &mut R, iterable: I, amount: usize) -> Result, Vec> + where I: IntoIterator, + R: Rng, +{ + let mut iter = iterable.into_iter(); + let mut reservoir = Vec::with_capacity(amount); + reservoir.extend(iter.by_ref().take(amount)); + + // Continue unless the iterator was exhausted + // + // note: this prevents iterators that "restart" from causing problems. + // If the iterator stops once, then so do we. + if reservoir.len() == amount { + for (i, elem) in iter.enumerate() { + let k = rng.gen_range(0, i + 1 + amount); + if let Some(spot) = reservoir.get_mut(k) { + *spot = elem; + } + } + Ok(reservoir) + } else { + // Don't hang onto extra memory. There is a corner case where + // `amount` was much less than `len(iterable)`. + reservoir.shrink_to_fit(); + Err(reservoir) + } +} + +/// Randomly sample exactly `amount` values from `slice`. +/// +/// The values are non-repeating and in random order. +/// +/// This implementation uses `O(amount)` time and memory. +/// +/// Panics if `amount > slice.len()` +/// +/// # Example +/// +/// ```rust +/// use rand::{thread_rng, seq}; +/// +/// let mut rng = thread_rng(); +/// let values = vec![5, 6, 1, 3, 4, 6, 7]; +/// println!("{:?}", seq::sample_slice(&mut rng, &values, 3)); +/// ``` +pub fn sample_slice(rng: &mut R, slice: &[T], amount: usize) -> Vec + where R: Rng, + T: Clone +{ + let indices = sample_indices(rng, slice.len(), amount); + + let mut out = Vec::with_capacity(amount); + out.extend(indices.iter().map(|i| slice[*i].clone())); + out +} + +/// Randomly sample exactly `amount` references from `slice`. +/// +/// The references are non-repeating and in random order. +/// +/// This implementation uses `O(amount)` time and memory. 
+/// +/// Panics if `amount > slice.len()` +/// +/// # Example +/// +/// ```rust +/// use rand::{thread_rng, seq}; +/// +/// let mut rng = thread_rng(); +/// let values = vec![5, 6, 1, 3, 4, 6, 7]; +/// println!("{:?}", seq::sample_slice_ref(&mut rng, &values, 3)); +/// ``` +pub fn sample_slice_ref<'a, R, T>(rng: &mut R, slice: &'a [T], amount: usize) -> Vec<&'a T> + where R: Rng +{ + let indices = sample_indices(rng, slice.len(), amount); + + let mut out = Vec::with_capacity(amount); + out.extend(indices.iter().map(|i| &slice[*i])); + out +} + +/// Randomly sample exactly `amount` indices from `0..length`. +/// +/// The values are non-repeating and in random order. +/// +/// This implementation uses `O(amount)` time and memory. +/// +/// This method is used internally by the slice sampling methods, but it can sometimes be useful to +/// have the indices themselves so this is provided as an alternative. +/// +/// Panics if `amount > length` +pub fn sample_indices(rng: &mut R, length: usize, amount: usize) -> Vec + where R: Rng, +{ + if amount > length { + panic!("`amount` must be less than or equal to `slice.len()`"); + } + + // We are going to have to allocate at least `amount` for the output no matter what. However, + // if we use the `cached` version we will have to allocate `amount` as a HashMap as well since + // it inserts an element for every loop. + // + // Therefore, if `amount >= length / 2` then inplace will be both faster and use less memory. + // In fact, benchmarks show the inplace version is faster for length up to about 20 times + // faster than amount. + // + // TODO: there is probably even more fine-tuning that can be done here since + // `HashMap::with_capacity(amount)` probably allocates more than `amount` in practice, + // and a trade off could probably be made between memory/cpu, since hashmap operations + // are slower than array index swapping. 
+ if amount >= length / 20 { + sample_indices_inplace(rng, length, amount) + } else { + sample_indices_cache(rng, length, amount) + } +} + +/// Sample an amount of indices using an inplace partial fisher yates method. +/// +/// This allocates the entire `length` of indices and randomizes only the first `amount`. +/// It then truncates to `amount` and returns. +/// +/// This is better than using a HashMap "cache" when `amount >= length / 2` since it does not +/// require allocating an extra cache and is much faster. +fn sample_indices_inplace(rng: &mut R, length: usize, amount: usize) -> Vec + where R: Rng, +{ + debug_assert!(amount <= length); + let mut indices: Vec = Vec::with_capacity(length); + indices.extend(0..length); + for i in 0..amount { + let j: usize = rng.gen_range(i, length); + let tmp = indices[i]; + indices[i] = indices[j]; + indices[j] = tmp; + } + indices.truncate(amount); + debug_assert_eq!(indices.len(), amount); + indices +} + + +/// This method performs a partial fisher-yates on a range of indices using a HashMap +/// as a cache to record potential collisions. +/// +/// The cache avoids allocating the entire `length` of values. This is especially useful when +/// `amount <<< length`, i.e. 
select 3 non-repeating from 1_000_000 +fn sample_indices_cache( + rng: &mut R, + length: usize, + amount: usize, +) -> Vec + where R: Rng, +{ + debug_assert!(amount <= length); + #[cfg(feature="std")] let mut cache = HashMap::with_capacity(amount); + #[cfg(not(feature="std"))] let mut cache = BTreeMap::new(); + let mut out = Vec::with_capacity(amount); + for i in 0..amount { + let j: usize = rng.gen_range(i, length); + + // equiv: let tmp = slice[i]; + let tmp = match cache.get(&i) { + Some(e) => *e, + None => i, + }; + + // equiv: slice[i] = slice[j]; + let x = match cache.get(&j) { + Some(x) => *x, + None => j, + }; + + // equiv: slice[j] = tmp; + cache.insert(j, tmp); + + // note that in the inplace version, slice[i] is automatically "returned" value + out.push(x); + } + debug_assert_eq!(out.len(), amount); + out +} + +#[cfg(test)] +mod test { + use super::*; + use {thread_rng, XorShiftRng, SeedableRng}; + + #[test] + fn test_sample_iter() { + let min_val = 1; + let max_val = 100; + + let mut r = thread_rng(); + let vals = (min_val..max_val).collect::>(); + let small_sample = sample_iter(&mut r, vals.iter(), 5).unwrap(); + let large_sample = sample_iter(&mut r, vals.iter(), vals.len() + 5).unwrap_err(); + + assert_eq!(small_sample.len(), 5); + assert_eq!(large_sample.len(), vals.len()); + // no randomization happens when amount >= len + assert_eq!(large_sample, vals.iter().collect::>()); + + assert!(small_sample.iter().all(|e| { + **e >= min_val && **e <= max_val + })); + } + #[test] + fn test_sample_slice_boundaries() { + let empty: &[u8] = &[]; + + let mut r = thread_rng(); + + // sample 0 items + assert_eq!(sample_slice(&mut r, empty, 0), vec![]); + assert_eq!(sample_slice(&mut r, &[42, 2, 42], 0), vec![]); + + // sample 1 item + assert_eq!(sample_slice(&mut r, &[42], 1), vec![42]); + let v = sample_slice(&mut r, &[1, 42], 1)[0]; + assert!(v == 1 || v == 42); + + // sample "all" the items + let v = sample_slice(&mut r, &[42, 133], 2); + assert!(v == vec![42, 
133] || v == vec![133, 42]); + + assert_eq!(sample_indices_inplace(&mut r, 0, 0), vec![]); + assert_eq!(sample_indices_inplace(&mut r, 1, 0), vec![]); + assert_eq!(sample_indices_inplace(&mut r, 1, 1), vec![0]); + + assert_eq!(sample_indices_cache(&mut r, 0, 0), vec![]); + assert_eq!(sample_indices_cache(&mut r, 1, 0), vec![]); + assert_eq!(sample_indices_cache(&mut r, 1, 1), vec![0]); + + // Make sure lucky 777's aren't lucky + let slice = &[42, 777]; + let mut num_42 = 0; + let total = 1000; + for _ in 0..total { + let v = sample_slice(&mut r, slice, 1); + assert_eq!(v.len(), 1); + let v = v[0]; + assert!(v == 42 || v == 777); + if v == 42 { + num_42 += 1; + } + } + let ratio_42 = num_42 as f64 / 1000 as f64; + assert!(0.4 <= ratio_42 || ratio_42 <= 0.6, "{}", ratio_42); + } + + #[test] + fn test_sample_slice() { + let xor_rng = XorShiftRng::from_seed; + + let max_range = 100; + let mut r = thread_rng(); + + for length in 1usize..max_range { + let amount = r.gen_range(0, length); + let seed: [u32; 4] = [ + r.next_u32(), r.next_u32(), r.next_u32(), r.next_u32() + ]; + + println!("Selecting indices: len={}, amount={}, seed={:?}", length, amount, seed); + + // assert that the two index methods give exactly the same result + let inplace = sample_indices_inplace( + &mut xor_rng(seed), length, amount); + let cache = sample_indices_cache( + &mut xor_rng(seed), length, amount); + assert_eq!(inplace, cache); + + // assert the basics work + let regular = sample_indices( + &mut xor_rng(seed), length, amount); + assert_eq!(regular.len(), amount); + assert!(regular.iter().all(|e| *e < length)); + assert_eq!(regular, inplace); + + // also test that sampling the slice works + let vec: Vec = (0..length).collect(); + { + let result = sample_slice(&mut xor_rng(seed), &vec, amount); + assert_eq!(result, regular); + } + + { + let result = sample_slice_ref(&mut xor_rng(seed), &vec, amount); + let expected = regular.iter().map(|v| v).collect::>(); + assert_eq!(result, expected); + } + 
} + } +} diff --git a/third_party/rust/rand/utils/ziggurat_tables.py b/third_party/rust/rand/utils/ziggurat_tables.py new file mode 100755 index 000000000000..762f9565b780 --- /dev/null +++ b/third_party/rust/rand/utils/ziggurat_tables.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# +# Copyright 2013 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +# This creates the tables used for distributions implemented using the +# ziggurat algorithm in `rand::distributions;`. They are +# (basically) the tables as used in the ZIGNOR variant (Doornik 2005). +# They are changed rarely, so the generated file should be checked in +# to git. +# +# It creates 3 tables: X as in the paper, F which is f(x_i), and +# F_DIFF which is f(x_i) - f(x_{i-1}). The latter two are just cached +# values which is not done in that paper (but is done in other +# variants). Note that the adZigR table is unnecessary because of +# algebra. +# +# It is designed to be compatible with Python 2 and 3. + +from math import exp, sqrt, log, floor +import random + +# The order should match the return value of `tables` +TABLE_NAMES = ['X', 'F'] + +# The actual length of the table is 1 more, to stop +# index-out-of-bounds errors. This should match the bitwise operation +# to find `i` in `zigurrat` in `libstd/rand/mod.rs`. Also the *_R and +# *_V constants below depend on this value. +TABLE_LEN = 256 + +# equivalent to `zigNorInit` in Doornik2005, but generalised to any +# distribution. 
r = dR, v = dV, f = probability density function, +# f_inv = inverse of f +def tables(r, v, f, f_inv): + # compute the x_i + xvec = [0]*(TABLE_LEN+1) + + xvec[0] = v / f(r) + xvec[1] = r + + for i in range(2, TABLE_LEN): + last = xvec[i-1] + xvec[i] = f_inv(v / last + f(last)) + + # cache the f's + fvec = [0]*(TABLE_LEN+1) + for i in range(TABLE_LEN+1): + fvec[i] = f(xvec[i]) + + return xvec, fvec + +# Distributions +# N(0, 1) +def norm_f(x): + return exp(-x*x/2.0) +def norm_f_inv(y): + return sqrt(-2.0*log(y)) + +NORM_R = 3.6541528853610088 +NORM_V = 0.00492867323399 + +NORM = tables(NORM_R, NORM_V, + norm_f, norm_f_inv) + +# Exp(1) +def exp_f(x): + return exp(-x) +def exp_f_inv(y): + return -log(y) + +EXP_R = 7.69711747013104972 +EXP_V = 0.0039496598225815571993 + +EXP = tables(EXP_R, EXP_V, + exp_f, exp_f_inv) + + +# Output the tables/constants/types + +def render_static(name, type, value): + # no space or + return 'pub static %s: %s =%s;\n' % (name, type, value) + +# static `name`: [`type`, .. `len(values)`] = +# [values[0], ..., values[3], +# values[4], ..., values[7], +# ... ]; +def render_table(name, values): + rows = [] + # 4 values on each row + for i in range(0, len(values), 4): + row = values[i:i+4] + rows.append(', '.join('%.18f' % f for f in row)) + + rendered = '\n [%s]' % ',\n '.join(rows) + return render_static(name, '[f64, .. %d]' % len(values), rendered) + + +with open('ziggurat_tables.rs', 'w') as f: + f.write('''// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Tables for distributions which are sampled using the ziggurat +// algorithm. Autogenerated by `ziggurat_tables.py`. + +pub type ZigTable = &\'static [f64, .. 
%d]; +''' % (TABLE_LEN + 1)) + for name, tables, r in [('NORM', NORM, NORM_R), + ('EXP', EXP, EXP_R)]: + f.write(render_static('ZIG_%s_R' % name, 'f64', ' %.18f' % r)) + for (tabname, table) in zip(TABLE_NAMES, tables): + f.write(render_table('ZIG_%s_%s' % (name, tabname), table)) diff --git a/third_party/rust/slab-0.3.0/.cargo-checksum.json b/third_party/rust/slab-0.3.0/.cargo-checksum.json new file mode 100644 index 000000000000..d84e947bc6be --- /dev/null +++ b/third_party/rust/slab-0.3.0/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"4cd8cbaedfe34dd4e0cc476e1484dc950b7ae90b693073fa89a298b014e6c0a1","README.md":"36ba748d4deb1875f5355dbf997be6ef1cb857709d78db7127c24d640e90300a","src/lib.rs":"003277f46755d1870148756841dbaad216109812cd659e4862e220e7a5b0c963"},"package":"17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"} \ No newline at end of file diff --git a/third_party/rust/slab-0.3.0/Cargo.toml b/third_party/rust/slab-0.3.0/Cargo.toml new file mode 100644 index 000000000000..080051801a70 --- /dev/null +++ b/third_party/rust/slab-0.3.0/Cargo.toml @@ -0,0 +1,17 @@ +[package] + +name = "slab" +version = "0.3.0" +license = "MIT" +authors = ["Carl Lerche "] +description = "Simple slab allocator" +documentation = "https://docs.rs/slab/0.3.0/slab/" +homepage = "https://github.com/carllerche/slab" +repository = "https://github.com/carllerche/slab" +readme = "README.md" +keywords = ["slab", "allocator"] +exclude = [ + ".gitignore", + ".travis.yml", + "test/**/*", +] diff --git a/third_party/rust/slab-0.3.0/README.md b/third_party/rust/slab-0.3.0/README.md new file mode 100644 index 000000000000..8e4b462b4d5a --- /dev/null +++ b/third_party/rust/slab-0.3.0/README.md @@ -0,0 +1,3 @@ +Slab Allocator for Rust + +Preallocate memory for values of a given type. 
diff --git a/third_party/rust/slab-0.3.0/src/lib.rs b/third_party/rust/slab-0.3.0/src/lib.rs new file mode 100644 index 000000000000..a8f2de2c9f94 --- /dev/null +++ b/third_party/rust/slab-0.3.0/src/lib.rs @@ -0,0 +1,837 @@ +use std::{fmt, mem, usize}; +use std::iter::IntoIterator; +use std::ops; +use std::marker::PhantomData; + +/// A preallocated chunk of memory for storing objects of the same type. +pub struct Slab { + // Chunk of memory + entries: Vec>, + + // Number of Filled elements currently in the slab + len: usize, + + // Offset of the next available slot in the slab. Set to the slab's + // capacity when the slab is full. + next: usize, + + _marker: PhantomData, +} + +/// A handle to an occupied slot in the `Slab` +pub struct Entry<'a, T: 'a, I: 'a> { + slab: &'a mut Slab, + idx: usize, +} + +/// A handle to a vacant slot in the `Slab` +pub struct VacantEntry<'a, T: 'a, I: 'a> { + slab: &'a mut Slab, + idx: usize, +} + +/// An iterator over the values stored in the `Slab` +pub struct Iter<'a, T: 'a, I: 'a> { + slab: &'a Slab, + cur_idx: usize, + yielded: usize, +} + +/// A mutable iterator over the values stored in the `Slab` +pub struct IterMut<'a, T: 'a, I: 'a> { + slab: *mut Slab, + cur_idx: usize, + yielded: usize, + _marker: PhantomData<&'a mut ()>, +} + +enum Slot { + Empty(usize), + Filled(T), + Invalid, +} + +unsafe impl Send for Slab where T: Send {} + +macro_rules! 
some { + ($expr:expr) => (match $expr { + Some(val) => val, + None => return None, + }) +} + +impl Slab { + /// Returns an empty `Slab` with the requested capacity + pub fn with_capacity(capacity: usize) -> Slab { + let entries = (1..capacity + 1) + .map(Slot::Empty) + .collect::>(); + + Slab { + entries: entries, + next: 0, + len: 0, + _marker: PhantomData, + } + } + + /// Returns the number of values stored by the `Slab` + pub fn len(&self) -> usize { + self.len + } + + /// Returns the total capacity of the `Slab` + pub fn capacity(&self) -> usize { + self.entries.len() + } + + /// Returns true if the `Slab` is storing no values + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + /// Returns the number of available slots remaining in the `Slab` + pub fn available(&self) -> usize { + self.entries.len() - self.len + } + + /// Returns true if the `Slab` has available slots + pub fn has_available(&self) -> bool { + self.available() > 0 + } +} + +impl + From> Slab { + /// Returns true if the `Slab` contains a value for the given token + pub fn contains(&self, idx: I) -> bool { + self.get(idx).is_some() + } + + /// Get a reference to the value associated with the given token + pub fn get(&self, idx: I) -> Option<&T> { + let idx = some!(self.local_index(idx)); + + match self.entries[idx] { + Slot::Filled(ref val) => Some(val), + Slot::Empty(_) => None, + Slot::Invalid => panic!("Slab corrupt"), + } + } + + /// Get a mutable reference to the value associated with the given token + pub fn get_mut(&mut self, idx: I) -> Option<&mut T> { + let idx = some!(self.local_index(idx)); + + match self.entries[idx] { + Slot::Filled(ref mut v) => Some(v), + _ => None, + } + } + + /// Insert a value into the slab, returning the associated token + pub fn insert(&mut self, val: T) -> Result { + match self.vacant_entry() { + Some(entry) => Ok(entry.insert(val).index()), + None => Err(val), + } + } + + /// Returns a handle to an entry. 
+ /// + /// This allows more advanced manipulation of the value stored at the given + /// index. + pub fn entry(&mut self, idx: I) -> Option> { + let idx = some!(self.local_index(idx)); + + match self.entries[idx] { + Slot::Filled(_) => { + Some(Entry { + slab: self, + idx: idx, + }) + } + Slot::Empty(_) => None, + Slot::Invalid => panic!("Slab corrupt"), + } + } + + /// Returns a handle to a vacant entry. + /// + /// This allows optionally inserting a value that is constructed with the + /// index. + pub fn vacant_entry(&mut self) -> Option> { + let idx = self.next; + + if idx >= self.entries.len() { + return None; + } + + Some(VacantEntry { + slab: self, + idx: idx, + }) + } + + /// Releases the given slot + pub fn remove(&mut self, idx: I) -> Option { + self.entry(idx).map(Entry::remove) + } + + /// Retain only the elements specified by the predicate. + /// + /// In other words, remove all elements `e` such that `f(&e)` returns false. + /// This method operates in place and preserves the order of the retained + /// elements. + pub fn retain(&mut self, mut f: F) + where F: FnMut(&T) -> bool + { + for i in 0..self.entries.len() { + if let Some(e) = self.entry(I::from(i)) { + if !f(e.get()) { + e.remove(); + } + } + } + } + + /// An iterator for visiting all elements stored in the `Slab` + pub fn iter(&self) -> Iter { + Iter { + slab: self, + cur_idx: 0, + yielded: 0, + } + } + + /// A mutable iterator for visiting all elements stored in the `Slab` + pub fn iter_mut(&mut self) -> IterMut { + IterMut { + slab: self as *mut Slab, + cur_idx: 0, + yielded: 0, + _marker: PhantomData, + } + } + + /// Empty the slab, by freeing all entries + pub fn clear(&mut self) { + for (i, e) in self.entries.iter_mut().enumerate() { + *e = Slot::Empty(i + 1) + } + self.next = 0; + self.len = 0; + } + + /// Reserves the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `Slab`. Does nothing if the capacity is + /// already sufficient. 
+ pub fn reserve_exact(&mut self, additional: usize) { + let prev_len = self.entries.len(); + + // Ensure `entries_num` isn't too big + assert!(additional < usize::MAX - prev_len, "capacity too large"); + + let prev_len_next = prev_len + 1; + self.entries.extend((prev_len_next..(prev_len_next + additional)).map(Slot::Empty)); + + debug_assert_eq!(self.entries.len(), prev_len + additional); + } + + fn insert_at(&mut self, idx: usize, value: T) -> I { + self.next = match self.entries[idx] { + Slot::Empty(next) => next, + Slot::Filled(_) => panic!("Index already contains value"), + Slot::Invalid => panic!("Slab corrupt"), + }; + + self.entries[idx] = Slot::Filled(value); + self.len += 1; + + I::from(idx) + } + + fn replace(&mut self, idx: usize, e: Slot) -> Option { + if let Slot::Filled(val) = mem::replace(&mut self.entries[idx], e) { + self.next = idx; + return Some(val); + } + + None + } + + fn local_index(&self, idx: I) -> Option { + let idx: usize = idx.into(); + + if idx >= self.entries.len() { + return None; + } + + Some(idx) + } +} + +impl + Into> ops::Index for Slab { + type Output = T; + + fn index(&self, index: I) -> &T { + self.get(index).expect("invalid index") + } +} + +impl + Into> ops::IndexMut for Slab { + fn index_mut(&mut self, index: I) -> &mut T { + self.get_mut(index).expect("invalid index") + } +} + +impl fmt::Debug for Slab + where T: fmt::Debug, + I: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, + "Slab {{ len: {}, cap: {} }}", + self.len, + self.capacity()) + } +} + +impl<'a, T, I: From + Into> IntoIterator for &'a Slab { + type Item = &'a T; + type IntoIter = Iter<'a, T, I>; + + fn into_iter(self) -> Iter<'a, T, I> { + self.iter() + } +} + +impl<'a, T, I: From + Into> IntoIterator for &'a mut Slab { + type Item = &'a mut T; + type IntoIter = IterMut<'a, T, I>; + + fn into_iter(self) -> IterMut<'a, T, I> { + self.iter_mut() + } +} + +/* + * + * ===== Entry ===== + * + */ + +impl<'a, T, I: From + Into> 
Entry<'a, T, I> { + + /// Replace the value stored in the entry + pub fn replace(&mut self, val: T) -> T { + match mem::replace(&mut self.slab.entries[self.idx], Slot::Filled(val)) { + Slot::Filled(v) => v, + _ => panic!("Slab corrupt"), + } + } + + /// Apply the function to the current value, replacing it with the result + /// of the function. + pub fn replace_with(&mut self, f: F) + where F: FnOnce(T) -> T + { + let idx = self.idx; + + // Take the value out of the entry, temporarily setting it to Invalid + let val = match mem::replace(&mut self.slab.entries[idx], Slot::Invalid) { + Slot::Filled(v) => f(v), + _ => panic!("Slab corrupt"), + }; + + self.slab.entries[idx] = Slot::Filled(val); + } + + /// Remove and return the value stored in the entry + pub fn remove(self) -> T { + let next = self.slab.next; + + if let Some(v) = self.slab.replace(self.idx, Slot::Empty(next)) { + self.slab.len -= 1; + v + } else { + panic!("Slab corrupt"); + } + } + + /// Get a reference to the value stored in the entry + pub fn get(&self) -> &T { + let idx = self.index(); + self.slab + .get(idx) + .expect("Filled slot in Entry") + } + + /// Get a mutable reference to the value stored in the entry + pub fn get_mut(&mut self) -> &mut T { + let idx = self.index(); + self.slab + .get_mut(idx) + .expect("Filled slot in Entry") + } + + /// Convert the entry handle to a mutable reference + pub fn into_mut(self) -> &'a mut T { + let idx = self.index(); + self.slab + .get_mut(idx) + .expect("Filled slot in Entry") + } + + /// Return the entry index + pub fn index(&self) -> I { + I::from(self.idx) + } +} + +/* + * + * ===== VacantEntry ===== + * + */ + +impl<'a, T, I: From + Into> VacantEntry<'a, T, I> { + /// Insert a value into the entry + pub fn insert(self, val: T) -> Entry<'a, T, I> { + self.slab.insert_at(self.idx, val); + + Entry { + slab: self.slab, + idx: self.idx, + } + } + + /// Returns the entry index + pub fn index(&self) -> I { + I::from(self.idx) + } +} + +/* + * + * ===== Iter 
===== + * + */ + +impl<'a, T, I> Iterator for Iter<'a, T, I> { + type Item = &'a T; + + fn next(&mut self) -> Option<&'a T> { + while self.yielded < self.slab.len { + match self.slab.entries[self.cur_idx] { + Slot::Filled(ref v) => { + self.cur_idx += 1; + self.yielded += 1; + return Some(v); + } + Slot::Empty(_) => { + self.cur_idx += 1; + } + Slot::Invalid => { + panic!("Slab corrupt"); + } + } + } + + None + } +} + +/* + * + * ===== IterMut ===== + * + */ + +impl<'a, T, I> Iterator for IterMut<'a, T, I> { + type Item = &'a mut T; + + fn next(&mut self) -> Option<&'a mut T> { + unsafe { + while self.yielded < (*self.slab).len { + let idx = self.cur_idx; + + match (*self.slab).entries[idx] { + Slot::Filled(ref mut v) => { + self.cur_idx += 1; + self.yielded += 1; + return Some(v); + } + Slot::Empty(_) => { + self.cur_idx += 1; + } + Slot::Invalid => { + panic!("Slab corrupt"); + } + } + } + + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct MyIndex(pub usize); + + impl From for MyIndex { + fn from(i: usize) -> MyIndex { + MyIndex(i) + } + } + + impl Into for MyIndex { + fn into(self) -> usize { + self.0 + } + } + + #[test] + fn test_index_trait() { + let mut slab = Slab::::with_capacity(1); + let idx = slab.insert(10).ok().expect("Failed to insert"); + assert_eq!(idx, MyIndex(0)); + assert_eq!(slab[idx], 10); + } + + #[test] + fn test_insertion() { + let mut slab = Slab::::with_capacity(1); + assert_eq!(slab.is_empty(), true); + assert_eq!(slab.has_available(), true); + assert_eq!(slab.available(), 1); + let idx = slab.insert(10).ok().expect("Failed to insert"); + assert_eq!(slab[idx], 10); + assert_eq!(slab.is_empty(), false); + assert_eq!(slab.has_available(), false); + assert_eq!(slab.available(), 0); + } + + #[test] + fn test_insert_with() { + let mut slab = Slab::::with_capacity(1); + + { + let e = slab.vacant_entry().unwrap(); + assert_eq!(e.index(), 0); + let e 
= e.insert(5); + assert_eq!(5, *e.get()); + } + + assert_eq!(Some(&5), slab.get(0)); + } + + #[test] + fn test_repeated_insertion() { + let mut slab = Slab::::with_capacity(10); + + for i in 0..10 { + let idx = slab.insert(i + 10).ok().expect("Failed to insert"); + assert_eq!(slab[idx], i + 10); + } + + slab.insert(20).err().expect("Inserted when full"); + } + + #[test] + fn test_repeated_insertion_and_removal() { + let mut slab = Slab::::with_capacity(10); + let mut indices = vec![]; + + for i in 0..10 { + let idx = slab.insert(i + 10).ok().expect("Failed to insert"); + indices.push(idx); + assert_eq!(slab[idx], i + 10); + } + + for &i in indices.iter() { + slab.remove(i); + } + + slab.insert(20).ok().expect("Failed to insert in newly empty slab"); + } + + #[test] + fn test_insertion_when_full() { + let mut slab = Slab::::with_capacity(1); + slab.insert(10).ok().expect("Failed to insert"); + slab.insert(10).err().expect("Inserted into a full slab"); + } + + #[test] + fn test_removal_at_boundries() { + let mut slab = Slab::::with_capacity(1); + assert_eq!(slab.remove(0), None); + assert_eq!(slab.remove(1), None); + } + + #[test] + fn test_removal_is_successful() { + let mut slab = Slab::::with_capacity(1); + let t1 = slab.insert(10).ok().expect("Failed to insert"); + slab.remove(t1); + let t2 = slab.insert(20).ok().expect("Failed to insert"); + assert_eq!(slab[t2], 20); + } + + #[test] + fn test_remove_empty_entry() { + let mut s = Slab::<(), usize>::with_capacity(3); + let t1 = s.insert(()).unwrap(); + assert!(s.remove(t1).is_some()); + assert!(s.remove(t1).is_none()); + assert!(s.insert(()).is_ok()); + assert!(s.insert(()).is_ok()); + } + + #[test] + fn test_mut_retrieval() { + let mut slab = Slab::<_, usize>::with_capacity(1); + let t1 = slab.insert("foo".to_string()).ok().expect("Failed to insert"); + + slab[t1].push_str("bar"); + + assert_eq!(&slab[t1][..], "foobar"); + } + + #[test] + #[should_panic] + fn test_reusing_slots_1() { + let mut slab = 
Slab::::with_capacity(16); + + let t0 = slab.insert(123).unwrap(); + let t1 = slab.insert(456).unwrap(); + + assert!(slab.len() == 2); + assert!(slab.available() == 14); + + slab.remove(t0); + + assert!(slab.len() == 1, "actual={}", slab.len()); + assert!(slab.available() == 15); + + slab.remove(t1); + + assert!(slab.len() == 0); + assert!(slab.available() == 16); + + let _ = slab[t1]; + } + + #[test] + fn test_reusing_slots_2() { + let mut slab = Slab::::with_capacity(16); + + let t0 = slab.insert(123).unwrap(); + + assert!(slab[t0] == 123); + assert!(slab.remove(t0) == Some(123)); + + let t0 = slab.insert(456).unwrap(); + + assert!(slab[t0] == 456); + + let t1 = slab.insert(789).unwrap(); + + assert!(slab[t0] == 456); + assert!(slab[t1] == 789); + + assert!(slab.remove(t0).unwrap() == 456); + assert!(slab.remove(t1).unwrap() == 789); + + assert!(slab.len() == 0); + } + + #[test] + #[should_panic] + fn test_accessing_out_of_bounds() { + let slab = Slab::::with_capacity(16); + slab[0]; + } + + #[test] + #[should_panic] + fn test_capacity_too_large1() { + use std::usize; + Slab::::with_capacity(usize::MAX); + } + + #[test] + #[should_panic] + fn test_capacity_too_large_in_reserve_exact() { + use std::usize; + let mut slab = Slab::::with_capacity(100); + slab.reserve_exact(usize::MAX - 100); + } + + #[test] + fn test_contains() { + let mut slab = Slab::with_capacity(16); + assert!(!slab.contains(0)); + + let idx = slab.insert(111).unwrap(); + assert!(slab.contains(idx)); + } + + #[test] + fn test_get() { + let mut slab = Slab::::with_capacity(16); + let tok = slab.insert(5).unwrap(); + assert_eq!(slab.get(tok), Some(&5)); + assert_eq!(slab.get(1), None); + assert_eq!(slab.get(23), None); + } + + #[test] + fn test_get_mut() { + let mut slab = Slab::::with_capacity(16); + let tok = slab.insert(5u32).unwrap(); + { + let mut_ref = slab.get_mut(tok).unwrap(); + assert_eq!(*mut_ref, 5); + *mut_ref = 12; + } + assert_eq!(slab[tok], 12); + assert_eq!(slab.get_mut(1), None); 
+ assert_eq!(slab.get_mut(23), None); + } + + #[test] + fn test_replace() { + let mut slab = Slab::::with_capacity(16); + let tok = slab.insert(5).unwrap(); + + slab.entry(tok).unwrap().replace(6); + assert!(slab.entry(tok + 1).is_none()); + + assert_eq!(slab[tok], 6); + assert_eq!(slab.len(), 1); + } + + #[test] + fn test_replace_again() { + let mut slab = Slab::::with_capacity(16); + let tok = slab.insert(5).unwrap(); + + slab.entry(tok).unwrap().replace(6); + slab.entry(tok).unwrap().replace(7); + slab.entry(tok).unwrap().replace(8); + assert_eq!(slab[tok], 8); + } + + #[test] + fn test_replace_with() { + let mut slab = Slab::::with_capacity(16); + let tok = slab.insert(5u32).unwrap(); + slab.entry(tok).unwrap().replace_with(|x| x + 1); + assert_eq!(slab[tok], 6); + } + + #[test] + fn test_retain() { + let mut slab = Slab::::with_capacity(2); + let tok1 = slab.insert(0).unwrap(); + let tok2 = slab.insert(1).unwrap(); + slab.retain(|x| x % 2 == 0); + assert_eq!(slab.len(), 1); + assert_eq!(slab[tok1], 0); + assert_eq!(slab.contains(tok2), false); + } + + #[test] + fn test_iter() { + let mut slab = Slab::::with_capacity(4); + for i in 0..4 { + slab.insert(i).unwrap(); + } + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![0, 1, 2, 3]); + + slab.remove(1); + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![0, 2, 3]); + } + + #[test] + fn test_iter_mut() { + let mut slab = Slab::::with_capacity(4); + for i in 0..4 { + slab.insert(i).unwrap(); + } + for e in slab.iter_mut() { + *e = *e + 1; + } + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![1, 2, 3, 4]); + + slab.remove(2); + for e in slab.iter_mut() { + *e = *e + 1; + } + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![2, 3, 5]); + } + + #[test] + fn test_reserve_exact() { + let mut slab = Slab::::with_capacity(4); + for i in 0..4 { + slab.insert(i).unwrap(); + } + + assert!(slab.insert(0).is_err()); 
+ + slab.reserve_exact(3); + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![0, 1, 2, 3]); + + for i in 0..3 { + slab.insert(i).unwrap(); + } + assert!(slab.insert(0).is_err()); + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![0, 1, 2, 3, 0, 1, 2]); + } + + #[test] + fn test_clear() { + let mut slab = Slab::::with_capacity(4); + for i in 0..4 { + slab.insert(i).unwrap(); + } + + // clear full + slab.clear(); + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![]); + + for i in 0..2 { + slab.insert(i).unwrap(); + } + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![0, 1]); + + + // clear half-filled + slab.clear(); + + let vals: Vec = slab.iter().map(|r| *r).collect(); + assert_eq!(vals, vec![]); + } +} diff --git a/third_party/rust/slab/.cargo-checksum.json b/third_party/rust/slab/.cargo-checksum.json index d84e947bc6be..f98f7fe4a27b 100644 --- a/third_party/rust/slab/.cargo-checksum.json +++ b/third_party/rust/slab/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"4cd8cbaedfe34dd4e0cc476e1484dc950b7ae90b693073fa89a298b014e6c0a1","README.md":"36ba748d4deb1875f5355dbf997be6ef1cb857709d78db7127c24d640e90300a","src/lib.rs":"003277f46755d1870148756841dbaad216109812cd659e4862e220e7a5b0c963"},"package":"17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"} \ No newline at end of file 
+{"files":{".travis.yml":"513b2259c238f4f467fc135d2c74d34be858ddae8495d2268ec7da6d6add7ed5","CHANGELOG.md":"a3e1912aae6f72659729ba4b2e303aa46bfbe1110fe9133386ac3ac3b1c5a1ea","Cargo.toml":"f07494b6213d2ad4b56d1ea9d6579305175de381c1016fa169fc9e84e5ffab9d","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"ba0e174bd812dba6f93c38595c44187083dfafa408b308492b652182513218f6","src/lib.rs":"4736c1ce76fe28644d70dcc097404d19423578d0c85ae0b47d71c3a79e9c0649","tests/slab.rs":"c166c080b2534ffd5a5de4317f40072603678b2ca996a694c583eafadeb99a8c"},"package":"5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d"} \ No newline at end of file diff --git a/third_party/rust/slab/.travis.yml b/third_party/rust/slab/.travis.yml new file mode 100644 index 000000000000..407e1f2e8e6c --- /dev/null +++ b/third_party/rust/slab/.travis.yml @@ -0,0 +1,29 @@ +language: rust +rust: + - nightly + - stable + - 1.6.0 + +script: + - cargo test + - cargo doc --no-deps + +# Deploy documentation to S3 for specific branches. 
At some +# point, it would be nice to also support building docs for +# a specific tag +deploy: + provider: s3 + access_key_id: AKIAIXM3KLI7WZS4ZA3Q + secret_access_key: + secure: WyYzM8PxSKC3HQ+jINE50KOu5j3taOA4chJJ9zfAhM8Eug/Z1bK8taHnm73xrCUsvh4bv1C3XWAnSTl4YO/HykYulTIVPPs6go+ssk/59PDV6dGPhheLj2tKcSrjKd4q8H668MAPiAlNt9Rvq/GkkdAW2GXG1+otPMVFBrnR+kld6WaX5EB18SjApKgl5NwSRj9wiSIPYJTBnZQhCsaM4YRMkpFbFoHUWjSjm7N9f/6A3a3jRzW7/ZtqXvMaMazMSBAlN0/LH2UMTKCuj7nywKJt1NkpEF8mA9IEUCDBCnQs+e58v6BpkDZ2nhCJ7vdm0bISuZB6jXhg+sOycZbdb7mbn5n4mPBMa1c8WnsfmVxm7bV7G3sRpcGU8HvRT35lCCuCt4bFBX1O2abuTtVqS7XgtyChBmrSG6I/z+lw+u44Dk5bYK9A2hZSOEPFr09R8f2YRe9cqAq+uI6rNPyY7DC0eATCRCX5CxjYR6DG2bDoDFfPsBlRLQJJUl/BOM5pWYdm97iaqobxlPmKaxuxTSHw1D3Z9OvuQVeB2z+4G9xMhBBTJ0N671oZhUajpBy8OW4k9c8jl+joe01W+SScfk+qPV8ivjirTPYsUYRT3gtUgO/X/XuZ+EXGcnx+Brpu6FQtW6qSKH4Q+cofM4aohoopSIAP9dZ5zpQqQTACKyE= + bucket: rust-doc + endpoint: rust-doc.s3-website-us-east-1.amazonaws.com + skip_cleanup: true + local-dir: target/doc + upload-dir: slab/${TRAVIS_BRANCH} + acl: public_read + on: + condition: $TRAVIS_RUST_VERSION == "1.3.0" && $TRAVIS_OS_NAME == "linux" + repo: carllerche/slab + branch: + - master diff --git a/third_party/rust/slab/CHANGELOG.md b/third_party/rust/slab/CHANGELOG.md new file mode 100644 index 000000000000..85c78281d59e --- /dev/null +++ b/third_party/rust/slab/CHANGELOG.md @@ -0,0 +1,4 @@ +# 0.4.1 (July 15, 2018) + +* Improve `reserve` and `reserve_exact` (#37). +* Implement `Default` for `Slab` (#43). diff --git a/third_party/rust/slab/Cargo.toml b/third_party/rust/slab/Cargo.toml index 080051801a70..e24f199f120b 100644 --- a/third_party/rust/slab/Cargo.toml +++ b/third_party/rust/slab/Cargo.toml @@ -1,17 +1,24 @@ -[package] +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. 
crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) +[package] name = "slab" -version = "0.3.0" -license = "MIT" +version = "0.4.1" authors = ["Carl Lerche "] -description = "Simple slab allocator" -documentation = "https://docs.rs/slab/0.3.0/slab/" +description = "Pre-allocated storage for a uniform data type" homepage = "https://github.com/carllerche/slab" -repository = "https://github.com/carllerche/slab" +documentation = "https://docs.rs/slab" readme = "README.md" keywords = ["slab", "allocator"] -exclude = [ - ".gitignore", - ".travis.yml", - "test/**/*", -] +categories = ["memory-management", "data-structures"] +license = "MIT" +repository = "https://github.com/carllerche/slab" diff --git a/third_party/rust/slab/LICENSE b/third_party/rust/slab/LICENSE new file mode 100644 index 000000000000..58fb29a12384 --- /dev/null +++ b/third_party/rust/slab/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Carl Lerche + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/slab/README.md b/third_party/rust/slab/README.md index 8e4b462b4d5a..b7aa209f9b0a 100644 --- a/third_party/rust/slab/README.md +++ b/third_party/rust/slab/README.md @@ -1,3 +1,48 @@ -Slab Allocator for Rust +# Slab -Preallocate memory for values of a given type. +Pre-allocated storage for a uniform data type. + +[![Crates.io](https://img.shields.io/crates/v/slab.svg?maxAge=2592000)](https://crates.io/crates/slab) +[![Build Status](https://travis-ci.org/carllerche/slab.svg?branch=master)](https://travis-ci.org/carllerche/slab) + +[Documentation](https://docs.rs/slab) + +## Usage + +To use `slab`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +slab = "0.4" +``` + +Next, add this to your crate: + +```rust +extern crate slab; + +use slab::Slab; + +let mut slab = Slab::new(); + +let hello = slab.insert("hello"); +let world = slab.insert("world"); + +assert_eq!(slab[hello], "hello"); +assert_eq!(slab[world], "world"); + +slab[world] = "earth"; +assert_eq!(slab[world], "earth"); +``` + +See [documentation](https://docs.rs/slab) for more details. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `slab` by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/slab/src/lib.rs b/third_party/rust/slab/src/lib.rs index a8f2de2c9f94..7d37184c53a5 100644 --- a/third_party/rust/slab/src/lib.rs +++ b/third_party/rust/slab/src/lib.rs @@ -1,12 +1,120 @@ -use std::{fmt, mem, usize}; +//! Pre-allocated storage for a uniform data type. +//! +//! 
`Slab` provides pre-allocated storage for a single data type. If many values +//! of a single type are being allocated, it can be more efficient to +//! pre-allocate the necessary storage. Since the size of the type is uniform, +//! memory fragmentation can be avoided. Storing, clearing, and lookup +//! operations become very cheap. +//! +//! While `Slab` may look like other Rust collections, it is not intended to be +//! used as a general purpose collection. The primary difference between `Slab` +//! and `Vec` is that `Slab` returns the key when storing the value. +//! +//! It is important to note that keys may be reused. In other words, once a +//! value associated with a given key is removed from a slab, that key may be +//! returned from future calls to `insert`. +//! +//! # Examples +//! +//! Basic storing and retrieval. +//! +//! ``` +//! # use slab::*; +//! let mut slab = Slab::new(); +//! +//! let hello = slab.insert("hello"); +//! let world = slab.insert("world"); +//! +//! assert_eq!(slab[hello], "hello"); +//! assert_eq!(slab[world], "world"); +//! +//! slab[world] = "earth"; +//! assert_eq!(slab[world], "earth"); +//! ``` +//! +//! Sometimes it is useful to be able to associate the key with the value being +//! inserted in the slab. This can be done with the `vacant_entry` API as such: +//! +//! ``` +//! # use slab::*; +//! let mut slab = Slab::new(); +//! +//! let hello = { +//! let entry = slab.vacant_entry(); +//! let key = entry.key(); +//! +//! entry.insert((key, "hello")); +//! key +//! }; +//! +//! assert_eq!(hello, slab[hello].0); +//! assert_eq!("hello", slab[hello].1); +//! ``` +//! +//! It is generally a good idea to specify the desired capacity of a slab at +//! creation time. Note that `Slab` will grow the internal capacity when +//! attempting to insert a new value once the existing capacity has been reached. +//! To avoid this, add a check. +//! +//! ``` +//! # use slab::*; +//! let mut slab = Slab::with_capacity(1024); +//! +//! // ... 
use the slab +//! +//! if slab.len() == slab.capacity() { +//! panic!("slab full"); +//! } +//! +//! slab.insert("the slab is not at capacity yet"); +//! ``` +//! +//! # Capacity and reallocation +//! +//! The capacity of a slab is the amount of space allocated for any future +//! values that will be inserted in the slab. This is not to be confused with +//! the *length* of the slab, which specifies the number of actual values +//! currently being inserted. If a slab's length is equal to its capacity, the +//! next value inserted into the slab will require growing the slab by +//! reallocating. +//! +//! For example, a slab with capacity 10 and length 0 would be an empty slab +//! with space for 10 more stored values. Storing 10 or fewer elements into the +//! slab will not change its capacity or cause reallocation to occur. However, +//! if the slab length is increased to 11 (due to another `insert`), it will +//! have to reallocate, which can be slow. For this reason, it is recommended to +//! use [`Slab::with_capacity`] whenever possible to specify how many values the +//! slab is expected to store. +//! +//! # Implementation +//! +//! `Slab` is backed by a `Vec` of slots. Each slot is either occupied or +//! vacant. `Slab` maintains a stack of vacant slots using a linked list. To +//! find a vacant slot, the stack is popped. When a slot is released, it is +//! pushed onto the stack. +//! +//! If there are no more available slots in the stack, then `Vec::reserve(1)` is +//! called and a new slot is created. +//! +//! [`Slab::with_capacity`]: struct.Slab.html#with_capacity + +#![deny(warnings, missing_docs, missing_debug_implementations)] +#![doc(html_root_url = "https://docs.rs/slab/0.4.1")] +#![crate_name = "slab"] + +use std::{fmt, mem}; use std::iter::IntoIterator; use std::ops; -use std::marker::PhantomData; -/// A preallocated chunk of memory for storing objects of the same type. 
-pub struct Slab { +/// Pre-allocated storage for a uniform data type +/// +/// See the [module documentation] for more details. +/// +/// [module documentation]: index.html +#[derive(Clone)] +pub struct Slab { // Chunk of memory - entries: Vec>, + entries: Vec>, // Number of Filled elements currently in the slab len: usize, @@ -14,280 +122,670 @@ pub struct Slab { // Offset of the next available slot in the slab. Set to the slab's // capacity when the slab is full. next: usize, - - _marker: PhantomData, } -/// A handle to an occupied slot in the `Slab` -pub struct Entry<'a, T: 'a, I: 'a> { - slab: &'a mut Slab, - idx: usize, +impl Default for Slab { + fn default() -> Self { + Slab::new() + } } -/// A handle to a vacant slot in the `Slab` -pub struct VacantEntry<'a, T: 'a, I: 'a> { - slab: &'a mut Slab, - idx: usize, +/// A handle to a vacant entry in a `Slab`. +/// +/// `VacantEntry` allows constructing values with the key that they will be +/// assigned to. +/// +/// # Examples +/// +/// ``` +/// # use slab::*; +/// let mut slab = Slab::new(); +/// +/// let hello = { +/// let entry = slab.vacant_entry(); +/// let key = entry.key(); +/// +/// entry.insert((key, "hello")); +/// key +/// }; +/// +/// assert_eq!(hello, slab[hello].0); +/// assert_eq!("hello", slab[hello].1); +/// ``` +#[derive(Debug)] +pub struct VacantEntry<'a, T: 'a> { + slab: &'a mut Slab, + key: usize, } /// An iterator over the values stored in the `Slab` -pub struct Iter<'a, T: 'a, I: 'a> { - slab: &'a Slab, - cur_idx: usize, - yielded: usize, +pub struct Iter<'a, T: 'a> { + entries: std::slice::Iter<'a, Entry>, + curr: usize, } /// A mutable iterator over the values stored in the `Slab` -pub struct IterMut<'a, T: 'a, I: 'a> { - slab: *mut Slab, - cur_idx: usize, - yielded: usize, - _marker: PhantomData<&'a mut ()>, +pub struct IterMut<'a, T: 'a> { + entries: std::slice::IterMut<'a, Entry>, + curr: usize, } -enum Slot { - Empty(usize), - Filled(T), - Invalid, +#[derive(Clone)] +enum Entry { + 
Vacant(usize), + Occupied(T), } -unsafe impl Send for Slab where T: Send {} - -macro_rules! some { - ($expr:expr) => (match $expr { - Some(val) => val, - None => return None, - }) -} - -impl Slab { - /// Returns an empty `Slab` with the requested capacity - pub fn with_capacity(capacity: usize) -> Slab { - let entries = (1..capacity + 1) - .map(Slot::Empty) - .collect::>(); +impl Slab { + /// Construct a new, empty `Slab`. + /// + /// The function does not allocate and the returned slab will have no + /// capacity until `insert` is called or capacity is explicitly reserved. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let slab: Slab = Slab::new(); + /// ``` + pub fn new() -> Slab { + Slab::with_capacity(0) + } + /// Construct a new, empty `Slab` with the specified capacity. + /// + /// The returned slab will be able to store exactly `capacity` without + /// reallocating. If `capacity` is 0, the slab will not allocate. + /// + /// It is important to note that this function does not specify the *length* + /// of the returned slab, but only the capacity. For an explanation of the + /// difference between length and capacity, see [Capacity and + /// reallocation](index.html#capacity-and-reallocation). + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::with_capacity(10); + /// + /// // The slab contains no values, even though it has capacity for more + /// assert_eq!(slab.len(), 0); + /// + /// // These are all done without reallocating... + /// for i in 0..10 { + /// slab.insert(i); + /// } + /// + /// // ...but this may make the slab reallocate + /// slab.insert(11); + /// ``` + pub fn with_capacity(capacity: usize) -> Slab { Slab { - entries: entries, + entries: Vec::with_capacity(capacity), next: 0, len: 0, - _marker: PhantomData, } } - /// Returns the number of values stored by the `Slab` + /// Return the number of values the slab can store without reallocating. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let slab: Slab = Slab::with_capacity(10); + /// assert_eq!(slab.capacity(), 10); + /// ``` + pub fn capacity(&self) -> usize { + self.entries.capacity() + } + + /// Reserve capacity for at least `additional` more values to be stored + /// without allocating. + /// + /// `reserve` does nothing if the slab already has sufficient capacity for + /// `additional` more values. If more capacity is required, a new segment of + /// memory will be allocated and all existing values will be copied into it. + /// As such, if the slab is already very large, a call to `reserve` can end + /// up being expensive. + /// + /// The slab may reserve more than `additional` extra space in order to + /// avoid frequent reallocations. Use `reserve_exact` instead to guarantee + /// that only the requested space is allocated. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// slab.insert("hello"); + /// slab.reserve(10); + /// assert!(slab.capacity() >= 11); + /// ``` + pub fn reserve(&mut self, additional: usize) { + if self.capacity() - self.len >= additional { + return; + } + let need_add = self.len + additional - self.entries.len(); + self.entries.reserve(need_add); + } + + /// Reserve the minimum capacity required to store exactly `additional` + /// more values. + /// + /// `reserve_exact` does nothing if the slab already has sufficient capacity + /// for `additional` more valus. If more capacity is required, a new segment + /// of memory will be allocated and all existing values will be copied into + /// it. As such, if the slab is already very large, a call to `reserve` can + /// end up being expensive. + /// + /// Note that the allocator may give the slab more space than it requests. + /// Therefore capacity can not be relied upon to be precisely minimal. 
+ /// Prefer `reserve` if future insertions are expected. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// slab.insert("hello"); + /// slab.reserve_exact(10); + /// assert!(slab.capacity() >= 11); + /// ``` + pub fn reserve_exact(&mut self, additional: usize) { + if self.capacity() - self.len >= additional { + return; + } + let need_add = self.len + additional - self.entries.len(); + self.entries.reserve_exact(need_add); + } + + /// Shrink the capacity of the slab as much as possible. + /// + /// It will drop down as close as possible to the length but the allocator + /// may still inform the vector that there is space for a few more elements. + /// Also, since values are not moved, the slab cannot shrink past any stored + /// values. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::with_capacity(10); + /// + /// for i in 0..3 { + /// slab.insert(i); + /// } + /// + /// assert_eq!(slab.capacity(), 10); + /// slab.shrink_to_fit(); + /// assert!(slab.capacity() >= 3); + /// ``` + /// + /// In this case, even though two values are removed, the slab cannot shrink + /// past the last value. + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::with_capacity(10); + /// + /// for i in 0..3 { + /// slab.insert(i); + /// } + /// + /// slab.remove(0); + /// slab.remove(1); + /// + /// assert_eq!(slab.capacity(), 10); + /// slab.shrink_to_fit(); + /// assert!(slab.capacity() >= 3); + /// ``` + pub fn shrink_to_fit(&mut self) { + self.entries.shrink_to_fit(); + } + + /// Clear the slab of all values. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// for i in 0..3 { + /// slab.insert(i); + /// } + /// + /// slab.clear(); + /// assert!(slab.is_empty()); + /// ``` + pub fn clear(&mut self) { + self.entries.clear(); + self.len = 0; + self.next = 0; + } + + /// Return the number of stored values. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// for i in 0..3 { + /// slab.insert(i); + /// } + /// + /// assert_eq!(3, slab.len()); + /// ``` pub fn len(&self) -> usize { self.len } - /// Returns the total capacity of the `Slab` - pub fn capacity(&self) -> usize { - self.entries.len() - } - - /// Returns true if the `Slab` is storing no values + /// Return `true` if there are no values stored in the slab. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// assert!(slab.is_empty()); + /// + /// slab.insert(1); + /// assert!(!slab.is_empty()); + /// ``` pub fn is_empty(&self) -> bool { self.len == 0 } - /// Returns the number of available slots remaining in the `Slab` - pub fn available(&self) -> usize { - self.entries.len() - self.len - } - - /// Returns true if the `Slab` has available slots - pub fn has_available(&self) -> bool { - self.available() > 0 - } -} - -impl + From> Slab { - /// Returns true if the `Slab` contains a value for the given token - pub fn contains(&self, idx: I) -> bool { - self.get(idx).is_some() - } - - /// Get a reference to the value associated with the given token - pub fn get(&self, idx: I) -> Option<&T> { - let idx = some!(self.local_index(idx)); - - match self.entries[idx] { - Slot::Filled(ref val) => Some(val), - Slot::Empty(_) => None, - Slot::Invalid => panic!("Slab corrupt"), + /// Return an iterator over the slab. + /// + /// This function should generally be **avoided** as it is not efficient. + /// Iterators must iterate over every slot in the slab even if it is + /// vacant. 
As such, a slab with a capacity of 1 million but only one + /// stored value must still iterate the million slots. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// for i in 0..3 { + /// slab.insert(i); + /// } + /// + /// let mut iterator = slab.iter(); + /// + /// assert_eq!(iterator.next(), Some((0, &0))); + /// assert_eq!(iterator.next(), Some((1, &1))); + /// assert_eq!(iterator.next(), Some((2, &2))); + /// assert_eq!(iterator.next(), None); + /// ``` + pub fn iter(&self) -> Iter { + Iter { + entries: self.entries.iter(), + curr: 0, } } - /// Get a mutable reference to the value associated with the given token - pub fn get_mut(&mut self, idx: I) -> Option<&mut T> { - let idx = some!(self.local_index(idx)); + /// Return an iterator that allows modifying each value. + /// + /// This function should generally be **avoided** as it is not efficient. + /// Iterators must iterate over every slot in the slab even if it is + /// vacant. As such, a slab with a capacity of 1 million but only one + /// stored value must still iterate the million slots. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let key1 = slab.insert(0); + /// let key2 = slab.insert(1); + /// + /// for (key, val) in slab.iter_mut() { + /// if key == key1 { + /// *val += 2; + /// } + /// } + /// + /// assert_eq!(slab[key1], 2); + /// assert_eq!(slab[key2], 1); + /// ``` + pub fn iter_mut(&mut self) -> IterMut { + IterMut { + entries: self.entries.iter_mut(), + curr: 0, + } + } - match self.entries[idx] { - Slot::Filled(ref mut v) => Some(v), + /// Return a reference to the value associated with the given key. + /// + /// If the given key is not associated with a value, then `None` is + /// returned. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// let key = slab.insert("hello"); + /// + /// assert_eq!(slab.get(key), Some(&"hello")); + /// assert_eq!(slab.get(123), None); + /// ``` + pub fn get(&self, key: usize) -> Option<&T> { + match self.entries.get(key) { + Some(&Entry::Occupied(ref val)) => Some(val), _ => None, } } - /// Insert a value into the slab, returning the associated token - pub fn insert(&mut self, val: T) -> Result { - match self.vacant_entry() { - Some(entry) => Ok(entry.insert(val).index()), - None => Err(val), + /// Return a mutable reference to the value associated with the given key. + /// + /// If the given key is not associated with a value, then `None` is + /// returned. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// let key = slab.insert("hello"); + /// + /// *slab.get_mut(key).unwrap() = "world"; + /// + /// assert_eq!(slab[key], "world"); + /// assert_eq!(slab.get_mut(123), None); + /// ``` + pub fn get_mut(&mut self, key: usize) -> Option<&mut T> { + match self.entries.get_mut(key) { + Some(&mut Entry::Occupied(ref mut val)) => Some(val), + _ => None, } } - /// Returns a handle to an entry. + /// Return a reference to the value associated with the given key without + /// performing bounds checking. /// - /// This allows more advanced manipulation of the value stored at the given - /// index. - pub fn entry(&mut self, idx: I) -> Option> { - let idx = some!(self.local_index(idx)); - - match self.entries[idx] { - Slot::Filled(_) => { - Some(Entry { - slab: self, - idx: idx, - }) - } - Slot::Empty(_) => None, - Slot::Invalid => panic!("Slab corrupt"), + /// This function should be used with care. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// let key = slab.insert(2); + /// + /// unsafe { + /// assert_eq!(slab.get_unchecked(key), &2); + /// } + /// ``` + pub unsafe fn get_unchecked(&self, key: usize) -> &T { + match *self.entries.get_unchecked(key) { + Entry::Occupied(ref val) => val, + _ => unreachable!(), } } - /// Returns a handle to a vacant entry. + /// Return a mutable reference to the value associated with the given key + /// without performing bounds checking. /// - /// This allows optionally inserting a value that is constructed with the - /// index. - pub fn vacant_entry(&mut self) -> Option> { - let idx = self.next; - - if idx >= self.entries.len() { - return None; + /// This function should be used with care. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// let key = slab.insert(2); + /// + /// unsafe { + /// let val = slab.get_unchecked_mut(key); + /// *val = 13; + /// } + /// + /// assert_eq!(slab[key], 13); + /// ``` + pub unsafe fn get_unchecked_mut(&mut self, key: usize) -> &mut T { + match *self.entries.get_unchecked_mut(key) { + Entry::Occupied(ref mut val) => val, + _ => unreachable!(), } + } - Some(VacantEntry { + /// Insert a value in the slab, returning key assigned to the value. + /// + /// The returned key can later be used to retrieve or remove the value using indexed + /// lookup and `remove`. Additional capacity is allocated if needed. See + /// [Capacity and reallocation](index.html#capacity-and-reallocation). + /// + /// # Panics + /// + /// Panics if the number of elements in the vector overflows a `usize`. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// let key = slab.insert("hello"); + /// assert_eq!(slab[key], "hello"); + /// ``` + pub fn insert(&mut self, val: T) -> usize { + let key = self.next; + + self.insert_at(key, val); + + key + } + + /// Return a handle to a vacant entry allowing for further manipulation. + /// + /// This function is useful when creating values that must contain their + /// slab key. The returned `VacantEntry` reserves a slot in the slab and is + /// able to query the associated key. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let hello = { + /// let entry = slab.vacant_entry(); + /// let key = entry.key(); + /// + /// entry.insert((key, "hello")); + /// key + /// }; + /// + /// assert_eq!(hello, slab[hello].0); + /// assert_eq!("hello", slab[hello].1); + /// ``` + pub fn vacant_entry(&mut self) -> VacantEntry { + VacantEntry { + key: self.next, slab: self, - idx: idx, - }) + } } - /// Releases the given slot - pub fn remove(&mut self, idx: I) -> Option { - self.entry(idx).map(Entry::remove) + fn insert_at(&mut self, key: usize, val: T) { + self.len += 1; + + if key == self.entries.len() { + self.entries.push(Entry::Occupied(val)); + self.next = key + 1; + } else { + let prev = mem::replace( + &mut self.entries[key], + Entry::Occupied(val)); + + match prev { + Entry::Vacant(next) => { + self.next = next; + } + _ => unreachable!(), + } + } + } + + /// Remove and return the value associated with the given key. + /// + /// The key is then released and may be associated with future stored + /// values. + /// + /// # Panics + /// + /// Panics if `key` is not associated with a value. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let hello = slab.insert("hello"); + /// + /// assert_eq!(slab.remove(hello), "hello"); + /// assert!(!slab.contains(hello)); + /// ``` + pub fn remove(&mut self, key: usize) -> T { + // Swap the entry at the provided value + let prev = mem::replace( + &mut self.entries[key], + Entry::Vacant(self.next)); + + match prev { + Entry::Occupied(val) => { + self.len -= 1; + self.next = key; + val + } + _ => { + // Woops, the entry is actually vacant, restore the state + self.entries[key] = prev; + panic!("invalid key"); + } + } + } + + /// Return `true` if a value is associated with the given key. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let hello = slab.insert("hello"); + /// assert!(slab.contains(hello)); + /// + /// slab.remove(hello); + /// + /// assert!(!slab.contains(hello)); + /// ``` + pub fn contains(&self, key: usize) -> bool { + self.entries.get(key) + .map(|e| { + match *e { + Entry::Occupied(_) => true, + _ => false, + } + }) + .unwrap_or(false) } /// Retain only the elements specified by the predicate. /// - /// In other words, remove all elements `e` such that `f(&e)` returns false. - /// This method operates in place and preserves the order of the retained - /// elements. + /// In other words, remove all elements `e` such that `f(usize, &mut e)` + /// returns false. This method operates in place and preserves the key + /// associated with the retained values. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let k1 = slab.insert(0); + /// let k2 = slab.insert(1); + /// let k3 = slab.insert(2); + /// + /// slab.retain(|key, val| key == k1 || *val == 1); + /// + /// assert!(slab.contains(k1)); + /// assert!(slab.contains(k2)); + /// assert!(!slab.contains(k3)); + /// + /// assert_eq!(2, slab.len()); + /// ``` pub fn retain(&mut self, mut f: F) - where F: FnMut(&T) -> bool + where F: FnMut(usize, &mut T) -> bool { for i in 0..self.entries.len() { - if let Some(e) = self.entry(I::from(i)) { - if !f(e.get()) { - e.remove(); - } + let keep = match self.entries[i] { + Entry::Occupied(ref mut v) => f(i, v), + _ => true, + }; + + if !keep { + self.remove(i); } } } - - /// An iterator for visiting all elements stored in the `Slab` - pub fn iter(&self) -> Iter { - Iter { - slab: self, - cur_idx: 0, - yielded: 0, - } - } - - /// A mutable iterator for visiting all elements stored in the `Slab` - pub fn iter_mut(&mut self) -> IterMut { - IterMut { - slab: self as *mut Slab, - cur_idx: 0, - yielded: 0, - _marker: PhantomData, - } - } - - /// Empty the slab, by freeing all entries - pub fn clear(&mut self) { - for (i, e) in self.entries.iter_mut().enumerate() { - *e = Slot::Empty(i + 1) - } - self.next = 0; - self.len = 0; - } - - /// Reserves the minimum capacity for exactly `additional` more elements to - /// be inserted in the given `Slab`. Does nothing if the capacity is - /// already sufficient. 
- pub fn reserve_exact(&mut self, additional: usize) { - let prev_len = self.entries.len(); - - // Ensure `entries_num` isn't too big - assert!(additional < usize::MAX - prev_len, "capacity too large"); - - let prev_len_next = prev_len + 1; - self.entries.extend((prev_len_next..(prev_len_next + additional)).map(Slot::Empty)); - - debug_assert_eq!(self.entries.len(), prev_len + additional); - } - - fn insert_at(&mut self, idx: usize, value: T) -> I { - self.next = match self.entries[idx] { - Slot::Empty(next) => next, - Slot::Filled(_) => panic!("Index already contains value"), - Slot::Invalid => panic!("Slab corrupt"), - }; - - self.entries[idx] = Slot::Filled(value); - self.len += 1; - - I::from(idx) - } - - fn replace(&mut self, idx: usize, e: Slot) -> Option { - if let Slot::Filled(val) = mem::replace(&mut self.entries[idx], e) { - self.next = idx; - return Some(val); - } - - None - } - - fn local_index(&self, idx: I) -> Option { - let idx: usize = idx.into(); - - if idx >= self.entries.len() { - return None; - } - - Some(idx) - } } -impl + Into> ops::Index for Slab { +impl ops::Index for Slab { type Output = T; - fn index(&self, index: I) -> &T { - self.get(index).expect("invalid index") + fn index(&self, key: usize) -> &T { + match self.entries[key] { + Entry::Occupied(ref v) => v, + _ => panic!("invalid key"), + } } } -impl + Into> ops::IndexMut for Slab { - fn index_mut(&mut self, index: I) -> &mut T { - self.get_mut(index).expect("invalid index") +impl ops::IndexMut for Slab { + fn index_mut(&mut self, key: usize) -> &mut T { + match self.entries[key] { + Entry::Occupied(ref mut v) => v, + _ => panic!("invalid key"), + } } } -impl fmt::Debug for Slab - where T: fmt::Debug, - I: fmt::Debug, -{ +impl<'a, T> IntoIterator for &'a Slab { + type Item = (usize, &'a T); + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +impl<'a, T> IntoIterator for &'a mut Slab { + type Item = (usize, &'a mut T); + type IntoIter = 
IterMut<'a, T>; + + fn into_iter(self) -> IterMut<'a, T> { + self.iter_mut() + } +} + +impl fmt::Debug for Slab where T: fmt::Debug { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "Slab {{ len: {}, cap: {} }}", @@ -296,144 +794,96 @@ impl fmt::Debug for Slab } } -impl<'a, T, I: From + Into> IntoIterator for &'a Slab { - type Item = &'a T; - type IntoIter = Iter<'a, T, I>; - - fn into_iter(self) -> Iter<'a, T, I> { - self.iter() +impl<'a, T: 'a> fmt::Debug for Iter<'a, T> where T: fmt::Debug { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Iter") + .field("curr", &self.curr) + .field("remaining", &self.entries.len()) + .finish() } } -impl<'a, T, I: From + Into> IntoIterator for &'a mut Slab { - type Item = &'a mut T; - type IntoIter = IterMut<'a, T, I>; - - fn into_iter(self) -> IterMut<'a, T, I> { - self.iter_mut() +impl<'a, T: 'a> fmt::Debug for IterMut<'a, T> where T: fmt::Debug { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("IterMut") + .field("curr", &self.curr) + .field("remaining", &self.entries.len()) + .finish() } } -/* - * - * ===== Entry ===== - * - */ +// ===== VacantEntry ===== -impl<'a, T, I: From + Into> Entry<'a, T, I> { +impl<'a, T> VacantEntry<'a, T> { + /// Insert a value in the entry, returning a mutable reference to the value. + /// + /// To get the key associated with the value, use `key` prior to calling + /// `insert`. 
+ /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let hello = { + /// let entry = slab.vacant_entry(); + /// let key = entry.key(); + /// + /// entry.insert((key, "hello")); + /// key + /// }; + /// + /// assert_eq!(hello, slab[hello].0); + /// assert_eq!("hello", slab[hello].1); + /// ``` + pub fn insert(self, val: T) -> &'a mut T { + self.slab.insert_at(self.key, val); - /// Replace the value stored in the entry - pub fn replace(&mut self, val: T) -> T { - match mem::replace(&mut self.slab.entries[self.idx], Slot::Filled(val)) { - Slot::Filled(v) => v, - _ => panic!("Slab corrupt"), + match self.slab.entries[self.key] { + Entry::Occupied(ref mut v) => v, + _ => unreachable!(), } } - /// Apply the function to the current value, replacing it with the result - /// of the function. - pub fn replace_with(&mut self, f: F) - where F: FnOnce(T) -> T - { - let idx = self.idx; - - // Take the value out of the entry, temporarily setting it to Invalid - let val = match mem::replace(&mut self.slab.entries[idx], Slot::Invalid) { - Slot::Filled(v) => f(v), - _ => panic!("Slab corrupt"), - }; - - self.slab.entries[idx] = Slot::Filled(val); - } - - /// Remove and return the value stored in the entry - pub fn remove(self) -> T { - let next = self.slab.next; - - if let Some(v) = self.slab.replace(self.idx, Slot::Empty(next)) { - self.slab.len -= 1; - v - } else { - panic!("Slab corrupt"); - } - } - - /// Get a reference to the value stored in the entry - pub fn get(&self) -> &T { - let idx = self.index(); - self.slab - .get(idx) - .expect("Filled slot in Entry") - } - - /// Get a mutable reference to the value stored in the entry - pub fn get_mut(&mut self) -> &mut T { - let idx = self.index(); - self.slab - .get_mut(idx) - .expect("Filled slot in Entry") - } - - /// Convert the entry handle to a mutable reference - pub fn into_mut(self) -> &'a mut T { - let idx = self.index(); - self.slab - .get_mut(idx) - .expect("Filled 
slot in Entry") - } - - /// Return the entry index - pub fn index(&self) -> I { - I::from(self.idx) + /// Return the key associated with this entry. + /// + /// A value stored in this entry will be associated with this key. + /// + /// # Examples + /// + /// ``` + /// # use slab::*; + /// let mut slab = Slab::new(); + /// + /// let hello = { + /// let entry = slab.vacant_entry(); + /// let key = entry.key(); + /// + /// entry.insert((key, "hello")); + /// key + /// }; + /// + /// assert_eq!(hello, slab[hello].0); + /// assert_eq!("hello", slab[hello].1); + /// ``` + pub fn key(&self) -> usize { + self.key } } -/* - * - * ===== VacantEntry ===== - * - */ +// ===== Iter ===== -impl<'a, T, I: From + Into> VacantEntry<'a, T, I> { - /// Insert a value into the entry - pub fn insert(self, val: T) -> Entry<'a, T, I> { - self.slab.insert_at(self.idx, val); +impl<'a, T> Iterator for Iter<'a, T> { + type Item = (usize, &'a T); - Entry { - slab: self.slab, - idx: self.idx, - } - } + fn next(&mut self) -> Option<(usize, &'a T)> { + while let Some(entry) = self.entries.next() { + let curr = self.curr; + self.curr += 1; - /// Returns the entry index - pub fn index(&self) -> I { - I::from(self.idx) - } -} - -/* - * - * ===== Iter ===== - * - */ - -impl<'a, T, I> Iterator for Iter<'a, T, I> { - type Item = &'a T; - - fn next(&mut self) -> Option<&'a T> { - while self.yielded < self.slab.len { - match self.slab.entries[self.cur_idx] { - Slot::Filled(ref v) => { - self.cur_idx += 1; - self.yielded += 1; - return Some(v); - } - Slot::Empty(_) => { - self.cur_idx += 1; - } - Slot::Invalid => { - panic!("Slab corrupt"); - } + if let Entry::Occupied(ref v) = *entry { + return Some((curr, v)); } } @@ -441,397 +891,21 @@ impl<'a, T, I> Iterator for Iter<'a, T, I> { } } -/* - * - * ===== IterMut ===== - * - */ +// ===== IterMut ===== -impl<'a, T, I> Iterator for IterMut<'a, T, I> { - type Item = &'a mut T; +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = (usize, &'a mut T); - fn 
next(&mut self) -> Option<&'a mut T> { - unsafe { - while self.yielded < (*self.slab).len { - let idx = self.cur_idx; + fn next(&mut self) -> Option<(usize, &'a mut T)> { + while let Some(entry) = self.entries.next() { + let curr = self.curr; + self.curr += 1; - match (*self.slab).entries[idx] { - Slot::Filled(ref mut v) => { - self.cur_idx += 1; - self.yielded += 1; - return Some(v); - } - Slot::Empty(_) => { - self.cur_idx += 1; - } - Slot::Invalid => { - panic!("Slab corrupt"); - } - } + if let Entry::Occupied(ref mut v) = *entry { + return Some((curr, v)); } - - None } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct MyIndex(pub usize); - - impl From for MyIndex { - fn from(i: usize) -> MyIndex { - MyIndex(i) - } - } - - impl Into for MyIndex { - fn into(self) -> usize { - self.0 - } - } - - #[test] - fn test_index_trait() { - let mut slab = Slab::::with_capacity(1); - let idx = slab.insert(10).ok().expect("Failed to insert"); - assert_eq!(idx, MyIndex(0)); - assert_eq!(slab[idx], 10); - } - - #[test] - fn test_insertion() { - let mut slab = Slab::::with_capacity(1); - assert_eq!(slab.is_empty(), true); - assert_eq!(slab.has_available(), true); - assert_eq!(slab.available(), 1); - let idx = slab.insert(10).ok().expect("Failed to insert"); - assert_eq!(slab[idx], 10); - assert_eq!(slab.is_empty(), false); - assert_eq!(slab.has_available(), false); - assert_eq!(slab.available(), 0); - } - - #[test] - fn test_insert_with() { - let mut slab = Slab::::with_capacity(1); - - { - let e = slab.vacant_entry().unwrap(); - assert_eq!(e.index(), 0); - let e = e.insert(5); - assert_eq!(5, *e.get()); - } - - assert_eq!(Some(&5), slab.get(0)); - } - - #[test] - fn test_repeated_insertion() { - let mut slab = Slab::::with_capacity(10); - - for i in 0..10 { - let idx = slab.insert(i + 10).ok().expect("Failed to insert"); - assert_eq!(slab[idx], i + 10); - } - - 
slab.insert(20).err().expect("Inserted when full"); - } - - #[test] - fn test_repeated_insertion_and_removal() { - let mut slab = Slab::::with_capacity(10); - let mut indices = vec![]; - - for i in 0..10 { - let idx = slab.insert(i + 10).ok().expect("Failed to insert"); - indices.push(idx); - assert_eq!(slab[idx], i + 10); - } - - for &i in indices.iter() { - slab.remove(i); - } - - slab.insert(20).ok().expect("Failed to insert in newly empty slab"); - } - - #[test] - fn test_insertion_when_full() { - let mut slab = Slab::::with_capacity(1); - slab.insert(10).ok().expect("Failed to insert"); - slab.insert(10).err().expect("Inserted into a full slab"); - } - - #[test] - fn test_removal_at_boundries() { - let mut slab = Slab::::with_capacity(1); - assert_eq!(slab.remove(0), None); - assert_eq!(slab.remove(1), None); - } - - #[test] - fn test_removal_is_successful() { - let mut slab = Slab::::with_capacity(1); - let t1 = slab.insert(10).ok().expect("Failed to insert"); - slab.remove(t1); - let t2 = slab.insert(20).ok().expect("Failed to insert"); - assert_eq!(slab[t2], 20); - } - - #[test] - fn test_remove_empty_entry() { - let mut s = Slab::<(), usize>::with_capacity(3); - let t1 = s.insert(()).unwrap(); - assert!(s.remove(t1).is_some()); - assert!(s.remove(t1).is_none()); - assert!(s.insert(()).is_ok()); - assert!(s.insert(()).is_ok()); - } - - #[test] - fn test_mut_retrieval() { - let mut slab = Slab::<_, usize>::with_capacity(1); - let t1 = slab.insert("foo".to_string()).ok().expect("Failed to insert"); - - slab[t1].push_str("bar"); - - assert_eq!(&slab[t1][..], "foobar"); - } - - #[test] - #[should_panic] - fn test_reusing_slots_1() { - let mut slab = Slab::::with_capacity(16); - - let t0 = slab.insert(123).unwrap(); - let t1 = slab.insert(456).unwrap(); - - assert!(slab.len() == 2); - assert!(slab.available() == 14); - - slab.remove(t0); - - assert!(slab.len() == 1, "actual={}", slab.len()); - assert!(slab.available() == 15); - - slab.remove(t1); - - 
assert!(slab.len() == 0); - assert!(slab.available() == 16); - - let _ = slab[t1]; - } - - #[test] - fn test_reusing_slots_2() { - let mut slab = Slab::::with_capacity(16); - - let t0 = slab.insert(123).unwrap(); - - assert!(slab[t0] == 123); - assert!(slab.remove(t0) == Some(123)); - - let t0 = slab.insert(456).unwrap(); - - assert!(slab[t0] == 456); - - let t1 = slab.insert(789).unwrap(); - - assert!(slab[t0] == 456); - assert!(slab[t1] == 789); - - assert!(slab.remove(t0).unwrap() == 456); - assert!(slab.remove(t1).unwrap() == 789); - - assert!(slab.len() == 0); - } - - #[test] - #[should_panic] - fn test_accessing_out_of_bounds() { - let slab = Slab::::with_capacity(16); - slab[0]; - } - - #[test] - #[should_panic] - fn test_capacity_too_large1() { - use std::usize; - Slab::::with_capacity(usize::MAX); - } - - #[test] - #[should_panic] - fn test_capacity_too_large_in_reserve_exact() { - use std::usize; - let mut slab = Slab::::with_capacity(100); - slab.reserve_exact(usize::MAX - 100); - } - - #[test] - fn test_contains() { - let mut slab = Slab::with_capacity(16); - assert!(!slab.contains(0)); - - let idx = slab.insert(111).unwrap(); - assert!(slab.contains(idx)); - } - - #[test] - fn test_get() { - let mut slab = Slab::::with_capacity(16); - let tok = slab.insert(5).unwrap(); - assert_eq!(slab.get(tok), Some(&5)); - assert_eq!(slab.get(1), None); - assert_eq!(slab.get(23), None); - } - - #[test] - fn test_get_mut() { - let mut slab = Slab::::with_capacity(16); - let tok = slab.insert(5u32).unwrap(); - { - let mut_ref = slab.get_mut(tok).unwrap(); - assert_eq!(*mut_ref, 5); - *mut_ref = 12; - } - assert_eq!(slab[tok], 12); - assert_eq!(slab.get_mut(1), None); - assert_eq!(slab.get_mut(23), None); - } - - #[test] - fn test_replace() { - let mut slab = Slab::::with_capacity(16); - let tok = slab.insert(5).unwrap(); - - slab.entry(tok).unwrap().replace(6); - assert!(slab.entry(tok + 1).is_none()); - - assert_eq!(slab[tok], 6); - assert_eq!(slab.len(), 1); - } - - 
#[test] - fn test_replace_again() { - let mut slab = Slab::::with_capacity(16); - let tok = slab.insert(5).unwrap(); - - slab.entry(tok).unwrap().replace(6); - slab.entry(tok).unwrap().replace(7); - slab.entry(tok).unwrap().replace(8); - assert_eq!(slab[tok], 8); - } - - #[test] - fn test_replace_with() { - let mut slab = Slab::::with_capacity(16); - let tok = slab.insert(5u32).unwrap(); - slab.entry(tok).unwrap().replace_with(|x| x + 1); - assert_eq!(slab[tok], 6); - } - - #[test] - fn test_retain() { - let mut slab = Slab::::with_capacity(2); - let tok1 = slab.insert(0).unwrap(); - let tok2 = slab.insert(1).unwrap(); - slab.retain(|x| x % 2 == 0); - assert_eq!(slab.len(), 1); - assert_eq!(slab[tok1], 0); - assert_eq!(slab.contains(tok2), false); - } - - #[test] - fn test_iter() { - let mut slab = Slab::::with_capacity(4); - for i in 0..4 { - slab.insert(i).unwrap(); - } - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![0, 1, 2, 3]); - - slab.remove(1); - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![0, 2, 3]); - } - - #[test] - fn test_iter_mut() { - let mut slab = Slab::::with_capacity(4); - for i in 0..4 { - slab.insert(i).unwrap(); - } - for e in slab.iter_mut() { - *e = *e + 1; - } - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![1, 2, 3, 4]); - - slab.remove(2); - for e in slab.iter_mut() { - *e = *e + 1; - } - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![2, 3, 5]); - } - - #[test] - fn test_reserve_exact() { - let mut slab = Slab::::with_capacity(4); - for i in 0..4 { - slab.insert(i).unwrap(); - } - - assert!(slab.insert(0).is_err()); - - slab.reserve_exact(3); - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![0, 1, 2, 3]); - - for i in 0..3 { - slab.insert(i).unwrap(); - } - assert!(slab.insert(0).is_err()); - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![0, 1, 2, 3, 0, 1, 
2]); - } - - #[test] - fn test_clear() { - let mut slab = Slab::::with_capacity(4); - for i in 0..4 { - slab.insert(i).unwrap(); - } - - // clear full - slab.clear(); - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![]); - - for i in 0..2 { - slab.insert(i).unwrap(); - } - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![0, 1]); - - - // clear half-filled - slab.clear(); - - let vals: Vec = slab.iter().map(|r| *r).collect(); - assert_eq!(vals, vec![]); + + None } } diff --git a/third_party/rust/slab/tests/slab.rs b/third_party/rust/slab/tests/slab.rs new file mode 100644 index 000000000000..01d61388f68d --- /dev/null +++ b/third_party/rust/slab/tests/slab.rs @@ -0,0 +1,262 @@ +extern crate slab; + +use slab::*; + +#[test] +fn insert_get_remove_one() { + let mut slab = Slab::new(); + assert!(slab.is_empty()); + + let key = slab.insert(10); + + assert_eq!(slab[key], 10); + assert_eq!(slab.get(key), Some(&10)); + assert!(!slab.is_empty()); + assert!(slab.contains(key)); + + assert_eq!(slab.remove(key), 10); + assert!(!slab.contains(key)); + assert!(slab.get(key).is_none()); +} + +#[test] +fn insert_get_many() { + let mut slab = Slab::with_capacity(10); + + for i in 0..10 { + let key = slab.insert(i + 10); + assert_eq!(slab[key], i + 10); + } + + assert_eq!(slab.capacity(), 10); + + // Storing another one grows the slab + let key = slab.insert(20); + assert_eq!(slab[key], 20); + + // Capacity grows by 2x + assert_eq!(slab.capacity(), 20); +} + +#[test] +fn insert_get_remove_many() { + let mut slab = Slab::with_capacity(10); + let mut keys = vec![]; + + for i in 0..10 { + for j in 0..10 { + let val = (i * 10) + j; + + let key = slab.insert(val); + keys.push((key, val)); + assert_eq!(slab[key], val); + } + + for (key, val) in keys.drain(..) 
{ + assert_eq!(val, slab.remove(key)); + } + } + + assert_eq!(10, slab.capacity()); +} + +#[test] +fn insert_with_vacant_entry() { + let mut slab = Slab::with_capacity(1); + let key; + + { + let entry = slab.vacant_entry(); + key = entry.key(); + entry.insert(123); + } + + assert_eq!(123, slab[key]); +} + +#[test] +fn get_vacant_entry_without_using() { + let mut slab = Slab::::with_capacity(1); + let key = slab.vacant_entry().key(); + assert_eq!(key, slab.vacant_entry().key()); +} + +#[test] +#[should_panic] +fn invalid_get_panics() { + let slab = Slab::::with_capacity(1); + slab[0]; +} + +#[test] +#[should_panic] +fn double_remove_panics() { + let mut slab = Slab::::with_capacity(1); + let key = slab.insert(123); + slab.remove(key); + slab.remove(key); +} + +#[test] +#[should_panic] +fn invalid_remove_panics() { + let mut slab = Slab::::with_capacity(1); + slab.remove(0); +} + +#[test] +fn slab_get_mut() { + let mut slab = Slab::new(); + let key = slab.insert(1); + + slab[key] = 2; + assert_eq!(slab[key], 2); + + *slab.get_mut(key).unwrap() = 3; + assert_eq!(slab[key], 3); +} + +#[test] +fn reserve_does_not_allocate_if_available() { + let mut slab = Slab::with_capacity(10); + let mut keys = vec![]; + + for i in 0..6 { + keys.push(slab.insert(i)); + } + + for key in 0..4 { + slab.remove(key); + } + + assert!(slab.capacity() - slab.len() == 8); + + slab.reserve(8); + assert_eq!(10, slab.capacity()); +} + +#[test] +fn reserve_exact_does_not_allocate_if_available() { + let mut slab = Slab::with_capacity(10); + let mut keys = vec![]; + + for i in 0..6 { + keys.push(slab.insert(i)); + } + + for key in 0..4 { + slab.remove(key); + } + + assert!(slab.capacity() - slab.len() == 8); + + slab.reserve(8); + assert_eq!(10, slab.capacity()); +} + +#[test] +fn retain() { + let mut slab = Slab::with_capacity(2); + + let key1 = slab.insert(0); + let key2 = slab.insert(1); + + slab.retain(|key, x| { + assert_eq!(key, *x); + *x % 2 == 0 + }); + + assert_eq!(slab.len(), 1); + 
assert_eq!(slab[key1], 0); + assert!(!slab.contains(key2)); + + // Ensure consistency is retained + let key = slab.insert(123); + assert_eq!(key, key2); + + assert_eq!(2, slab.len()); + assert_eq!(2, slab.capacity()); + + // Inserting another element grows + let key = slab.insert(345); + assert_eq!(key, 2); + + assert_eq!(4, slab.capacity()); +} + +#[test] +fn iter() { + let mut slab = Slab::new(); + + for i in 0..4 { + slab.insert(i); + } + + let vals: Vec<_> = slab.iter().enumerate().map(|(i, (key, val))| { + assert_eq!(i, key); + *val + }).collect(); + assert_eq!(vals, vec![0, 1, 2, 3]); + + slab.remove(1); + + let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect(); + assert_eq!(vals, vec![0, 2, 3]); +} + +#[test] +fn iter_mut() { + let mut slab = Slab::new(); + + for i in 0..4 { + slab.insert(i); + } + + for (i, (key, e)) in slab.iter_mut().enumerate() { + assert_eq!(i, key); + *e = *e + 1; + } + + let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect(); + assert_eq!(vals, vec![1, 2, 3, 4]); + + slab.remove(2); + + for (_, e) in slab.iter_mut() { + *e = *e + 1; + } + + let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect(); + assert_eq!(vals, vec![2, 3, 5]); +} + +#[test] +fn clear() { + let mut slab = Slab::new(); + + for i in 0..4 { + slab.insert(i); + } + + // clear full + slab.clear(); + + let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect(); + assert!(vals.is_empty()); + + assert_eq!(0, slab.len()); + assert_eq!(4, slab.capacity()); + + for i in 0..2 { + slab.insert(i); + } + + let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect(); + assert_eq!(vals, vec![0, 1]); + + // clear half-filled + slab.clear(); + + let vals: Vec<_> = slab.iter().map(|(_, r)| *r).collect(); + assert!(vals.is_empty()); +} diff --git a/third_party/rust/string/.cargo-checksum.json b/third_party/rust/string/.cargo-checksum.json new file mode 100644 index 000000000000..4501465a376a --- /dev/null +++ b/third_party/rust/string/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".travis.yml":"5ca50e222e5aaff116317678326bc4e603bf1813869268cee2cd79cbb0503cd9","CHANGELOG.md":"dc18dc416743d25df3a4571ed1b5912646dae31438433e6cce39566a65c1c4da","Cargo.toml":"c168030e3a9658fa71d9cf0ad8cb177a9fe5639cbd6f5268df780a0eb85f0819","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"e59818f038af4c3b1bb58fad5fdfeb3d9c1193877f6bf9f8cfc105e11d104ff3","src/lib.rs":"136b60e6dba1364a85b1d6b2063b614526d1c85e4a7c31c829904bb8717fa531"},"package":"00caf261d6f90f588f8450b8e1230fa0d5be49ee6140fdfbcb55335aff350970"} \ No newline at end of file diff --git a/third_party/rust/string/.travis.yml b/third_party/rust/string/.travis.yml new file mode 100644 index 000000000000..6ead831fbd89 --- /dev/null +++ b/third_party/rust/string/.travis.yml @@ -0,0 +1,22 @@ +--- +language: rust +sudo: false + +rust: + - stable + +os: + - linux + +matrix: + include: + - os: linux + rust: 1.20.0 + +script: + - cargo build + - cargo test + +notifications: + email: + on_success: never diff --git a/third_party/rust/string/CHANGELOG.md b/third_party/rust/string/CHANGELOG.md new file mode 100644 index 000000000000..83087de8fccf --- /dev/null +++ b/third_party/rust/string/CHANGELOG.md @@ -0,0 +1,7 @@ +# 0.1.1 (July 13, 2018) + +* Fix doc gen (#2). + +# 0.1.0 (January 11, 2018) + +* Initial release. diff --git a/third_party/rust/string/Cargo.toml b/third_party/rust/string/Cargo.toml new file mode 100644 index 000000000000..c39a5daf18a6 --- /dev/null +++ b/third_party/rust/string/Cargo.toml @@ -0,0 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "string" +version = "0.1.1" +authors = ["Carl Lerche "] +description = "A UTF-8 encoded string with configurable byte storage." +homepage = "https://github.com/carllerche/string" +documentation = "https://docs.rs/string" +readme = "README.md" +keywords = ["string"] +categories = ["data-structures"] +license = "MIT" +repository = "https://github.com/carllerche/string" + +[dependencies] +[badges.travis-ci] +branch = "master" +repository = "carllerche/string" diff --git a/third_party/rust/string/LICENSE b/third_party/rust/string/LICENSE new file mode 100644 index 000000000000..58fb29a12384 --- /dev/null +++ b/third_party/rust/string/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Carl Lerche + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/rust/string/README.md b/third_party/rust/string/README.md new file mode 100644 index 000000000000..b367f09b08c3 --- /dev/null +++ b/third_party/rust/string/README.md @@ -0,0 +1,30 @@ +# String + +A UTF-8 encoded string with configurable byte storage. + +[![Build Status](https://travis-ci.org/carllerche/string.svg?branch=master)](https://travis-ci.org/carllerche/string) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +[![Crates.io](https://img.shields.io/crates/v/string.svg?maxAge=2592000)](https://crates.io/crates/string) +[![Documentation](https://docs.rs/string/badge.svg)](https://docs.rs/string) + +## Usage + +To use `string`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +string = "0.1" +``` + +Next, add this to your crate: + +```rust +extern crate string; + +use string::{String, TryFrom}; + +let s: String<[u8; 2]> = String::try_from([b'h', b'i']).unwrap(); +assert_eq!(&s[..], "hi"); +``` + +See [documentation](https://docs.rs/string) for more details. diff --git a/third_party/rust/string/src/lib.rs b/third_party/rust/string/src/lib.rs new file mode 100644 index 000000000000..455632cf1dac --- /dev/null +++ b/third_party/rust/string/src/lib.rs @@ -0,0 +1,181 @@ +//! A UTF-8 encoded string with configurable byte storage. +//! +//! This crate provides `String`, a type similar to its std counterpart, but +//! with one significant difference: the underlying byte storage is +//! configurable. In other words, `String` is a marker type wrapping `T`, +//! indicating that it represents a UTF-8 encoded string. +//! +//! For example, one can represent small strings (stack allocated) by wrapping +//! an array: +//! +//! ``` +//! # use string::*; +//! let s: String<[u8; 2]> = String::try_from([b'h', b'i']).unwrap(); +//! assert_eq!(&s[..], "hi"); +//! 
``` + +#![deny(warnings, missing_docs, missing_debug_implementations)] +#![doc(html_root_url = "https://docs.rs/string/0.1.1")] + +use std::{fmt, ops, str}; + +/// A UTF-8 encoded string with configurable byte storage. +/// +/// This type differs from `std::String` in that it is generic over the +/// underlying byte storage, enabling it to use `Vec<[u8]>`, `&[u8]`, or third +/// party types, such as [`Bytes`]. +/// +/// [`Bytes`]: https://docs.rs/bytes/0.4.8/bytes/struct.Bytes.html +#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default)] +pub struct String> { + value: T, +} + +impl String { + /// Get a reference to the underlying byte storage. + /// + /// # Examples + /// + /// ``` + /// # use string::*; + /// let s = String::new(); + /// let vec = s.get_ref(); + /// ``` + pub fn get_ref(&self) -> &T { + &self.value + } + + /// Get a mutable reference to the underlying byte storage. + /// + /// It is inadvisable to directly manipulate the byte storage. This function + /// is unsafe as the bytes could no longer be valid UTF-8 after mutation. + /// + /// # Examples + /// + /// ``` + /// # use string::*; + /// let mut s = String::new(); + /// + /// unsafe { + /// let vec = s.get_mut(); + /// } + /// ``` + pub unsafe fn get_mut(&mut self) -> &mut T { + &mut self.value + } + + /// Unwraps this `String`, returning the underlying byte storage. + /// + /// # Examples + /// + /// ``` + /// # use string::*; + /// let s = String::new(); + /// let vec = s.into_inner(); + /// ``` + pub fn into_inner(self) -> T { + self.value + } +} + +impl String { + /// Creates a new empty `String`. + /// + /// Given that the `String` is empty, this will not allocate. 
+ /// + /// # Examples + /// + /// Basic usage + /// + /// ``` + /// let s = String::new(); + /// assert_eq!(s, ""); + /// ``` + pub fn new() -> String { + String::default() + } +} + +impl String + where T: AsRef<[u8]>, +{ + /// Converts the provided value to a `String` without checking that the + /// given value is valid UTF-8. + /// + /// Use `TryFrom` for a safe conversion. + pub unsafe fn from_utf8_unchecked(value: T) -> String { + String { value } + } +} + +impl ops::Deref for String + where T: AsRef<[u8]> +{ + type Target = str; + + #[inline] + fn deref(&self) -> &str { + let b = self.value.as_ref(); + unsafe { str::from_utf8_unchecked(b) } + } +} + +impl From<::std::string::String> for String<::std::string::String> { + fn from(value: ::std::string::String) -> Self { + String { value } + } +} + +impl<'a> From<&'a str> for String<&'a str> { + fn from(value: &'a str) -> Self { + String { value } + } +} + +impl TryFrom for String + where T: AsRef<[u8]> +{ + type Error = str::Utf8Error; + + fn try_from(value: T) -> Result { + let _ = str::from_utf8(value.as_ref())?; + Ok(String { value }) + } +} + +impl> fmt::Debug for String { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + (**self).fmt(fmt) + } +} + +/// Attempt to construct `Self` via a conversion. +/// +/// This trait will be deprecated in favor of [std::convert::TryFrom] once it +/// reaches stable Rust. +pub trait TryFrom: Sized + sealed::Sealed { + /// The type returned in the event of a conversion error. + type Error; + + /// Performs the conversion. + fn try_from(value: T) -> Result; +} + +impl sealed::Sealed for String {} + +mod sealed { + /// Private trait to this crate to prevent traits from being implemented in + /// downstream crates. 
+ pub trait Sealed {} +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_from_std_string() { + let s: String<_> = "hello".to_string().into(); + assert_eq!(&s[..], "hello"); + } +} diff --git a/third_party/rust/tokio-codec/.cargo-checksum.json b/third_party/rust/tokio-codec/.cargo-checksum.json new file mode 100644 index 000000000000..ccd6df08fb4c --- /dev/null +++ b/third_party/rust/tokio-codec/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"c669cb16ddb7527ab1e52cb8d1d41b2b5fe5f212bb804710d6d3697bab380ac4","Cargo.toml":"f54f1b39a7327b1c5479278d0c81f04a1fc5e336f4681efaec867d453ded4b47","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"a130c56b3b4c625e1284dcfe17235de0e214635d310a09141ade604f1e15956f","src/bytes_codec.rs":"ad7a52ae6501b98bd6332af537a8fa8c4940f3e4495a8d2fed5cf3585afb0e7b","src/lib.rs":"cfca50711173ef5c0ebed4a281c3a8b77792868bd15b3a9ba0a1fec47638e863","src/lines_codec.rs":"cec96bee040e70a039d6598e6afcc50383922c9e949de2573805e3028cbd5781","tests/codecs.rs":"eef71df1db09a8128d017cef44ed0eb9b82ed232d2fcee61a1b4dfb419728327","tests/framed.rs":"b4b3ba571f3a8c1727aef5773e2f4a68f1cf162955c3984da145e512d1047ad1","tests/framed_read.rs":"4e3558a66acd2e1cbd2d82721d48f10d16104979196d616ac5a1e7c120f0ede1","tests/framed_write.rs":"b7aae09c670678d0d7cd24017b5ffe2ba634cc3371222487381aaf8499bf819d"},"package":"881e9645b81c2ce95fcb799ded2c29ffb9f25ef5bef909089a420e5961dd8ccb"} \ No newline at end of file diff --git a/third_party/rust/tokio-codec/CHANGELOG.md b/third_party/rust/tokio-codec/CHANGELOG.md new file mode 100644 index 000000000000..5c3ac55860e5 --- /dev/null +++ b/third_party/rust/tokio-codec/CHANGELOG.md @@ -0,0 +1,3 @@ +# # 0.1.0 (June 13, 2018) + +* Initial release (#353) diff --git a/third_party/rust/tokio-codec/Cargo.toml b/third_party/rust/tokio-codec/Cargo.toml new file mode 100644 index 000000000000..e204ed4e6235 --- /dev/null +++ b/third_party/rust/tokio-codec/Cargo.toml @@ -0,0 
+1,30 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-codec" +version = "0.1.0" +authors = ["Carl Lerche ", "Bryan Burgers "] +description = "Utilities for encoding and decoding frames.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio-codec/0.1" +categories = ["asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.bytes] +version = "0.4.7" + +[dependencies.futures] +version = "0.1.18" + +[dependencies.tokio-io] +version = "0.1.7" diff --git a/third_party/rust/tokio-codec/LICENSE b/third_party/rust/tokio-codec/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-codec/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-codec/README.md b/third_party/rust/tokio-codec/README.md new file mode 100644 index 000000000000..e0c1a3858c7d --- /dev/null +++ b/third_party/rust/tokio-codec/README.md @@ -0,0 +1,35 @@ +# tokio-codec + +Utilities for encoding and decoding frames. + +[Documentation](https://docs.rs/tokio-codec) + +## Usage + +First, add this to your `Cargo.toml`: + +```toml +[dependencies] +tokio-codec = "0.1" +``` + +Next, add this to your crate: + +```rust +extern crate tokio_codec; +``` + +You can find extensive documentation and examples about how to use this crate +online at [https://tokio.rs](https://tokio.rs). The [API +documentation](https://docs.rs/tokio-codec) is also a great place to get started +for the nitty-gritty. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-codec/src/bytes_codec.rs b/third_party/rust/tokio-codec/src/bytes_codec.rs new file mode 100644 index 000000000000..d535aef689d1 --- /dev/null +++ b/third_party/rust/tokio-codec/src/bytes_codec.rs @@ -0,0 +1,37 @@ +use bytes::{Bytes, BufMut, BytesMut}; +use tokio_io::_tokio_codec::{Encoder, Decoder}; +use std::io; + +/// A simple `Codec` implementation that just ships bytes around. 
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct BytesCodec(()); + +impl BytesCodec { + /// Creates a new `BytesCodec` for shipping around raw bytes. + pub fn new() -> BytesCodec { BytesCodec(()) } +} + +impl Decoder for BytesCodec { + type Item = BytesMut; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + if buf.len() > 0 { + let len = buf.len(); + Ok(Some(buf.split_to(len))) + } else { + Ok(None) + } + } +} + +impl Encoder for BytesCodec { + type Item = Bytes; + type Error = io::Error; + + fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(), io::Error> { + buf.reserve(data.len()); + buf.put(data); + Ok(()) + } +} diff --git a/third_party/rust/tokio-codec/src/lib.rs b/third_party/rust/tokio-codec/src/lib.rs new file mode 100644 index 000000000000..2b26b542bbbd --- /dev/null +++ b/third_party/rust/tokio-codec/src/lib.rs @@ -0,0 +1,32 @@ +//! Utilities for encoding and decoding frames. +//! +//! Contains adapters to go from streams of bytes, [`AsyncRead`] and +//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. +//! Framed streams are also known as [transports]. +//! +//! [`AsyncRead`]: # +//! [`AsyncWrite`]: # +//! [`Sink`]: # +//! [`Stream`]: # +//! 
[transports]: # + +#![deny(missing_docs, missing_debug_implementations, warnings)] +#![doc(html_root_url = "https://docs.rs/tokio-codec/0.1.0")] + +extern crate bytes; +extern crate tokio_io; + +mod bytes_codec; +mod lines_codec; + +pub use tokio_io::_tokio_codec::{ + Decoder, + Encoder, + Framed, + FramedParts, + FramedRead, + FramedWrite, +}; + +pub use bytes_codec::BytesCodec; +pub use lines_codec::LinesCodec; diff --git a/third_party/rust/tokio-codec/src/lines_codec.rs b/third_party/rust/tokio-codec/src/lines_codec.rs new file mode 100644 index 000000000000..bf4135b8e32c --- /dev/null +++ b/third_party/rust/tokio-codec/src/lines_codec.rs @@ -0,0 +1,89 @@ +use bytes::{BufMut, BytesMut}; +use tokio_io::_tokio_codec::{Encoder, Decoder}; +use std::{io, str}; + +/// A simple `Codec` implementation that splits up data into lines. +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct LinesCodec { + // Stored index of the next index to examine for a `\n` character. + // This is used to optimize searching. + // For example, if `decode` was called with `abc`, it would hold `3`, + // because that is the next index to examine. + // The next time `decode` is called with `abcde\n`, the method will + // only look at `de\n` before returning. + next_index: usize, +} + +impl LinesCodec { + /// Returns a `LinesCodec` for splitting up data into lines. 
+ pub fn new() -> LinesCodec { + LinesCodec { next_index: 0 } + } +} + +fn utf8(buf: &[u8]) -> Result<&str, io::Error> { + str::from_utf8(buf).map_err(|_| + io::Error::new( + io::ErrorKind::InvalidData, + "Unable to decode input as UTF8")) +} + +fn without_carriage_return(s: &[u8]) -> &[u8] { + if let Some(&b'\r') = s.last() { + &s[..s.len() - 1] + } else { + s + } +} + +impl Decoder for LinesCodec { + type Item = String; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + if let Some(newline_offset) = + buf[self.next_index..].iter().position(|b| *b == b'\n') + { + let newline_index = newline_offset + self.next_index; + let line = buf.split_to(newline_index + 1); + let line = &line[..line.len()-1]; + let line = without_carriage_return(line); + let line = utf8(line)?; + self.next_index = 0; + Ok(Some(line.to_string())) + } else { + self.next_index = buf.len(); + Ok(None) + } + } + + fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + Ok(match self.decode(buf)? { + Some(frame) => Some(frame), + None => { + // No terminating newline - return remaining data, if any + if buf.is_empty() || buf == &b"\r"[..] 
{ + None + } else { + let line = buf.take(); + let line = without_carriage_return(&line); + let line = utf8(line)?; + self.next_index = 0; + Some(line.to_string()) + } + } + }) + } +} + +impl Encoder for LinesCodec { + type Item = String; + type Error = io::Error; + + fn encode(&mut self, line: String, buf: &mut BytesMut) -> Result<(), io::Error> { + buf.reserve(line.len() + 1); + buf.put(line); + buf.put_u8(b'\n'); + Ok(()) + } +} diff --git a/third_party/rust/tokio-codec/tests/codecs.rs b/third_party/rust/tokio-codec/tests/codecs.rs new file mode 100644 index 000000000000..6359e7c72ffd --- /dev/null +++ b/third_party/rust/tokio-codec/tests/codecs.rs @@ -0,0 +1,76 @@ +extern crate tokio_codec; +extern crate bytes; + +use bytes::{BytesMut, Bytes, BufMut}; +use tokio_codec::{BytesCodec, LinesCodec, Decoder, Encoder}; + +#[test] +fn bytes_decoder() { + let mut codec = BytesCodec::new(); + let buf = &mut BytesMut::new(); + buf.put_slice(b"abc"); + assert_eq!("abc", codec.decode(buf).unwrap().unwrap()); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode(buf).unwrap()); + buf.put_slice(b"a"); + assert_eq!("a", codec.decode(buf).unwrap().unwrap()); +} + +#[test] +fn bytes_encoder() { + let mut codec = BytesCodec::new(); + + // Default capacity of BytesMut + #[cfg(target_pointer_width = "64")] + const INLINE_CAP: usize = 4 * 8 - 1; + #[cfg(target_pointer_width = "32")] + const INLINE_CAP: usize = 4 * 4 - 1; + + let mut buf = BytesMut::new(); + codec.encode(Bytes::from_static(&[0; INLINE_CAP + 1]), &mut buf).unwrap(); + + // Default capacity of Framed Read + const INITIAL_CAPACITY: usize = 8 * 1024; + + let mut buf = BytesMut::with_capacity(INITIAL_CAPACITY); + codec.encode(Bytes::from_static(&[0; INITIAL_CAPACITY + 1]), &mut buf).unwrap(); +} + +#[test] +fn lines_decoder() { + let mut codec = LinesCodec::new(); + let buf = &mut BytesMut::new(); + buf.reserve(200); + buf.put("line 1\nline 2\r\nline 3\n\r\n\r"); + assert_eq!("line 1", 
codec.decode(buf).unwrap().unwrap()); + assert_eq!("line 2", codec.decode(buf).unwrap().unwrap()); + assert_eq!("line 3", codec.decode(buf).unwrap().unwrap()); + assert_eq!("", codec.decode(buf).unwrap().unwrap()); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode_eof(buf).unwrap()); + buf.put("k"); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!("\rk", codec.decode_eof(buf).unwrap().unwrap()); + assert_eq!(None, codec.decode(buf).unwrap()); + assert_eq!(None, codec.decode_eof(buf).unwrap()); +} + +#[test] +fn lines_encoder() { + let mut codec = BytesCodec::new(); + + // Default capacity of BytesMut + #[cfg(target_pointer_width = "64")] + const INLINE_CAP: usize = 4 * 8 - 1; + #[cfg(target_pointer_width = "32")] + const INLINE_CAP: usize = 4 * 4 - 1; + + let mut buf = BytesMut::new(); + codec.encode(Bytes::from_static(&[b'a'; INLINE_CAP + 1]), &mut buf).unwrap(); + + // Default capacity of Framed Read + const INITIAL_CAPACITY: usize = 8 * 1024; + + let mut buf = BytesMut::with_capacity(INITIAL_CAPACITY); + codec.encode(Bytes::from_static(&[b'a'; INITIAL_CAPACITY + 1]), &mut buf).unwrap(); +} diff --git a/third_party/rust/tokio-io/tests/framed.rs b/third_party/rust/tokio-codec/tests/framed.rs similarity index 54% rename from third_party/rust/tokio-io/tests/framed.rs rename to third_party/rust/tokio-codec/tests/framed.rs index 7b2693be1054..f7dd9cdf708f 100644 --- a/third_party/rust/tokio-io/tests/framed.rs +++ b/third_party/rust/tokio-codec/tests/framed.rs @@ -1,15 +1,17 @@ +extern crate tokio_codec; extern crate tokio_io; extern crate bytes; extern crate futures; use futures::{Stream, Future}; use std::io::{self, Read}; -use tokio_io::codec::{Framed, FramedParts, Decoder, Encoder}; +use tokio_codec::{Framed, FramedParts, Decoder, Encoder}; use tokio_io::AsyncRead; -use bytes::{BytesMut, Buf, BufMut, IntoBuf, BigEndian}; +use bytes::{BytesMut, Buf, BufMut, IntoBuf}; const INITIAL_CAPACITY: usize = 8 * 1024; +/// Encode and 
decode u32 values. struct U32Codec; impl Decoder for U32Codec { @@ -21,7 +23,7 @@ impl Decoder for U32Codec { return Ok(None); } - let n = buf.split_to(4).into_buf().get_u32::(); + let n = buf.split_to(4).into_buf().get_u32_be(); Ok(Some(n)) } } @@ -33,11 +35,12 @@ impl Encoder for U32Codec { fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> { // Reserve space dst.reserve(4); - dst.put_u32::(item); + dst.put_u32_be(item); Ok(()) } } +/// This value should never be used struct DontReadIntoThis; impl Read for DontReadIntoThis { @@ -51,12 +54,10 @@ impl AsyncRead for DontReadIntoThis {} #[test] fn can_read_from_existing_buf() { - let parts = FramedParts { - inner: DontReadIntoThis, - readbuf: vec![0, 0, 0, 42].into(), - writebuf: BytesMut::with_capacity(0), - }; - let framed = Framed::from_parts(parts, U32Codec); + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec); + parts.read_buf = vec![0, 0, 0, 42].into(); + + let framed = Framed::from_parts(parts); let num = framed .into_future() @@ -66,32 +67,28 @@ fn can_read_from_existing_buf() { .wait() .map_err(|e| e.0) .unwrap(); + assert_eq!(num, 42); } #[test] fn external_buf_grows_to_init() { - let parts = FramedParts { - inner: DontReadIntoThis, - readbuf: vec![0, 0, 0, 42].into(), - writebuf: BytesMut::with_capacity(0), - }; - let framed = Framed::from_parts(parts, U32Codec); - let FramedParts { readbuf, .. } = framed.into_parts(); + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec); + parts.read_buf = vec![0, 0, 0, 42].into(); - assert_eq!(readbuf.capacity(), INITIAL_CAPACITY); + let framed = Framed::from_parts(parts); + let FramedParts { read_buf, .. 
} = framed.into_parts(); + + assert_eq!(read_buf.capacity(), INITIAL_CAPACITY); } #[test] fn external_buf_does_not_shrink() { - let parts = FramedParts { - inner: DontReadIntoThis, - readbuf: vec![0; INITIAL_CAPACITY * 2].into(), - writebuf: BytesMut::with_capacity(0), - }; - let framed = Framed::from_parts(parts, U32Codec); - let FramedParts { readbuf, .. } = framed.into_parts(); + let mut parts = FramedParts::new(DontReadIntoThis, U32Codec); + parts.read_buf = vec![0; INITIAL_CAPACITY * 2].into(); - assert_eq!(readbuf.capacity(), INITIAL_CAPACITY * 2); + let framed = Framed::from_parts(parts); + let FramedParts { read_buf, .. } = framed.into_parts(); + + assert_eq!(read_buf.capacity(), INITIAL_CAPACITY * 2); } - diff --git a/third_party/rust/tokio-io/tests/framed_read.rs b/third_party/rust/tokio-codec/tests/framed_read.rs similarity index 98% rename from third_party/rust/tokio-io/tests/framed_read.rs rename to third_party/rust/tokio-codec/tests/framed_read.rs index 0dd327372f5b..80dfa5e5056a 100644 --- a/third_party/rust/tokio-io/tests/framed_read.rs +++ b/third_party/rust/tokio-codec/tests/framed_read.rs @@ -1,9 +1,10 @@ +extern crate tokio_codec; extern crate tokio_io; extern crate bytes; extern crate futures; use tokio_io::AsyncRead; -use tokio_io::codec::{FramedRead, Decoder}; +use tokio_codec::{FramedRead, Decoder}; use bytes::{BytesMut, Buf, IntoBuf, BigEndian}; use futures::Stream; diff --git a/third_party/rust/tokio-io/tests/framed_write.rs b/third_party/rust/tokio-codec/tests/framed_write.rs similarity index 96% rename from third_party/rust/tokio-io/tests/framed_write.rs rename to third_party/rust/tokio-codec/tests/framed_write.rs index 801602684001..137fb5be13d3 100644 --- a/third_party/rust/tokio-io/tests/framed_write.rs +++ b/third_party/rust/tokio-codec/tests/framed_write.rs @@ -1,9 +1,10 @@ +extern crate tokio_codec; extern crate tokio_io; extern crate bytes; extern crate futures; use tokio_io::AsyncWrite; -use tokio_io::codec::{Encoder, 
FramedWrite}; +use tokio_codec::{Encoder, FramedWrite}; use futures::{Sink, Poll}; use bytes::{BytesMut, BufMut, BigEndian}; @@ -28,7 +29,7 @@ impl Encoder for U32Encoder { fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> { // Reserve space dst.reserve(4); - dst.put_u32::(item); + dst.put_u32_be(item); Ok(()) } } @@ -65,7 +66,7 @@ fn write_hits_backpressure() { for i in 0..(ITER + 1) { let mut b = BytesMut::with_capacity(4); - b.put_u32::(i as u32); + b.put_u32_be(i as u32); // Append to the end match mock.calls.back_mut().unwrap() { diff --git a/third_party/rust/tokio-core/.cargo-checksum.json b/third_party/rust/tokio-core/.cargo-checksum.json index c029282eb21e..7828a122677d 100644 --- a/third_party/rust/tokio-core/.cargo-checksum.json +++ b/third_party/rust/tokio-core/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".travis.yml":"2b7fd7a794a404db00e20b9f9fc3f9b45cfa3ab816382b15e8eb0ba9fd0ea043","Cargo.toml":"71e9047397cdbb17762daf98bd12950afd195e7be2b08744349bf18fd2eec15a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"0b79c264429cd07f6a26caf0cd6e053ee054efb78290e3b1ba33dc657a0a2f0b","appveyor.yml":"65fe7e8f9acb2cb6305fe65f8fa5eb88d38dcd18988dd6932f085476c2fb70c7","benches/latency.rs":"fdb479530b48e08ec1437917c6f61b4d516c5dc04e4c01424d0861a724767950","benches/mio-ops.rs":"ea21132d525821f0876c6ba07d53ee3abc80d20c7f961dcdbb5785c011279ffa","examples/chat.rs":"56751b765b167deee1554425c94fa031e654c6c7c5c32459770ed0c4aee7419f","examples/connect.rs":"17fdcb50e10b5826240ff6dab51e4f1cc5856a2827abcca97d572d59c285ceb3","examples/echo-udp.rs":"e79c3e43ce38fc9f53e51a6ad2fb8d48906564ed6ed4b500eee2dbdc870108d3","examples/echo.rs":"b496ae301786f5e864a0edc8434f0183fe4461e38b9ced2ac106dd6293296077","examples/hello.rs":"b9906d61f29d0d3b5bdb78094f8808eb97ee40fc1d148f0e44e5a3468d60a627","examples/proxy.rs":"3cfb742c40d1a8b9441ba6d7ed89032
c90850f36b41b0e5e32fcf43d6e457c3d","examples/sink.rs":"c0013ef03e0dcbf83179b233ac289988786e270ac5c1cb5a587646b443de98db","examples/udp-codec.rs":"9c75011293674c7e940496a178fe900c542ec5579ee15cbc36392546fa2be4a3","src/channel.rs":"b41271b362614595bd8a1f40907285075453e44ea5afc403316215ccbc9a24e5","src/heap.rs":"91c7a89cd5eb30618209800b4d9779923b36aa21cffef175d9320bbf8b631e17","src/io/copy.rs":"0e05d1d3fb5989e8bd2f78f5602c96b1fff37e7cdb3630997e0611ae6ff04780","src/io/flush.rs":"861cbdbb40f8d16e3302e40012ae19a0520761e3e77bfe591f1f23b6b19e98fb","src/io/frame.rs":"652fba7c3a56d9c980dc173e067cf36b4fe08e2b15bfebaad303d455467efbb6","src/io/mod.rs":"9ba9c57bca90c4eec5b5673894571dfff97fcc5f9f0fc5a67e9ede82639d8358","src/io/read.rs":"f71c562b50902aa7fad1a597bdcffa5e9028149c4dc243308d6633d35dddcb99","src/io/read_exact.rs":"bdcfe2abfc10db5fd48135268aaa84f40a86dbfb768ddacdcc6f387acbd0e66b","src/io/read_to_end.rs":"b39c5a24e8a7c8bd83f29cb435b5d7cc65e5ce9bd6ae43190c1ca8bfd0f9c8a8","src/io/read_until.rs":"da944ab44103caddc0b1a40565775fe31a963db7f14508dd7a69d39fea6de3de","src/io/split.rs":"8f6fa075bf5204380dbacce06bad3392f41b3aee76711563f67bdcd8ffff69c4","src/io/window.rs":"263c3eae1f36563edbe2f37d728e0e0d0bcc9ed2f60540efc18e50bef5dd55f6","src/io/write_all.rs":"b6d6a41e92841d9608d6e041d1c191cfb36176a745e49631f51cc540c6297b5e","src/lib.rs":"ff3447535164d6057026dd4d1be78972a8e4e292ad2c0cb997379ea4dd93f425","src/net/mod.rs":"2d250c12416d2cc1b088f6a3a3226ec53861a9cf67bcd1f1c99b25dc5c9e416e","src/net/tcp.rs":"760deea09312f72b2108198d2c291afd96772dcec889ee784880b3861b8d9347","src/net/udp/frame.rs":"4e8e4ea1a3a1bdac03ee6576f523e055137a46c7e4cbdd5e58c7be6ccb818af0","src/net/udp/mod.rs":"1820953251a03ba514e9d4a4af4d27497b3c3f60bb6a57dd7cbbda3410677345","src/reactor/interval.rs":"149f45c1ff982c495860aab17c1a6ffe9f696753caf49825b4c06369f3d073c2","src/reactor/io_token.rs":"ab6c23c79b149d9a7f4bfb02732b12a71dc2a83717baf374000c8599438a85ed","src/reactor/mod.rs":"8f974ba31db656373f1146acf85881b03d031
6d9c2ae8be023a17b33d51d9409","src/reactor/poll_evented.rs":"a37657eb4357d112c62d5de128117a2cc9a0642142785f92d65f5eb711032e9c","src/reactor/timeout.rs":"b9730643d236347c29c883235c3b95df957c33dc61b832bc1537badbde7279b0","src/reactor/timeout_token.rs":"2536aa2f8aefb4073a70dbe3e89d81bd1fe27c618d2ea6b6f21594bdee34191c","tests/buffered.rs":"6bb8371b8c35c277fcf33e213a18ed521534b4285c853b8773d94b843fe1e99f","tests/chain.rs":"e40145b41e48da31d6d0ab4dc49036bf5b0582b5b9cccb9e16e621c2c59286ff","tests/echo.rs":"ad686f974455ae5bab9a9745f6ec3dc6b5e9ae3f00c3171894706c2c285dccda","tests/interval.rs":"5bd96d5ce47f9ab5637057cdbc1cb292d1f969bcf7bec4d36c5a0044d64c3f9a","tests/limit.rs":"da99b76fb6ef9c7e94877ed85fcde992be0f368070ea65fc9ea041542c5165a4","tests/line-frames.rs":"67c8f1f6d430c8e27adf1da40de4becef389cf090247bda30153d0cbcb37261b","tests/pipe-hup.rs":"392b46b77cbf8c4145e75d0ca0f0bbc8d614b949a600b391fbea559ed32efdbd","tests/spawn.rs":"0a167ae55c4f192c87e1d75ed352385ccff78b18c46ca1c021484cd1bbff1563","tests/stream-buffered.rs":"6ea52c26af8abdedf7839d78b30086662d622b9f7a8a36ed5f3554cc48a37c7a","tests/tcp.rs":"c200e00f7ee74f2fe758ac9b7a1febe678bc0a72bd4eb389e2577b7c3cfba823","tests/timeout.rs":"11ecf55a3c6f1956ac30f0e238ffb6262f3f20d751b8ee543c259d98a50d3ffa","tests/udp.rs":"d6cfd67748a497771d604b8059ae7cf71ba17f28c5d611c9f83c914e60715eb3"},"package":"febd81b3e2ef615c6c8077347b33f3f3deec3d708ecd08194c9707b7a1eccfc9"} \ No newline at end of file 
+{"files":{".travis.yml":"39a6e7f97e6f5630593a2f6a45cf04d6759edcea1b447ffe3c902a41fd515038","Cargo.toml":"517999c03c76fc6cfe1eed9f025036a7fbc8bfcdcd2da3e53a24b71fb2b0ecc5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"163714e26a9180d52ea5fcde13981c9dd0fda9bc086f2e6197a7d79c772a1697","appveyor.yml":"65fe7e8f9acb2cb6305fe65f8fa5eb88d38dcd18988dd6932f085476c2fb70c7","benches/latency.rs":"fdb479530b48e08ec1437917c6f61b4d516c5dc04e4c01424d0861a724767950","benches/mio-ops.rs":"ea21132d525821f0876c6ba07d53ee3abc80d20c7f961dcdbb5785c011279ffa","benches/tcp.rs":"46765056193438ba8508c3a532f5841730a06390a2ef032863d5d1608cb825ff","examples/README.md":"6cf95e530d7ff9af879033b5d7e4950b47e2f3bdbb8e5285e4748743c5658ee0","examples/chat.rs":"ae8d1fd584cfee6ef47b8504d9adc52df19806a4dfd8d49dd2f7ee25490f0250","examples/compress.rs":"43f414da22dce04bb80e0c865d12cf8605007f3bce4d8585b3dfacc450a30092","examples/connect.rs":"258264176ebb324e51e8475670345dff392a4bef449bfc435cb24f9b38114d18","examples/echo-threads.rs":"2dfab055bbbef14d0a06f467bac761c1b6d513a21603ac58b4cca7ab073f4de5","examples/echo-udp.rs":"092bd4ae4edbbbc32151e2ca7202feb3ecdd213ef36f55b764e44ddddbe779b0","examples/echo.rs":"dce5e857646a21551f4f3f7e60ed0dd953f787c2ed0656375aee2f67f07f1077","examples/hello.rs":"a7c0ce08218f8f0f7a84b5aa536d43cb22a430f8b721a1b163d579fa21114a82","examples/proxy.rs":"f0e497196ae273f53d8fabfac8aba7a488ff293408a2577d0eb1ca6e709eb15e","examples/sink.rs":"5b6c70d3b71f1fa77fcfa4f1379e22d67119d0f852d8c5907264f8ddf8a92bcb","examples/tinydb.rs":"95a122068cc8eaeca32d6139ecf439b8cd0d98d17128b6b8465c9fc31019f257","examples/tinyhttp.rs":"0d061a0f415b78a661ab1dbaf6b6f974447b672eb16806741b496382ff11db26","examples/udp-codec.rs":"80a92c444b29bb90b943e78cf38ce527994afd9c93aa89b98ff3287b5ff84620","src/channel.rs":"151d48252d13fc5eab1f799a2501859862b4cc0af0f550f6cbb266fabc5ebc32","src/i
o/copy.rs":"a544cb2105fff7fd25371f0c4316e348173986bb6a15c41e998539c83638be25","src/io/flush.rs":"3585a1de5433fb33a4e1a3fc2e8c89eb3b110a43f6461ee8008ec75a91708748","src/io/frame.rs":"cd2b365bfc4dd9f522f40ce5a1754be6458c605507125a3f34d0edb99c76be2e","src/io/mod.rs":"042d0e2d1d5422b037d2d2a141b1b6719778536125af6684a860b5ba32b62e51","src/io/read.rs":"09d3e6b2fb619cf62f61f66bc850886ab299ff4deeac273f50d82b3241a78102","src/io/read_exact.rs":"6d7720826d530a6f3a619665b86aebc78d4c3135a3d9362b7a75c3f7f2a49fc2","src/io/read_to_end.rs":"7a49c8920486e43f24fd1bcc7b4aac7864f77546c5f3a9a5757fd76f93bceb0f","src/io/read_until.rs":"03667d8155db688f8ddf484eebc88313a689641ac33ddf5eeac0c4e430afb16e","src/io/split.rs":"4ba399c15873f83b0d3d63aa125ac408c60b89351e944ec5375303538332c699","src/io/window.rs":"263c3eae1f36563edbe2f37d728e0e0d0bcc9ed2f60540efc18e50bef5dd55f6","src/io/write_all.rs":"d0567671afd67ca604966e191920df5047a8926fa558d3d8db5c0a16f23a37ec","src/lib.rs":"eba957b8f4810f05332c1e9f4935b3494674d5a5a5a2b9763846005a37525717","src/net/mod.rs":"2d250c12416d2cc1b088f6a3a3226ec53861a9cf67bcd1f1c99b25dc5c9e416e","src/net/tcp.rs":"bbd4ef3053402dc90ca64ce0b967467892081c22de735b826abf031a7356b977","src/net/udp/frame.rs":"70f408980c6d1aafdfc7789bbdcec54e7ba18c614db885cbac4791d3243fad9d","src/net/udp/mod.rs":"d7020393234a8c3800f99cf260674e131d5ef87333fa82416a5cd844a5fd3e96","src/reactor/interval.rs":"665d46f727d0955dfd322aa57e78d5d789367d8b8731e8d7c9ccb73dae84fc2e","src/reactor/io_token.rs":"2a3d36040d20bb7e0643435dab88874f902074dd6e7b48bdf42309a8176db7a9","src/reactor/mod.rs":"d089dab343b5a0a51119c2a47f9695681e1bfeb6f838df93e5259aa67322d95f","src/reactor/poll_evented.rs":"841472e2eac90fec2438bf7dec93702520a0079b8b9f527ef49905d29877753a","src/reactor/poll_evented2.rs":"ca2ae7d159201c40ef8ad63f9c93a996ad094d9c8b26b061faa32866989f5d32","src/reactor/timeout.rs":"3afa1ce1871f51da718976ced3be9e31d69aafdf9a2f4319f2b1ea937a17a598","tests/buffered.rs":"6bb8371b8c35c277fcf33e213a18ed521534b4285c853b
8773d94b843fe1e99f","tests/chain.rs":"e40145b41e48da31d6d0ab4dc49036bf5b0582b5b9cccb9e16e621c2c59286ff","tests/echo.rs":"ad686f974455ae5bab9a9745f6ec3dc6b5e9ae3f00c3171894706c2c285dccda","tests/interval.rs":"581c1027c286cc6b028bdc41ba6342945040a22d99321b8959ffc7b3a60c3225","tests/limit.rs":"da99b76fb6ef9c7e94877ed85fcde992be0f368070ea65fc9ea041542c5165a4","tests/line-frames.rs":"9431751f510d65024c973b640b8fa0bb910e83c59cc7c6f9f83287aee059b156","tests/pipe-hup.rs":"392b46b77cbf8c4145e75d0ca0f0bbc8d614b949a600b391fbea559ed32efdbd","tests/spawn.rs":"6f56629d693c5fe770a3cacf4555997feb3e83d1f4c4ceaed7811861e05a38c9","tests/stream-buffered.rs":"6ea52c26af8abdedf7839d78b30086662d622b9f7a8a36ed5f3554cc48a37c7a","tests/tcp.rs":"c7f3a9c6674084ce50e8bd7f011cbdea92c359dfc6136a3913adcd2b3f390d81","tests/timeout.rs":"ab2b022c080ec5c18dccdb828d696f790e34b8906b874b154cc0524a97d04fd7","tests/udp.rs":"24e82c25f31e3c6516792b26f59b87fd8fd410633b51ed530823ad8e9ae7151f"},"package":"aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71"} \ No newline at end of file diff --git a/third_party/rust/tokio-core/.travis.yml b/third_party/rust/tokio-core/.travis.yml index c9accf6eaf2c..eceacf0d0427 100644 --- a/third_party/rust/tokio-core/.travis.yml +++ b/third_party/rust/tokio-core/.travis.yml @@ -1,27 +1,31 @@ language: rust - -rust: - - stable - - beta - - nightly sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - cargo build - - cargo test - - cargo doc --no-deps - - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo bench ; fi -after_success: - - travis-cargo --only nightly doc-upload +matrix: + include: + - rust: 1.21.0 + - rust: stable + - os: osx + - rust: beta + - rust: nightly + + - rust: nightly + before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH + script: + - cargo doc --no-deps --all-features + after_success: + - travis-cargo --only nightly doc-upload + 
+script: + - cargo test + env: global: + - RUSTFLAGS='--cfg assert_timer_heap_consistent' - secure: "gOETHEX34re+YOgwdPG+wxSWZ1Nn5Q4+pk5b3mpaPS2RRVLdNlm7oJFYJMp1MsO3r4t5z4ntpBQUy/rQXPzzSOUqb0E+wnOtAFD+rspY0z5rJMwOghfdNst/Jsa5+EJeGWHEXd6YNdH1fILg94OCzzzmdjQH59F5UqRtY4EfMZQ9BzxuH0nNrCtys4xf0fstmlezw6mCyKR7DL2JxMf7ux10JeCTsj8BCT/yFKZ4HhFiKGVUpWSSTY3+lESnI4rKLynZEnFAkrHlIMyNRXf+lLfoTCTdmG0LAjf4AMsxLA9sSHVEhz9gvazQB4lX4B+E2Tuq1v/QecKqpRvfb4nM+ldRrsIW6zNf5DGA4J07h1qnhB0DO0TftDNuZNArueDW/yaeO5u6M4TspozdKYRx8QVvHg609WEdQPiDg4HdR2EUHyGBYbWJTVoBbYM+Yv3Pa1zBw8r/82sH4SGj1GtBFfH4QxTwMzGpX8AF4l2HUUFlpLgCrrWwTCwTxuQUsvjUPfrKHIisZPFGeu92qjmMN+YZh8U1a/W9xOLFbrTOH+FVRt9XrkT2Cwtfcia/7TMS2kXWyxrz82zpAwL5SEpP0k84B7GqLGlZrCKboufMBrtE6Chycp2D2quyVM0/kF5x2ev6QHToT1FH2McVB1XwkxJNeCMZhOe4EDpyfovPweQ=" + notifications: email: on_success: never -os: - - linux - - osx diff --git a/third_party/rust/tokio-core/Cargo.toml b/third_party/rust/tokio-core/Cargo.toml index 1edf690e0959..57330971e305 100644 --- a/third_party/rust/tokio-core/Cargo.toml +++ b/third_party/rust/tokio-core/Cargo.toml @@ -1,31 +1,93 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "tokio-core" -version = "0.1.7" -authors = ["Alex Crichton "] -license = "MIT/Apache-2.0" -repository = "https://github.com/tokio-rs/tokio-core" +version = "0.1.17" +authors = ["Carl Lerche "] +description = "Core I/O and event loop primitives for asynchronous I/O in Rust. 
Foundation for\nthe rest of the tokio crates.\n" homepage = "https://tokio.rs" documentation = "https://docs.rs/tokio-core/0.1" -description = """ -Core I/O and event loop primitives for asynchronous I/O in Rust. Foundation for -the rest of the tokio crates. -""" categories = ["asynchronous"] +license = "MIT/Apache-2.0" +repository = "https://github.com/tokio-rs/tokio-core" +[dependencies.bytes] +version = "0.4" -[badges] -travis-ci = { repository = "tokio-rs/tokio-core" } -appveyor = { repository = "alexcrichton/tokio-core" } +[dependencies.futures] +version = "0.1.21" -[dependencies] -bytes = "0.4" -log = "0.3" -mio = "0.6.5" -scoped-tls = "0.1.0" -slab = "0.3" -iovec = "0.1" -tokio-io = "0.1" -futures = "0.1.11" +[dependencies.iovec] +version = "0.1" -[dev-dependencies] -env_logger = { version = "0.3", default-features = false } -libc = "0.2" +[dependencies.log] +version = "0.4" + +[dependencies.mio] +version = "0.6.12" + +[dependencies.scoped-tls] +version = "0.1.0" + +[dependencies.tokio] +version = "0.1.5" + +[dependencies.tokio-executor] +version = "0.1.2" + +[dependencies.tokio-io] +version = "0.1" + +[dependencies.tokio-reactor] +version = "0.1.1" + +[dependencies.tokio-timer] +version = "0.2.1" +[dev-dependencies.env_logger] +version = "0.4" +default-features = false + +[dev-dependencies.flate2] +version = "1" +features = ["tokio"] + +[dev-dependencies.futures-cpupool] +version = "0.1" + +[dev-dependencies.http] +version = "0.1" + +[dev-dependencies.httparse] +version = "1.0" + +[dev-dependencies.libc] +version = "0.2" + +[dev-dependencies.num_cpus] +version = "1.0" + +[dev-dependencies.serde] +version = "1.0" + +[dev-dependencies.serde_derive] +version = "1.0" + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.time] +version = "0.1" +[badges.appveyor] +repository = "alexcrichton/tokio-core" + +[badges.travis-ci] +repository = "tokio-rs/tokio-core" diff --git a/third_party/rust/tokio-core/README.md 
b/third_party/rust/tokio-core/README.md index 813bf80ff983..e99271caf0cd 100644 --- a/third_party/rust/tokio-core/README.md +++ b/third_party/rust/tokio-core/README.md @@ -1,3 +1,10 @@ +# Deprecation notice. + +This crate is scheduled for deprecation in favor of [tokio](http://github.com/tokio-rs/tokio). + +`tokio-core` is still actively maintained, but only bug fixes will be applied. +All new feature development is happening in [tokio](http://github.com/tokio-rs/tokio). + # tokio-core Core I/O and event loop abstraction for asynchronous I/O in Rust built on @@ -32,8 +39,17 @@ a great place to get started for the nitty-gritty. # License -`tokio-core` is primarily distributed under the terms of both the MIT license -and the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. +This project is licensed under either of -See LICENSE-APACHE, and LICENSE-MIT for details. + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in tokio-core by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/tokio-core/benches/tcp.rs b/third_party/rust/tokio-core/benches/tcp.rs new file mode 100644 index 000000000000..a8324a0d2ddd --- /dev/null +++ b/third_party/rust/tokio-core/benches/tcp.rs @@ -0,0 +1,353 @@ +#![feature(test)] + +extern crate futures; +extern crate tokio_core; + +#[macro_use] +extern crate tokio_io; + +pub extern crate test; + +mod prelude { + pub use futures::*; + pub use tokio_core::reactor::Core; + pub use tokio_core::net::{TcpListener, TcpStream}; + pub use tokio_io::io::read_to_end; + + pub use test::{self, Bencher}; + pub use std::thread; + pub use std::time::Duration; + pub use std::io::{self, Read, Write}; +} + +mod connect_churn { + use ::prelude::*; + + const NUM: usize = 300; + const CONCURRENT: usize = 8; + + #[bench] + fn one_thread(b: &mut Bencher) { + let addr = "127.0.0.1:0".parse().unwrap(); + let mut core = Core::new().unwrap(); + let handle = core.handle(); + let listener = TcpListener::bind(&addr, &handle).unwrap(); + let addr = listener.local_addr().unwrap(); + + // Spawn a single task that accepts & drops connections + handle.spawn( + listener.incoming() + .map_err(|e| panic!("server err: {:?}", e)) + .for_each(|_| Ok(()))); + + b.iter(move || { + let connects = stream::iter((0..NUM).map(|_| { + Ok(TcpStream::connect(&addr, &handle) + .and_then(|sock| { + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + read_to_end(sock, vec![]) + })) + })); + + core.run( + connects.buffer_unordered(CONCURRENT) + .map_err(|e| panic!("client err: {:?}", e)) + .for_each(|_| Ok(()))).unwrap(); + }); + } + + fn n_workers(n: usize, b: &mut Bencher) { + let (shutdown_tx, shutdown_rx) = sync::oneshot::channel(); + let (remote_tx, remote_rx) = ::std::sync::mpsc::channel(); + + // Spawn reactor thread + thread::spawn(move || { + // Create the core + let mut core = Core::new().unwrap(); + + // Reactor handles + let handle = core.handle(); + let remote = handle.remote().clone(); + + // Bind the TCP listener + let 
listener = TcpListener::bind( + &"127.0.0.1:0".parse().unwrap(), &handle).unwrap(); + + // Get the address being listened on. + let addr = listener.local_addr().unwrap(); + + // Send the remote & address back to the main thread + remote_tx.send((remote, addr)).unwrap(); + + // Spawn a single task that accepts & drops connections + handle.spawn( + listener.incoming() + .map_err(|e| panic!("server err: {:?}", e)) + .for_each(|_| Ok(()))); + + // Run the reactor + core.run(shutdown_rx).unwrap(); + }); + + // Get the remote info + let (remote, addr) = remote_rx.recv().unwrap(); + + b.iter(move || { + use std::sync::{Barrier, Arc}; + + // Create a barrier to coordinate threads + let barrier = Arc::new(Barrier::new(n + 1)); + + // Spawn worker threads + let threads: Vec<_> = (0..n).map(|_| { + let barrier = barrier.clone(); + let remote = remote.clone(); + let addr = addr.clone(); + + thread::spawn(move || { + let connects = stream::iter((0..(NUM / n)).map(|_| { + // TODO: Once `Handle` is `Send / Sync`, update this + + let (socket_tx, socket_rx) = sync::oneshot::channel(); + + remote.spawn(move |handle| { + TcpStream::connect(&addr, &handle) + .map_err(|e| panic!("connect err: {:?}", e)) + .then(|res| socket_tx.send(res)) + .map_err(|_| ()) + }); + + Ok(socket_rx + .then(|res| res.unwrap()) + .and_then(|sock| { + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + read_to_end(sock, vec![]) + })) + })); + + barrier.wait(); + + connects.buffer_unordered(CONCURRENT) + .map_err(|e| panic!("client err: {:?}", e)) + .for_each(|_| Ok(())).wait().unwrap(); + }) + }).collect(); + + barrier.wait(); + + for th in threads { + th.join().unwrap(); + } + }); + + // Shutdown the reactor + shutdown_tx.send(()).unwrap(); + } + + #[bench] + fn two_threads(b: &mut Bencher) { + n_workers(1, b); + } + + #[bench] + fn multi_threads(b: &mut Bencher) { + n_workers(4, b); + } +} + +mod transfer { + use ::prelude::*; + use std::{cmp, mem}; + + const MB: usize = 3 * 1024 * 1024; + + struct 
Drain { + sock: TcpStream, + chunk: usize, + } + + impl Future for Drain { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + let mut buf: [u8; 1024] = unsafe { mem::uninitialized() }; + + loop { + match try_nb!(self.sock.read(&mut buf[..self.chunk])) { + 0 => return Ok(Async::Ready(())), + _ => {} + } + } + } + } + + struct Transfer { + sock: TcpStream, + rem: usize, + chunk: usize, + } + + impl Future for Transfer { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + while self.rem > 0 { + let len = cmp::min(self.rem, self.chunk); + let buf = &DATA[..len]; + + let n = try_nb!(self.sock.write(&buf)); + self.rem -= n; + } + + Ok(Async::Ready(())) + } + } + + static DATA: [u8; 1024] = [0; 1024]; + + fn one_thread(b: &mut Bencher, read_size: usize, write_size: usize) { + let addr = "127.0.0.1:0".parse().unwrap(); + let mut core = Core::new().unwrap(); + let handle = core.handle(); + let listener = TcpListener::bind(&addr, &handle).unwrap(); + let addr = listener.local_addr().unwrap(); + + let h2 = handle.clone(); + + // Spawn a single task that accepts & drops connections + handle.spawn( + listener.incoming() + .map_err(|e| panic!("server err: {:?}", e)) + .for_each(move |(sock, _)| { + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + let drain = Drain { + sock: sock, + chunk: read_size, + }; + + h2.spawn(drain.map_err(|e| panic!("server error: {:?}", e))); + + Ok(()) + })); + + b.iter(move || { + let client = TcpStream::connect(&addr, &handle) + .and_then(|sock| { + Transfer { + sock: sock, + rem: MB, + chunk: write_size, + } + }); + + core.run( + client.map_err(|e| panic!("client err: {:?}", e)) + ).unwrap(); + }); + } + + fn cross_thread(b: &mut Bencher, read_size: usize, write_size: usize) { + let (shutdown_tx, shutdown_rx) = sync::oneshot::channel(); + let (remote_tx, remote_rx) = ::std::sync::mpsc::channel(); + + // Spawn reactor thread + thread::spawn(move || { + // 
Create the core + let mut core = Core::new().unwrap(); + + // Reactor handles + let handle = core.handle(); + let remote = handle.remote().clone(); + + remote_tx.send(remote).unwrap(); + core.run(shutdown_rx).unwrap(); + }); + + let remote = remote_rx.recv().unwrap(); + + b.iter(move || { + let (server_tx, server_rx) = sync::oneshot::channel(); + let (client_tx, client_rx) = sync::oneshot::channel(); + + remote.spawn(|handle| { + let sock = TcpListener::bind(&"127.0.0.1:0".parse().unwrap(), &handle).unwrap(); + server_tx.send(sock).unwrap(); + Ok(()) + }); + + let remote2 = remote.clone(); + + server_rx.and_then(move |server| { + let addr = server.local_addr().unwrap(); + + remote2.spawn(move |handle| { + let fut = TcpStream::connect(&addr, &handle); + client_tx.send(fut).ok().unwrap(); + Ok(()) + }); + + let client = client_rx + .then(|res| res.unwrap()) + .and_then(move |sock| { + Transfer { + sock: sock, + rem: MB, + chunk: write_size, + } + }); + + let server = server.incoming().into_future() + .map_err(|(e, _)| e) + .and_then(move |(sock, _)| { + let sock = sock.unwrap().0; + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + + Drain { + sock: sock, + chunk: read_size, + } + }); + + client + .join(server) + .then(|res| { + let _ = res.unwrap(); + Ok(()) + }) + }).wait().unwrap(); + }); + + // Shutdown the reactor + shutdown_tx.send(()).unwrap(); + } + + mod small_chunks { + use ::prelude::*; + + #[bench] + fn one_thread(b: &mut Bencher) { + super::one_thread(b, 32, 32); + } + + #[bench] + fn cross_thread(b: &mut Bencher) { + super::cross_thread(b, 32, 32); + } + } + + mod big_chunks { + use ::prelude::*; + + #[bench] + fn one_thread(b: &mut Bencher) { + super::one_thread(b, 1_024, 1_024); + } + + #[bench] + fn cross_thread(b: &mut Bencher) { + super::cross_thread(b, 1_024, 1_024); + } + } +} diff --git a/third_party/rust/tokio-core/examples/README.md b/third_party/rust/tokio-core/examples/README.md new file mode 100644 index 000000000000..8215a6c8d523 
--- /dev/null +++ b/third_party/rust/tokio-core/examples/README.md @@ -0,0 +1,52 @@ +## Examples of `tokio-core` + +This directory contains a number of examples showcasing various capabilities of +the `tokio_core` crate. Most of these examples also leverage the `futures` and +`tokio_io` crates, along with a number of other miscellaneous dependencies for +various tasks. + +All examples can be executed with: + +``` +cargo run --example $name +``` + +A high level description of each example is: + +* `hello` - a tiny server that simply writes "Hello!" to all connected clients + and then terminates the connection, should help see how to create and + initialize `tokio_core`. +* `echo` - this is your standard TCP "echo server" which simply accepts + connections and then echos back any contents that are read from each connected + client. +* `echo-udp` - again your standard "echo server", except for UDP instead of TCP. + This will echo back any packets received to the original sender. +* `echo-threads` - servers the same purpose as the `echo` example, except this + shows off using multiple cores on a machine for doing I/O processing. +* `connect` - this is a `nc`-like clone which can be used to interact with most + other examples. The program creates a TCP connection or UDP socket to sends + all information read on stdin to the remote peer, displaying any data received + on stdout. Often quite useful when interacting with the various other servers + here! +* `chat` - this spins up a local TCP server which will broadcast from any + connected client to all other connected clients. You can connect to this in + multiple terminals and use it to chat between the terminals. +* `proxy` - an example proxy server that will forward all connected TCP clients + to the remote address specified when starting the program. +* `sink` - a benchmark-like example which shows writing 0s infinitely to any + connected client. 
+* `tinyhttp` - a tiny HTTP/1.1 server which doesn't support HTTP request bodies + showcasing running on multiple cores, working with futures and spawning + tasks, and finally framing a TCP connection to discrete request/response + objects. +* `udp-codec` - an example of using the `UdpCodec` trait along with a small + ping-pong protocol happening locally. +* `compress` - an echo-like server where instead of echoing back everything read + it echos back a gzip-compressed version of everything read! All compression + occurs on a CPU pool to offload work from the event loop. +* `tinydb` - an in-memory database which shows sharing state between all + connected clients, notably the key/value store of this database. + +If you've got an example you'd like to see here, please feel free to open an +issue. Otherwise if you've got an example you'd like to add, please feel free +to make a PR! diff --git a/third_party/rust/tokio-core/examples/chat.rs b/third_party/rust/tokio-core/examples/chat.rs index 267e0aa63275..039fb68596d3 100644 --- a/third_party/rust/tokio-core/examples/chat.rs +++ b/third_party/rust/tokio-core/examples/chat.rs @@ -10,7 +10,7 @@ //! //! And then in another window run: //! -//! nc -4 localhost 8080 +//! cargo run --example connect 127.0.0.1:8080 //! //! You can run the second command in multiple windows and then chat between the //! two, seeing the messages from the other client as they're received. For all @@ -68,7 +68,7 @@ fn main() { // Model the read portion of this socket by mapping an infinite // iterator to each line off the socket. This "loop" is then // terminated with an error once we hit EOF on the socket. 
- let iter = stream::iter(iter::repeat(()).map(Ok::<(), Error>)); + let iter = stream::iter_ok::<_, Error>(iter::repeat(())); let socket_reader = iter.fold(reader, move |reader, _| { // Read a line off the socket, failing if we're at EOF let line = io::read_until(reader, b'\n', Vec::new()); @@ -96,11 +96,11 @@ fn main() { .filter(|&(&k, _)| k != addr) .map(|(_, v)| v); for tx in iter { - tx.send(format!("{}: {}", addr, msg)).unwrap(); + tx.unbounded_send(format!("{}: {}", addr, msg)).unwrap(); } } else { let tx = conns.get_mut(&addr).unwrap(); - tx.send("You didn't send valid UTF-8.".to_string()).unwrap(); + tx.unbounded_send("You didn't send valid UTF-8.".to_string()).unwrap(); } reader }) diff --git a/third_party/rust/tokio-core/examples/compress.rs b/third_party/rust/tokio-core/examples/compress.rs new file mode 100644 index 000000000000..c263b1a29a92 --- /dev/null +++ b/third_party/rust/tokio-core/examples/compress.rs @@ -0,0 +1,123 @@ +//! An example of offloading work to a thread pool instead of doing work on the +//! main event loop. +//! +//! In this example the server will act as a form of echo server except that +//! it'll echo back gzip-compressed data. Each connected client will have the +//! data written streamed back as the compressed version is available, and all +//! compressing will occur on a thread pool rather than the main event loop. +//! +//! You can preview this example with in one terminal: +//! +//! cargo run --example compress +//! +//! and in another terminal; +//! +//! echo test | cargo run --example connect 127.0.0.1:8080 | gunzip +//! +//! The latter command will need to be tweaked for non-unix-like shells, but +//! you can also redirect the stdout of the `connect` program to a file +//! and then decompress that. 
+ +extern crate futures; +extern crate futures_cpupool; +extern crate flate2; +extern crate tokio_core; +extern crate tokio_io; + +use std::io; +use std::env; +use std::net::SocketAddr; + +use futures::{Future, Stream, Poll}; +use futures_cpupool::CpuPool; +use tokio_core::net::{TcpListener, TcpStream}; +use tokio_core::reactor::Core; +use tokio_io::{AsyncRead, AsyncWrite}; +use flate2::write::GzEncoder; + +fn main() { + // As with many other examples, parse our CLI arguments and prepare the + // reactor. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + let mut core = Core::new().unwrap(); + let handle = core.handle(); + let socket = TcpListener::bind(&addr, &handle).unwrap(); + println!("Listening on: {}", addr); + + // This is where we're going to offload our computationally heavy work + // (compressing) to. Here we just use a convenience constructor to create a + // pool of threads equal to the number of CPUs we have. + let pool = CpuPool::new_num_cpus(); + + // The compress logic will happen in the function below, but everything's + // still a future! Each client is spawned to concurrently get processed. + let server = socket.incoming().for_each(move |(socket, addr)| { + handle.spawn(compress(socket, &pool).then(move |result| { + match result { + Ok((r, w)) => println!("{}: compressed {} bytes to {}", addr, r, w), + Err(e) => println!("{}: failed when compressing: {}", addr, e), + } + Ok(()) + })); + Ok(()) + }); + + core.run(server).unwrap(); +} + +/// The main workhorse of this example. This'll compress all data read from +/// `socket` on the `pool` provided, writing it back out to `socket` as it's +/// available. +fn compress(socket: TcpStream, pool: &CpuPool) + -> Box> +{ + use tokio_io::io; + + // The general interface that `CpuPool` provides is that we'll *spawn a + // future* onto it. 
All execution of the future will occur on the `CpuPool` + // and we'll get back a handle representing the completed value of the + // future. In essence it's our job here to create a future that represents + // compressing `socket`, and then we'll simply spawn it at the very end. + // + // Here we exploit the fact that `TcpStream` itself is `Send` in this + // function as well. That is, we can read/write the TCP stream on any + // thread, and we'll get notifications about it being ready from the reactor + // thread. + // + // Otherwise this is the same as the echo server except that after splitting + // we apply some encoding to one side, followed by a `shutdown` when we're + // done to ensure that all gz footers are written. + let (read, write) = socket.split(); + let write = Count { io: write, amt: 0 }; + let write = GzEncoder::new(write, flate2::Compression::best()); + let process = io::copy(read, write).and_then(|(amt, _read, write)| { + io::shutdown(write).map(move |io| (amt, io.get_ref().amt)) + }); + + // Spawn the future so is executes entirely on the thread pool here + Box::new(pool.spawn(process)) +} + +struct Count { + io: T, + amt: u64, +} + +impl io::Write for Count { + fn write(&mut self, buf: &[u8]) -> io::Result { + let n = self.io.write(buf)?; + self.amt += n as u64; + Ok(n) + } + + fn flush(&mut self) -> io::Result<()> { + self.io.flush() + } +} + +impl AsyncWrite for Count { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.io.shutdown() + } +} diff --git a/third_party/rust/tokio-core/examples/connect.rs b/third_party/rust/tokio-core/examples/connect.rs index a167e006eeeb..f819983d3291 100644 --- a/third_party/rust/tokio-core/examples/connect.rs +++ b/third_party/rust/tokio-core/examples/connect.rs @@ -1,12 +1,18 @@ -//! A simple example of hooking up stdin/stdout to a TCP stream. +//! An example of hooking up stdin/stdout to either a TCP or UDP stream. //! -//! This example will connect to a server specified in the argument list and -//! 
then forward all data read on stdin to the server, printing out all data -//! received on stdout. +//! This example will connect to a socket address specified in the argument list +//! and then forward all data read on stdin to the server, printing out all data +//! received on stdout. An optional `--udp` argument can be passed to specify +//! that the connection should be made over UDP instead of TCP, translating each +//! line entered on stdin to a UDP packet to be sent to the remote address. //! -//! Note that this is not currently optimized for performance, especially around -//! buffer management. Rather it's intended to show an example of working with a -//! client. +//! Note that this is not currently optimized for performance, especially +//! around buffer management. Rather it's intended to show an example of +//! working with a client. +//! +//! This example can be quite useful when interacting with the other examples in +//! this repository! Many of them recommend running this as a simple "hook up +//! stdin/stdout to a server" to get up and running. 
extern crate futures; extern crate tokio_core; @@ -18,17 +24,23 @@ use std::io::{self, Read, Write}; use std::net::SocketAddr; use std::thread; -use bytes::{BufMut, BytesMut}; use futures::sync::mpsc; use futures::{Sink, Future, Stream}; -use tokio_core::net::TcpStream; use tokio_core::reactor::Core; -use tokio_io::AsyncRead; -use tokio_io::codec::{Encoder, Decoder}; fn main() { + // Determine if we're going to run in TCP or UDP mode + let mut args = env::args().skip(1).collect::>(); + let tcp = match args.iter().position(|a| a == "--udp") { + Some(i) => { + args.remove(i); + false + } + None => true, + }; + // Parse what address we're going to connect to - let addr = env::args().nth(1).unwrap_or_else(|| { + let addr = args.first().unwrap_or_else(|| { panic!("this program requires at least one argument") }); let addr = addr.parse::().unwrap(); @@ -36,82 +48,218 @@ fn main() { // Create the event loop and initiate the connection to the remote server let mut core = Core::new().unwrap(); let handle = core.handle(); - let tcp = TcpStream::connect(&addr, &handle); // Right now Tokio doesn't support a handle to stdin running on the event // loop, so we farm out that work to a separate thread. This thread will - // read data from stdin and then send it to the event loop over a standard - // futures channel. + // read data (with blocking I/O) from stdin and then send it to the event + // loop over a standard futures channel. let (stdin_tx, stdin_rx) = mpsc::channel(0); thread::spawn(|| read_stdin(stdin_tx)); let stdin_rx = stdin_rx.map_err(|_| panic!()); // errors not possible on rx - // After the TCP connection has been established, we set up our client to - // start forwarding data. - // - // First we use the `Io::framed` method with a simple implementation of a - // `Codec` (listed below) that just ships bytes around. We then split that - // in two to work with the stream and sink separately. 
- // - // Half of the work we're going to do is to take all data we receive on - // stdin (`stdin_rx`) and send that along the TCP stream (`sink`). The - // second half is to take all the data we receive (`stream`) and then write - // that to stdout. Currently we just write to stdout in a synchronous - // fashion. - // - // Finally we set the client to terminate once either half of this work - // finishes. If we don't have any more data to read or we won't receive any - // more work from the remote then we can exit. - let mut stdout = io::stdout(); - let client = tcp.and_then(|stream| { - let (sink, stream) = stream.framed(Bytes).split(); - let send_stdin = stdin_rx.forward(sink); - let write_stdout = stream.for_each(move |buf| { - stdout.write_all(&buf) - }); + // Now that we've got our stdin read we either set up our TCP connection or + // our UDP connection to get a stream of bytes we're going to emit to + // stdout. + let stdout = if tcp { + tcp::connect(&addr, &handle, Box::new(stdin_rx)) + } else { + udp::connect(&addr, &handle, Box::new(stdin_rx)) + }; - send_stdin.map(|_| ()) - .select(write_stdout.map(|_| ())) - .then(|_| Ok(())) - }); - - // And now that we've got our client, we execute it in the event loop! - core.run(client).unwrap(); + // And now with our stream of bytes to write to stdout, we execute that in + // the event loop! Note that this is doing blocking I/O to emit data to + // stdout, and in general it's a no-no to do that sort of work on the event + // loop. In this case, though, we know it's ok as the event loop isn't + // otherwise running anything useful. + let mut out = io::stdout(); + core.run(stdout.for_each(|chunk| { + out.write_all(&chunk) + })).unwrap(); } -/// A simple `Codec` implementation that just ships bytes around. -/// -/// This type is used for "framing" a TCP stream of bytes but it's really just a -/// convenient method for us to work with streams/sinks for now. 
This'll just -/// take any data read and interpret it as a "frame" and conversely just shove -/// data into the output location without looking at it. -struct Bytes; +mod tcp { + use std::io::{self, Read, Write}; + use std::net::{SocketAddr, Shutdown}; -impl Decoder for Bytes { - type Item = BytesMut; - type Error = io::Error; + use bytes::{BufMut, BytesMut}; + use futures::prelude::*; + use tokio_core::net::TcpStream; + use tokio_core::reactor::Handle; + use tokio_io::{AsyncRead, AsyncWrite}; + use tokio_io::codec::{Encoder, Decoder}; - fn decode(&mut self, buf: &mut BytesMut) -> io::Result> { - if buf.len() > 0 { - let len = buf.len(); - Ok(Some(buf.split_to(len))) - } else { - Ok(None) + pub fn connect(addr: &SocketAddr, + handle: &Handle, + stdin: Box, Error = io::Error>>) + -> Box> + { + let tcp = TcpStream::connect(addr, handle); + let handle = handle.clone(); + + // After the TCP connection has been established, we set up our client + // to start forwarding data. + // + // First we use the `Io::framed` method with a simple implementation of + // a `Codec` (listed below) that just ships bytes around. We then split + // that in two to work with the stream and sink separately. + // + // Half of the work we're going to do is to take all data we receive on + // `stdin` and send that along the TCP stream (`sink`). The second half + // is to take all the data we receive (`stream`) and then write that to + // stdout. We'll be passing this handle back out from this method. + // + // You'll also note that we *spawn* the work to read stdin and write it + // to the TCP stream. This is done to ensure that happens concurrently + // with us reading data from the stream. 
+ Box::new(tcp.map(move |stream| { + let stream = CloseWithShutdown(stream); + let (sink, stream) = stream.framed(Bytes).split(); + let copy_stdin = stdin.forward(sink) + .then(|result| { + if let Err(e) = result { + panic!("failed to write to socket: {}", e) + } + Ok(()) + }); + handle.spawn(copy_stdin); + stream + }).flatten_stream()) + } + + /// A small adapter to layer over our TCP stream which uses the `shutdown` + /// syscall when the writer side is shut down. This'll allow us to correctly + /// inform the remote end that we're done writing. + struct CloseWithShutdown(TcpStream); + + impl Read for CloseWithShutdown { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.0.read(buf) } } - fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result> { - self.decode(buf) + impl AsyncRead for CloseWithShutdown {} + + impl Write for CloseWithShutdown { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.0.flush() + } + } + + impl AsyncWrite for CloseWithShutdown { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.0.shutdown(Shutdown::Write)?; + Ok(().into()) + } + } + + /// A simple `Codec` implementation that just ships bytes around. + /// + /// This type is used for "framing" a TCP stream of bytes but it's really + /// just a convenient method for us to work with streams/sinks for now. + /// This'll just take any data read and interpret it as a "frame" and + /// conversely just shove data into the output location without looking at + /// it. 
+ struct Bytes; + + impl Decoder for Bytes { + type Item = BytesMut; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> io::Result> { + if buf.len() > 0 { + let len = buf.len(); + Ok(Some(buf.split_to(len))) + } else { + Ok(None) + } + } + + fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result> { + self.decode(buf) + } + } + + impl Encoder for Bytes { + type Item = Vec; + type Error = io::Error; + + fn encode(&mut self, data: Vec, buf: &mut BytesMut) -> io::Result<()> { + buf.put(&data[..]); + Ok(()) + } } } -impl Encoder for Bytes { - type Item = Vec; - type Error = io::Error; +mod udp { + use std::io; + use std::net::SocketAddr; - fn encode(&mut self, data: Vec, buf: &mut BytesMut) -> io::Result<()> { - buf.put(&data[..]); - Ok(()) + use bytes::BytesMut; + use futures::{Future, Stream}; + use tokio_core::net::{UdpCodec, UdpSocket}; + use tokio_core::reactor::Handle; + + pub fn connect(&addr: &SocketAddr, + handle: &Handle, + stdin: Box, Error = io::Error>>) + -> Box> + { + // We'll bind our UDP socket to a local IP/port, but for now we + // basically let the OS pick both of those. + let addr_to_bind = if addr.ip().is_ipv4() { + "0.0.0.0:0".parse().unwrap() + } else { + "[::]:0".parse().unwrap() + }; + let udp = UdpSocket::bind(&addr_to_bind, handle) + .expect("failed to bind socket"); + + // Like above with TCP we use an instance of `UdpCodec` to transform + // this UDP socket into a framed sink/stream which operates over + // discrete values. In this case we're working with *pairs* of socket + // addresses and byte buffers. + let (sink, stream) = udp.framed(Bytes).split(); + + // All bytes from `stdin` will go to the `addr` specified in our + // argument list. 
Like with TCP this is spawned concurrently + handle.spawn(stdin.map(move |chunk| { + (addr, chunk) + }).forward(sink).then(|result| { + if let Err(e) = result { + panic!("failed to write to socket: {}", e) + } + Ok(()) + })); + + // With UDP we could receive data from any source, so filter out + // anything coming from a different address + Box::new(stream.filter_map(move |(src, chunk)| { + if src == addr { + Some(chunk.into()) + } else { + None + } + })) + } + + struct Bytes; + + impl UdpCodec for Bytes { + type In = (SocketAddr, Vec); + type Out = (SocketAddr, Vec); + + fn decode(&mut self, addr: &SocketAddr, buf: &[u8]) -> io::Result { + Ok((*addr, buf.to_vec())) + } + + fn encode(&mut self, (addr, buf): Self::Out, into: &mut Vec) -> SocketAddr { + into.extend(buf); + addr + } } } @@ -127,6 +275,9 @@ fn read_stdin(mut tx: mpsc::Sender>) { Ok(n) => n, }; buf.truncate(n); - tx = tx.send(buf).wait().unwrap(); + tx = match tx.send(buf).wait() { + Ok(tx) => tx, + Err(_) => break, + }; } } diff --git a/third_party/rust/tokio-core/examples/echo-threads.rs b/third_party/rust/tokio-core/examples/echo-threads.rs new file mode 100644 index 000000000000..810fd04838fd --- /dev/null +++ b/third_party/rust/tokio-core/examples/echo-threads.rs @@ -0,0 +1,100 @@ +//! A multithreaded version of an echo server +//! +//! This server implements the same functionality as the `echo` example, except +//! that this example will use all cores of the machine to do I/O instead of +//! just one. This examples works by having the main thread using blocking I/O +//! and shipping accepted sockets to worker threads in a round-robin fashion. +//! +//! To see this server in action, you can run this in one terminal: +//! +//! cargo run --example echo-threads +//! +//! and in another terminal you can run: +//! +//! 
cargo run --example connect 127.0.0.1:8080 + +extern crate futures; +extern crate num_cpus; +extern crate tokio_core; +extern crate tokio_io; + +use std::env; +use std::net::{self, SocketAddr}; +use std::thread; + +use futures::Future; +use futures::stream::Stream; +use futures::sync::mpsc; +use tokio_io::AsyncRead; +use tokio_io::io::copy; +use tokio_core::net::TcpStream; +use tokio_core::reactor::Core; + +fn main() { + // First argument, the address to bind + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + + // Second argument, the number of threads we'll be using + let num_threads = env::args().nth(2).and_then(|s| s.parse().ok()) + .unwrap_or(num_cpus::get()); + + // Use `std::net` to bind the requested port, we'll use this on the main + // thread below + let listener = net::TcpListener::bind(&addr).expect("failed to bind"); + println!("Listening on: {}", addr); + + // Spin up our worker threads, creating a channel routing to each worker + // thread that we'll use below. + let mut channels = Vec::new(); + for _ in 0..num_threads { + let (tx, rx) = mpsc::unbounded(); + channels.push(tx); + thread::spawn(|| worker(rx)); + } + + // Infinitely accept sockets from our `std::net::TcpListener`, as this'll do + // blocking I/O. Each socket is then shipped round-robin to a particular + // thread which will associate the socket with the corresponding event loop + // and process the connection. + let mut next = 0; + for socket in listener.incoming() { + let socket = socket.expect("failed to accept"); + channels[next].unbounded_send(socket).expect("worker thread died"); + next = (next + 1) % channels.len(); + } +} + +fn worker(rx: mpsc::UnboundedReceiver) { + let mut core = Core::new().unwrap(); + let handle = core.handle(); + + let done = rx.for_each(move |socket| { + // First up when we receive a socket we associate it with our event loop + // using the `TcpStream::from_stream` API. 
After that the socket is not + // a `tokio_core::net::TcpStream` meaning it's in nonblocking mode and + // ready to be used with Tokio + let socket = TcpStream::from_stream(socket, &handle) + .expect("failed to associate TCP stream"); + let addr = socket.peer_addr().expect("failed to get remote address"); + + // Like the single-threaded `echo` example we split the socket halves + // and use the `copy` helper to ship bytes back and forth. Afterwards we + // spawn the task to run concurrently on this thread, and then print out + // what happened afterwards + let (reader, writer) = socket.split(); + let amt = copy(reader, writer); + let msg = amt.then(move |result| { + match result { + Ok((amt, _, _)) => println!("wrote {} bytes to {}", amt, addr), + Err(e) => println!("error on {}: {}", addr, e), + } + + Ok(()) + }); + handle.spawn(msg); + + Ok(()) + }); + core.run(done).unwrap(); +} diff --git a/third_party/rust/tokio-core/examples/echo-udp.rs b/third_party/rust/tokio-core/examples/echo-udp.rs index 1dd65ef7e80d..f555e609791c 100644 --- a/third_party/rust/tokio-core/examples/echo-udp.rs +++ b/third_party/rust/tokio-core/examples/echo-udp.rs @@ -6,7 +6,7 @@ //! //! and in another terminal you can run: //! -//! nc -4u localhost 8080 +//! cargo run --example connect -- --udp 127.0.0.1:8080 //! //! Each line you type in to the `nc` terminal should be echo'd back to you! @@ -58,7 +58,7 @@ fn main() { let mut l = Core::new().unwrap(); let handle = l.handle(); let socket = UdpSocket::bind(&addr, &handle).unwrap(); - println!("Listening on: {}", addr); + println!("Listening on: {}", socket.local_addr().unwrap()); // Next we'll create a future to spawn (the one we defined above) and then // we'll run the event loop by running the future. 
diff --git a/third_party/rust/tokio-core/examples/echo.rs b/third_party/rust/tokio-core/examples/echo.rs index 80e73ea74169..2bf8f391e8cc 100644 --- a/third_party/rust/tokio-core/examples/echo.rs +++ b/third_party/rust/tokio-core/examples/echo.rs @@ -1,4 +1,4 @@ -//! An "hello world" echo server with tokio-core +//! A "hello world" echo server with tokio-core //! //! This server will create a TCP listener, accept connections in a loop, and //! simply write back everything that's read off of each TCP connection. Each diff --git a/third_party/rust/tokio-core/examples/hello.rs b/third_party/rust/tokio-core/examples/hello.rs index a22517a5a87f..0d7a0ebcb135 100644 --- a/third_party/rust/tokio-core/examples/hello.rs +++ b/third_party/rust/tokio-core/examples/hello.rs @@ -7,7 +7,7 @@ //! //! and then in another terminal executing //! -//! nc -4 localhost 8080 +//! cargo run --example connect 127.0.0.1:8080 //! //! You should see `Hello!` printed out and then the `nc` program will exit. diff --git a/third_party/rust/tokio-core/examples/proxy.rs b/third_party/rust/tokio-core/examples/proxy.rs index 4920cc9fc20d..7bc538193caa 100644 --- a/third_party/rust/tokio-core/examples/proxy.rs +++ b/third_party/rust/tokio-core/examples/proxy.rs @@ -1,5 +1,20 @@ //! A proxy that forwards data to another server and forwards that server's //! responses back to clients. +//! +//! You can showcase this by running this in one terminal: +//! +//! cargo run --example proxy +//! +//! This in another terminal +//! +//! cargo run --example echo +//! +//! And finally this in another terminal +//! +//! cargo run --example connect 127.0.0.1:8081 +//! +//! This final terminal will connect to our proxy, which will in turn connect to +//! the echo server, and you'll be able to see data flowing between them. 
extern crate futures; extern crate tokio_core; diff --git a/third_party/rust/tokio-core/examples/sink.rs b/third_party/rust/tokio-core/examples/sink.rs index 2998b269f31c..d709178dc4ba 100644 --- a/third_party/rust/tokio-core/examples/sink.rs +++ b/third_party/rust/tokio-core/examples/sink.rs @@ -11,7 +11,7 @@ //! //! And then you can connect to it via: //! -//! nc -4 localhost 8080 > /dev/null +//! cargo run --example connect 127.0.0.1:8080 > /dev/null //! //! You should see your CPUs light up as data's being shove into the ether. @@ -35,23 +35,22 @@ fn main() { let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); - let mut l = Core::new().unwrap(); - let socket = TcpListener::bind(&addr, &l.handle()).unwrap(); + let mut core = Core::new().unwrap(); + let handle = core.handle(); + let socket = TcpListener::bind(&addr, &handle).unwrap(); println!("Listening on: {}", addr); - let server = socket.incoming().and_then(|(socket, addr)| { + let server = socket.incoming().for_each(|(socket, addr)| { println!("got a socket: {}", addr); - write(socket).or_else(|_| Ok(())) - }).for_each(|()| { - println!("lost the socket"); + handle.spawn(write(socket).or_else(|_| Ok(()))); Ok(()) }); - l.run(server).unwrap(); + core.run(server).unwrap(); } fn write(socket: TcpStream) -> IoFuture<()> { static BUF: &'static [u8] = &[0; 64 * 1024]; - let iter = iter::repeat(()).map(|()| Ok(())); - stream::iter(iter).fold(socket, |socket, ()| { + let iter = iter::repeat(()); + Box::new(stream::iter_ok(iter).fold(socket, |socket, ()| { tokio_io::io::write_all(socket, BUF).map(|(socket, _)| socket) - }).map(|_| ()).boxed() + }).map(|_| ())) } diff --git a/third_party/rust/tokio-core/examples/tinydb.rs b/third_party/rust/tokio-core/examples/tinydb.rs new file mode 100644 index 000000000000..fe7865c3304a --- /dev/null +++ b/third_party/rust/tokio-core/examples/tinydb.rs @@ -0,0 +1,209 @@ +//! A "tiny database" and accompanying protocol +//! +//! 
This example shows the usage of shared state amongst all connected clients, +//! namely a database of key/value pairs. Each connected client can send a +//! series of GET/SET commands to query the current value of a key or set the +//! value of a key. +//! +//! This example has a simple protocol you can use to interact with the server. +//! To run, first run this in one terminal window: +//! +//! cargo run --example tinydb +//! +//! and next in another windows run: +//! +//! cargo run --example connect 127.0.0.1:8080 +//! +//! In the `connect` window you can type in commands where when you hit enter +//! you'll get a response from the server for that command. An example session +//! is: +//! +//! +//! $ cargo run --example connect 127.0.0.1:8080 +//! GET foo +//! foo = bar +//! GET FOOBAR +//! error: no key FOOBAR +//! SET FOOBAR my awesome string +//! set FOOBAR = `my awesome string`, previous: None +//! SET foo tokio +//! set foo = `tokio`, previous: Some("bar") +//! GET foo +//! foo = tokio +//! +//! Namely you can issue two forms of commands: +//! +//! * `GET $key` - this will fetch the value of `$key` from the database and +//! return it. The server's database is initially populated with the key `foo` +//! set to the value `bar` +//! * `SET $key $value` - this will set the value of `$key` to `$value`, +//! returning the previous value, if any. + +extern crate futures; +extern crate tokio_core; +extern crate tokio_io; + +use std::cell::RefCell; +use std::collections::HashMap; +use std::io::BufReader; +use std::rc::Rc; +use std::env; +use std::net::SocketAddr; + +use futures::prelude::*; +use tokio_core::net::TcpListener; +use tokio_core::reactor::Core; +use tokio_io::AsyncRead; +use tokio_io::io::{lines, write_all}; + +/// The in-memory database shared amongst all clients. +/// +/// This database will be shared via `Rc`, so to mutate the internal map we're +/// also going to use a `RefCell` for interior mutability. 
+struct Database { + map: RefCell>, +} + +/// Possible requests our clients can send us +enum Request { + Get { key: String }, + Set { key: String, value: String }, +} + +/// Responses to the `Request` commands above +enum Response { + Value { key: String, value: String }, + Set { key: String, value: String, previous: Option }, + Error { msg: String }, +} + +fn main() { + // Parse the address we're going to run this server on, create a `Core`, and + // set up our TCP listener to accept connections. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + let mut core = Core::new().unwrap(); + let handle = core.handle(); + let listener = TcpListener::bind(&addr, &handle).expect("failed to bind"); + println!("Listening on: {}", addr); + + // Create the shared state of this server that will be shared amongst all + // clients. We populate the initial database and then create the `Database` + // structure. Note the usage of `Rc` here which will be used to ensure that + // each independently spawned client will have a reference to the in-memory + // database. + let mut initial_db = HashMap::new(); + initial_db.insert("foo".to_string(), "bar".to_string()); + let db = Rc::new(Database { + map: RefCell::new(initial_db), + }); + + let done = listener.incoming().for_each(move |(socket, _addr)| { + // As with many other small examples, the first thing we'll do is + // *split* this TCP stream into two separately owned halves. This'll + // allow us to work with the read and write halves independently. + let (reader, writer) = socket.split(); + + // Since our protocol is line-based we use `tokio_io`'s `lines` utility + // to convert our stream of bytes, `reader`, into a `Stream` of lines. + let lines = lines(BufReader::new(reader)); + + // Here's where the meat of the processing in this server happens. 
First + // we see a clone of the database being created, which is creating a + // new reference for this connected client to use. Also note the `move` + // keyword on the closure here which moves ownership of the reference + // into the closure, which we'll need for spawning the client below. + // + // The `map` function here means that we'll run some code for all + // requests (lines) we receive from the client. The actual handling here + // is pretty simple, first we parse the request and if it's valid we + // generate a response based on the values in the database. + let db = db.clone(); + let responses = lines.map(move |line| { + let request = match Request::parse(&line) { + Ok(req) => req, + Err(e) => return Response::Error { msg: e }, + }; + + let mut db = db.map.borrow_mut(); + match request { + Request::Get { key } => { + match db.get(&key) { + Some(value) => Response::Value { key, value: value.clone() }, + None => Response::Error { msg: format!("no key {}", key) }, + } + } + Request::Set { key, value } => { + let previous = db.insert(key.clone(), value.clone()); + Response::Set { key, value, previous } + } + } + }); + + // At this point `responses` is a stream of `Response` types which we + // now want to write back out to the client. To do that we use + // `Stream::fold` to perform a loop here, serializing each response and + // then writing it out to the client. + let writes = responses.fold(writer, |writer, response| { + let mut response = response.serialize(); + response.push('\n'); + write_all(writer, response.into_bytes()).map(|(w, _)| w) + }); + + // Like with other small servers, we'll `spawn` this client to ensure it + // runs concurrently with all other clients, for now ignoring any errors + // that we see. 
+ let msg = writes.then(move |_| Ok(())); + handle.spawn(msg); + Ok(()) + }); + + core.run(done).unwrap(); +} + +impl Request { + fn parse(input: &str) -> Result { + let mut parts = input.splitn(3, " "); + match parts.next() { + Some("GET") => { + let key = match parts.next() { + Some(key) => key, + None => return Err(format!("GET must be followed by a key")), + }; + if parts.next().is_some() { + return Err(format!("GET's key must not be followed by anything")) + } + Ok(Request::Get { key: key.to_string() }) + } + Some("SET") => { + let key = match parts.next() { + Some(key) => key, + None => return Err(format!("SET must be followed by a key")), + }; + let value = match parts.next() { + Some(value) => value, + None => return Err(format!("SET needs a value")), + }; + Ok(Request::Set { key: key.to_string(), value: value.to_string() }) + } + Some(cmd) => Err(format!("unknown command: {}", cmd)), + None => Err(format!("empty input")), + } + } +} + +impl Response { + fn serialize(&self) -> String { + match *self { + Response::Value { ref key, ref value } => { + format!("{} = {}", key, value) + } + Response::Set { ref key, ref value, ref previous } => { + format!("set {} = `{}`, previous: {:?}", key, value, previous) + } + Response::Error { ref msg } => { + format!("error: {}", msg) + } + } + } +} diff --git a/third_party/rust/tokio-core/examples/tinyhttp.rs b/third_party/rust/tokio-core/examples/tinyhttp.rs new file mode 100644 index 000000000000..1689a3589f87 --- /dev/null +++ b/third_party/rust/tokio-core/examples/tinyhttp.rs @@ -0,0 +1,322 @@ +//! A "tiny" example of HTTP request/response handling using just tokio-core +//! +//! This example is intended for *learning purposes* to see how various pieces +//! hook up together and how HTTP can get up and running. Note that this example +//! is written with the restriction that it *can't* use any "big" library other +//! than tokio-core, if you'd like a "real world" HTTP library you likely want a +//! crate like Hyper. 
+//! +//! Code here is based on the `echo-threads` example and implements two paths, +//! the `/plaintext` and `/json` routes to respond with some text and json, +//! respectively. By default this will run I/O on all the cores your system has +//! available, and it doesn't support HTTP request bodies. + +extern crate bytes; +extern crate futures; +extern crate http; +extern crate httparse; +extern crate num_cpus; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; +extern crate time; +extern crate tokio_core; +extern crate tokio_io; + +use std::env; +use std::fmt; +use std::io; +use std::net::{self, SocketAddr}; +use std::thread; + +use bytes::BytesMut; +use futures::future; +use futures::sync::mpsc; +use futures::{Stream, Future, Sink}; +use http::{Request, Response, StatusCode}; +use http::header::HeaderValue; +use tokio_core::net::TcpStream; +use tokio_core::reactor::Core; +use tokio_io::codec::{Encoder, Decoder}; +use tokio_io::{AsyncRead}; + +fn main() { + // Parse the arguments, bind the TCP socket we'll be listening to, spin up + // our worker threads, and start shipping sockets to those worker threads. 
+ let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + let num_threads = env::args().nth(2).and_then(|s| s.parse().ok()) + .unwrap_or(num_cpus::get()); + + let listener = net::TcpListener::bind(&addr).expect("failed to bind"); + println!("Listening on: {}", addr); + + let mut channels = Vec::new(); + for _ in 0..num_threads { + let (tx, rx) = mpsc::unbounded(); + channels.push(tx); + thread::spawn(|| worker(rx)); + } + let mut next = 0; + for socket in listener.incoming() { + if let Ok(socket) = socket { + channels[next].unbounded_send(socket).expect("worker thread died"); + next = (next + 1) % channels.len(); + } + } +} + +fn worker(rx: mpsc::UnboundedReceiver) { + let mut core = Core::new().unwrap(); + let handle = core.handle(); + + let done = rx.for_each(move |socket| { + // Associate each socket we get with our local event loop, and then use + // the codec support in the tokio-io crate to deal with discrete + // request/response types instead of bytes. Here we'll just use our + // framing defined below and then use the `send_all` helper to send the + // responses back on the socket after we've processed them + let socket = future::result(TcpStream::from_stream(socket, &handle)); + let req = socket.and_then(|socket| { + let (tx, rx) = socket.framed(Http).split(); + tx.send_all(rx.and_then(respond)) + }); + handle.spawn(req.then(move |result| { + drop(result); + Ok(()) + })); + Ok(()) + }); + core.run(done).unwrap(); +} + +/// "Server logic" is implemented in this function. +/// +/// This function is a map from and HTTP request to a future of a response and +/// represents the various handling a server might do. Currently the contents +/// here are pretty uninteresting. 
+fn respond(req: Request<()>) + -> Box, Error = io::Error>> +{ + let mut ret = Response::builder(); + let body = match req.uri().path() { + "/plaintext" => { + ret.header("Content-Type", "text/plain"); + "Hello, World!".to_string() + } + "/json" => { + ret.header("Content-Type", "application/json"); + + #[derive(Serialize)] + struct Message { + message: &'static str, + } + serde_json::to_string(&Message { message: "Hello, World!" }) + .unwrap() + } + _ => { + ret.status(StatusCode::NOT_FOUND); + String::new() + } + }; + Box::new(future::ok(ret.body(body).unwrap())) +} + +struct Http; + +/// Implementation of encoding an HTTP response into a `BytesMut`, basically +/// just writing out an HTTP/1.1 response. +impl Encoder for Http { + type Item = Response; + type Error = io::Error; + + fn encode(&mut self, item: Response, dst: &mut BytesMut) -> io::Result<()> { + use std::fmt::Write; + + write!(BytesWrite(dst), "\ + HTTP/1.1 {}\r\n\ + Server: Example\r\n\ + Content-Length: {}\r\n\ + Date: {}\r\n\ + ", item.status(), item.body().len(), date::now()).unwrap(); + + for (k, v) in item.headers() { + dst.extend_from_slice(k.as_str().as_bytes()); + dst.extend_from_slice(b": "); + dst.extend_from_slice(v.as_bytes()); + dst.extend_from_slice(b"\r\n"); + } + + dst.extend_from_slice(b"\r\n"); + dst.extend_from_slice(item.body().as_bytes()); + + return Ok(()); + + // Right now `write!` on `Vec` goes through io::Write and is not + // super speedy, so inline a less-crufty implementation here which + // doesn't go through io::Error. + struct BytesWrite<'a>(&'a mut BytesMut); + + impl<'a> fmt::Write for BytesWrite<'a> { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.0.extend_from_slice(s.as_bytes()); + Ok(()) + } + + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } + } + } +} + +/// Implementation of decoding an HTTP request from the bytes we've read so far. 
+/// This leverages the `httparse` crate to do the actual parsing and then we use +/// that information to construct an instance of a `http::Request` object, +/// trying to avoid allocations where possible. +impl Decoder for Http { + type Item = Request<()>; + type Error = io::Error; + + fn decode(&mut self, src: &mut BytesMut) -> io::Result>> { + // TODO: we should grow this headers array if parsing fails and asks + // for more headers + let mut headers = [None; 16]; + let (method, path, version, amt) = { + let mut parsed_headers = [httparse::EMPTY_HEADER; 16]; + let mut r = httparse::Request::new(&mut parsed_headers); + let status = r.parse(src).map_err(|e| { + let msg = format!("failed to parse http request: {:?}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + let amt = match status { + httparse::Status::Complete(amt) => amt, + httparse::Status::Partial => return Ok(None), + }; + + let toslice = |a: &[u8]| { + let start = a.as_ptr() as usize - src.as_ptr() as usize; + assert!(start < src.len()); + (start, start + a.len()) + }; + + for (i, header) in r.headers.iter().enumerate() { + let k = toslice(header.name.as_bytes()); + let v = toslice(header.value); + headers[i] = Some((k, v)); + } + + (toslice(r.method.unwrap().as_bytes()), + toslice(r.path.unwrap().as_bytes()), + r.version.unwrap(), + amt) + }; + if version != 1 { + return Err(io::Error::new(io::ErrorKind::Other, "only HTTP/1.1 accepted")) + } + let data = src.split_to(amt).freeze(); + let mut ret = Request::builder(); + ret.method(&data[method.0..method.1]); + ret.uri(data.slice(path.0, path.1)); + ret.version(http::Version::HTTP_11); + for header in headers.iter() { + let (k, v) = match *header { + Some((ref k, ref v)) => (k, v), + None => break, + }; + let value = unsafe { + HeaderValue::from_shared_unchecked(data.slice(v.0, v.1)) + }; + ret.header(&data[k.0..k.1], value); + } + + let req = ret.body(()).map_err(|e| { + io::Error::new(io::ErrorKind::Other, e) + })?; + Ok(Some(req)) + } +} + 
+mod date { + use std::cell::RefCell; + use std::fmt::{self, Write}; + use std::str; + + use time::{self, Duration}; + + pub struct Now(()); + + /// Returns a struct, which when formatted, renders an appropriate `Date` + /// header value. + pub fn now() -> Now { + Now(()) + } + + // Gee Alex, doesn't this seem like premature optimization. Well you see + // there Billy, you're absolutely correct! If your server is *bottlenecked* + // on rendering the `Date` header, well then boy do I have news for you, you + // don't need this optimization. + // + // In all seriousness, though, a simple "hello world" benchmark which just + // sends back literally "hello world" with standard headers actually is + // bottlenecked on rendering a date into a byte buffer. Since it was at the + // top of a profile, and this was done for some competitive benchmarks, this + // module was written. + // + // Just to be clear, though, I was not intending on doing this because it + // really does seem kinda absurd, but it was done by someone else [1], so I + // blame them! 
:) + // + // [1]: https://github.com/rapidoid/rapidoid/blob/f1c55c0555007e986b5d069fe1086e6d09933f7b/rapidoid-commons/src/main/java/org/rapidoid/commons/Dates.java#L48-L66 + + struct LastRenderedNow { + bytes: [u8; 128], + amt: usize, + next_update: time::Timespec, + } + + thread_local!(static LAST: RefCell = RefCell::new(LastRenderedNow { + bytes: [0; 128], + amt: 0, + next_update: time::Timespec::new(0, 0), + })); + + impl fmt::Display for Now { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + LAST.with(|cache| { + let mut cache = cache.borrow_mut(); + let now = time::get_time(); + if now > cache.next_update { + cache.update(now); + } + f.write_str(cache.buffer()) + }) + } + } + + impl LastRenderedNow { + fn buffer(&self) -> &str { + str::from_utf8(&self.bytes[..self.amt]).unwrap() + } + + fn update(&mut self, now: time::Timespec) { + self.amt = 0; + write!(LocalBuffer(self), "{}", time::at(now).rfc822()).unwrap(); + self.next_update = now + Duration::seconds(1); + self.next_update.nsec = 0; + } + } + + struct LocalBuffer<'a>(&'a mut LastRenderedNow); + + impl<'a> fmt::Write for LocalBuffer<'a> { + fn write_str(&mut self, s: &str) -> fmt::Result { + let start = self.0.amt; + let end = start + s.len(); + self.0.bytes[start..end].copy_from_slice(s.as_bytes()); + self.0.amt += s.len(); + Ok(()) + } + } +} diff --git a/third_party/rust/tokio-core/examples/udp-codec.rs b/third_party/rust/tokio-core/examples/udp-codec.rs index e7456f71c293..bd243090b9f0 100644 --- a/third_party/rust/tokio-core/examples/udp-codec.rs +++ b/third_party/rust/tokio-core/examples/udp-codec.rs @@ -12,7 +12,6 @@ extern crate futures; use std::io; use std::net::SocketAddr; -use std::str; use futures::{Future, Stream, Sink}; use tokio_core::net::{UdpSocket, UdpCodec}; diff --git a/third_party/rust/tokio-core/src/channel.rs b/third_party/rust/tokio-core/src/channel.rs index 6251afb49922..e9e023867484 100644 --- a/third_party/rust/tokio-core/src/channel.rs +++ 
b/third_party/rust/tokio-core/src/channel.rs @@ -23,6 +23,7 @@ use reactor::{Handle, PollEvented}; /// This type is created by the [`channel`] function. /// /// [`channel`]: fn.channel.html +#[must_use = "sinks do nothing unless polled"] pub struct Sender { tx: channel::Sender, } @@ -37,6 +38,7 @@ pub struct Sender { /// `Stream` trait to represent received messages. /// /// [`channel`]: fn.channel.html +#[must_use = "streams do nothing unless polled"] pub struct Receiver { rx: PollEvented>, } diff --git a/third_party/rust/tokio-core/src/heap.rs b/third_party/rust/tokio-core/src/heap.rs deleted file mode 100644 index 953ab87a73f6..000000000000 --- a/third_party/rust/tokio-core/src/heap.rs +++ /dev/null @@ -1,305 +0,0 @@ -//! A simple binary heap with support for removal of arbitrary elements -//! -//! This heap is used to manage timer state in the event loop. All timeouts go -//! into this heap and we also cancel timeouts from this heap. The crucial -//! feature of this heap over the standard library's `BinaryHeap` is the ability -//! to remove arbitrary elements. (e.g. when a timer is canceled) -//! -//! Note that this heap is not at all optimized right now, it should hopefully -//! just work. - -use std::mem; - -use slab::Slab; - -pub struct Heap { - // Binary heap of items, plus the slab index indicating what position in the - // list they're in. - items: Vec<(T, usize)>, - - // A map from a slab index (assigned to an item above) to the actual index - // in the array the item appears at. - index: Slab, -} - -pub struct Slot { - idx: usize, -} - -impl Heap { - pub fn new() -> Heap { - Heap { - items: Vec::new(), - index: Slab::with_capacity(128), - } - } - - /// Pushes an element onto this heap, returning a slot token indicating - /// where it was pushed on to. - /// - /// The slot can later get passed to `remove` to remove the element from the - /// heap, but only if the element was previously not removed from the heap. 
- pub fn push(&mut self, t: T) -> Slot { - self.assert_consistent(); - let len = self.items.len(); - if self.index.available() == 0 { - self.index.reserve_exact(len); - } - let slot_idx = self.index.insert(len).unwrap(); - self.items.push((t, slot_idx)); - self.percolate_up(len); - self.assert_consistent(); - Slot { idx: slot_idx } - } - - pub fn peek(&self) -> Option<&T> { - self.assert_consistent(); - self.items.get(0).map(|i| &i.0) - } - - pub fn pop(&mut self) -> Option { - self.assert_consistent(); - if self.items.len() == 0 { - return None - } - let slot = Slot { idx: self.items[0].1 }; - Some(self.remove(slot)) - } - - pub fn remove(&mut self, slot: Slot) -> T { - self.assert_consistent(); - let idx = self.index.remove(slot.idx).unwrap(); - let (item, slot_idx) = self.items.swap_remove(idx); - debug_assert_eq!(slot.idx, slot_idx); - if idx < self.items.len() { - self.index[self.items[idx].1] = idx; - if self.items[idx].0 < item { - self.percolate_up(idx); - } else { - self.percolate_down(idx); - } - } - self.assert_consistent(); - return item - } - - fn percolate_up(&mut self, mut idx: usize) -> usize { - while idx > 0 { - let parent = (idx - 1) / 2; - if self.items[idx].0 >= self.items[parent].0 { - break - } - let (a, b) = self.items.split_at_mut(idx); - mem::swap(&mut a[parent], &mut b[0]); - self.index[a[parent].1] = parent; - self.index[b[0].1] = idx; - idx = parent; - } - return idx - } - - fn percolate_down(&mut self, mut idx: usize) -> usize { - loop { - let left = 2 * idx + 1; - let right = 2 * idx + 2; - - let mut swap_left = true; - match (self.items.get(left), self.items.get(right)) { - (Some(left), None) => { - if left.0 >= self.items[idx].0 { - break - } - } - (Some(left), Some(right)) => { - if left.0 < self.items[idx].0 { - if right.0 < left.0 { - swap_left = false; - } - } else if right.0 < self.items[idx].0 { - swap_left = false; - } else { - break - } - } - - (None, None) => break, - (None, Some(_right)) => panic!("not possible"), - } - - 
let (a, b) = if swap_left { - self.items.split_at_mut(left) - } else { - self.items.split_at_mut(right) - }; - mem::swap(&mut a[idx], &mut b[0]); - self.index[a[idx].1] = idx; - self.index[b[0].1] = a.len(); - idx = a.len(); - } - return idx - } - - fn assert_consistent(&self) { - if cfg!(not(debug_assertions)) { - return - } - - assert_eq!(self.items.len(), self.index.len()); - - for (i, &(_, j)) in self.items.iter().enumerate() { - if self.index[j] != i { - panic!("self.index[j] != i : i={} j={} self.index[j]={}", - i, j, self.index[j]); - } - } - - for (i, &(ref item, _)) in self.items.iter().enumerate() { - if i > 0 { - assert!(*item >= self.items[(i - 1) / 2].0, "bad at index: {}", i); - } - if let Some(left) = self.items.get(2 * i + 1) { - assert!(*item <= left.0, "bad left at index: {}", i); - } - if let Some(right) = self.items.get(2 * i + 2) { - assert!(*item <= right.0, "bad right at index: {}", i); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::Heap; - - #[test] - fn simple() { - let mut h = Heap::new(); - h.push(1); - h.push(2); - h.push(8); - h.push(4); - assert_eq!(h.pop(), Some(1)); - assert_eq!(h.pop(), Some(2)); - assert_eq!(h.pop(), Some(4)); - assert_eq!(h.pop(), Some(8)); - assert_eq!(h.pop(), None); - assert_eq!(h.pop(), None); - } - - #[test] - fn simple2() { - let mut h = Heap::new(); - h.push(5); - h.push(4); - h.push(3); - h.push(2); - h.push(1); - assert_eq!(h.pop(), Some(1)); - h.push(8); - assert_eq!(h.pop(), Some(2)); - h.push(1); - assert_eq!(h.pop(), Some(1)); - assert_eq!(h.pop(), Some(3)); - assert_eq!(h.pop(), Some(4)); - h.push(5); - assert_eq!(h.pop(), Some(5)); - assert_eq!(h.pop(), Some(5)); - assert_eq!(h.pop(), Some(8)); - } - - #[test] - fn remove() { - let mut h = Heap::new(); - h.push(5); - h.push(4); - h.push(3); - let two = h.push(2); - h.push(1); - assert_eq!(h.pop(), Some(1)); - assert_eq!(h.remove(two), 2); - h.push(1); - assert_eq!(h.pop(), Some(1)); - assert_eq!(h.pop(), Some(3)); - } - - fn vec2heap(v: 
Vec) -> Heap { - let mut h = Heap::new(); - for t in v { - h.push(t); - } - return h - } - - #[test] - fn test_peek_and_pop() { - let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]; - let mut sorted = data.clone(); - sorted.sort(); - let mut heap = vec2heap(data); - while heap.peek().is_some() { - assert_eq!(heap.peek().unwrap(), sorted.first().unwrap()); - assert_eq!(heap.pop().unwrap(), sorted.remove(0)); - } - } - - #[test] - fn test_push() { - let mut heap = Heap::new(); - heap.push(-2); - heap.push(-4); - heap.push(-9); - assert!(*heap.peek().unwrap() == -9); - heap.push(-11); - assert!(*heap.peek().unwrap() == -11); - heap.push(-5); - assert!(*heap.peek().unwrap() == -11); - heap.push(-27); - assert!(*heap.peek().unwrap() == -27); - heap.push(-3); - assert!(*heap.peek().unwrap() == -27); - heap.push(-103); - assert!(*heap.peek().unwrap() == -103); - } - - fn check_to_vec(mut data: Vec) { - let mut heap = Heap::new(); - for data in data.iter() { - heap.push(*data); - } - data.sort(); - let mut v = Vec::new(); - while let Some(i) = heap.pop() { - v.push(i); - } - assert_eq!(v, data); - } - - #[test] - fn test_to_vec() { - check_to_vec(vec![]); - check_to_vec(vec![5]); - check_to_vec(vec![3, 2]); - check_to_vec(vec![2, 3]); - check_to_vec(vec![5, 1, 2]); - check_to_vec(vec![1, 100, 2, 3]); - check_to_vec(vec![1, 3, 5, 7, 9, 2, 4, 6, 8, 0]); - check_to_vec(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]); - check_to_vec(vec![9, 11, 9, 9, 9, 9, 11, 2, 3, 4, 11, 9, 0, 0, 0, 0]); - check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); - check_to_vec(vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); - check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2]); - check_to_vec(vec![5, 4, 3, 2, 1, 5, 4, 3, 2, 1, 5, 4, 3, 2, 1]); - } - - #[test] - fn test_empty_pop() { - let mut heap = Heap::::new(); - assert!(heap.pop().is_none()); - } - - #[test] - fn test_empty_peek() { - let empty = Heap::::new(); - assert!(empty.peek().is_none()); - } -} diff --git 
a/third_party/rust/tokio-core/src/io/copy.rs b/third_party/rust/tokio-core/src/io/copy.rs index fa5677e04cc9..d5f5c34d67ac 100644 --- a/third_party/rust/tokio-core/src/io/copy.rs +++ b/third_party/rust/tokio-core/src/io/copy.rs @@ -8,6 +8,7 @@ use futures::{Future, Poll}; /// bytes copied or an error if one happens. /// /// [`copy`]: fn.copy.html +#[must_use = "futures do nothing unless polled"] pub struct Copy { reader: R, read_done: bool, diff --git a/third_party/rust/tokio-core/src/io/flush.rs b/third_party/rust/tokio-core/src/io/flush.rs index f65164c08656..5585e51b91c0 100644 --- a/third_party/rust/tokio-core/src/io/flush.rs +++ b/third_party/rust/tokio-core/src/io/flush.rs @@ -9,6 +9,7 @@ use futures::{Poll, Future, Async}; /// Created by the [`flush`] function. /// /// [`flush`]: fn.flush.html +#[must_use = "futures do nothing unless polled"] pub struct Flush { a: Option, } diff --git a/third_party/rust/tokio-core/src/io/frame.rs b/third_party/rust/tokio-core/src/io/frame.rs index 821dc3cfb50b..151648ac2361 100644 --- a/third_party/rust/tokio-core/src/io/frame.rs +++ b/third_party/rust/tokio-core/src/io/frame.rs @@ -310,6 +310,7 @@ pub trait Codec { /// the `Codec` trait to encode and decode frames. /// /// You can acquire a `Framed` instance by using the `Io::framed` adapter. +#[must_use = "streams do nothing unless polled"] pub struct Framed { upstream: T, codec: C, diff --git a/third_party/rust/tokio-core/src/io/mod.rs b/third_party/rust/tokio-core/src/io/mod.rs index ccd55443f17f..5fa11206aa64 100644 --- a/third_party/rust/tokio-core/src/io/mod.rs +++ b/third_party/rust/tokio-core/src/io/mod.rs @@ -10,6 +10,7 @@ //! 
[low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/ #![deprecated(note = "moved to the `tokio-io` crate")] +#![allow(deprecated)] use std::io; @@ -64,7 +65,7 @@ pub use self::write_all::{write_all, WriteAll}; /// A trait for read/write I/O objects /// -/// This trait represents I/O object which are readable and writable. +/// This trait represents I/O objects which are readable and writable. /// Additionally, they're associated with the ability to test whether they're /// readable or writable. /// diff --git a/third_party/rust/tokio-core/src/io/read.rs b/third_party/rust/tokio-core/src/io/read.rs index 51bf9667fbcf..f81007e9c1ff 100644 --- a/third_party/rust/tokio-core/src/io/read.rs +++ b/third_party/rust/tokio-core/src/io/read.rs @@ -26,6 +26,7 @@ pub fn read(rd: R, buf: T) -> Read /// a buffer. /// /// Created by the [`read`] function. +#[must_use = "futures do nothing unless polled"] pub struct Read { state: State, } diff --git a/third_party/rust/tokio-core/src/io/read_exact.rs b/third_party/rust/tokio-core/src/io/read_exact.rs index bb0cea135272..f4c51d54283e 100644 --- a/third_party/rust/tokio-core/src/io/read_exact.rs +++ b/third_party/rust/tokio-core/src/io/read_exact.rs @@ -9,6 +9,7 @@ use futures::{Poll, Future}; /// Created by the [`read_exact`] function. /// /// [`read_exact`]: fn.read_exact.html +#[must_use = "futures do nothing unless polled"] pub struct ReadExact { state: State, } diff --git a/third_party/rust/tokio-core/src/io/read_to_end.rs b/third_party/rust/tokio-core/src/io/read_to_end.rs index 63d5a82a5919..e1b792d2bac9 100644 --- a/third_party/rust/tokio-core/src/io/read_to_end.rs +++ b/third_party/rust/tokio-core/src/io/read_to_end.rs @@ -9,6 +9,7 @@ use futures::{Poll, Future}; /// Created by the [`read_to_end`] function. 
/// /// [`read_to_end`]: fn.read_to_end.html +#[must_use = "futures do nothing unless polled"] pub struct ReadToEnd { state: State, } diff --git a/third_party/rust/tokio-core/src/io/read_until.rs b/third_party/rust/tokio-core/src/io/read_until.rs index 4e275cdc5862..4263c5f1259a 100644 --- a/third_party/rust/tokio-core/src/io/read_until.rs +++ b/third_party/rust/tokio-core/src/io/read_until.rs @@ -9,6 +9,7 @@ use futures::{Poll, Future}; /// Created by the [`read_until`] function. /// /// [`read_until`]: fn.read_until.html +#[must_use = "futures do nothing unless polled"] pub struct ReadUntil { state: State, } diff --git a/third_party/rust/tokio-core/src/io/split.rs b/third_party/rust/tokio-core/src/io/split.rs index 22f3d63b4993..ac20294f3dba 100644 --- a/third_party/rust/tokio-core/src/io/split.rs +++ b/third_party/rust/tokio-core/src/io/split.rs @@ -46,7 +46,7 @@ impl Read for ReadHalf { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self.handle.poll_lock() { Async::Ready(mut l) => l.read(buf), - Async::NotReady => Err(::would_block()), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), } } } @@ -55,14 +55,14 @@ impl Write for WriteHalf { fn write(&mut self, buf: &[u8]) -> io::Result { match self.handle.poll_lock() { Async::Ready(mut l) => l.write(buf), - Async::NotReady => Err(::would_block()), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), } } fn flush(&mut self) -> io::Result<()> { match self.handle.poll_lock() { Async::Ready(mut l) => l.flush(), - Async::NotReady => Err(::would_block()), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), } } } diff --git a/third_party/rust/tokio-core/src/io/write_all.rs b/third_party/rust/tokio-core/src/io/write_all.rs index 949db4750c2b..8916be66282b 100644 --- a/third_party/rust/tokio-core/src/io/write_all.rs +++ b/third_party/rust/tokio-core/src/io/write_all.rs @@ -8,6 +8,7 @@ use futures::{Poll, Future}; /// This is created by the [`write_all`] top-level method. 
/// /// [`write_all`]: fn.write_all.html +#[must_use = "futures do nothing unless polled"] pub struct WriteAll { state: State, } diff --git a/third_party/rust/tokio-core/src/lib.rs b/third_party/rust/tokio-core/src/lib.rs index b34ff9825133..f080a0889586 100644 --- a/third_party/rust/tokio-core/src/lib.rs +++ b/third_party/rust/tokio-core/src/lib.rs @@ -44,9 +44,11 @@ //! ```no_run //! extern crate futures; //! extern crate tokio_core; +//! extern crate tokio_io; //! //! use futures::{Future, Stream}; -//! use tokio_core::io::{copy, Io}; +//! use tokio_io::AsyncRead; +//! use tokio_io::io::copy; //! use tokio_core::net::TcpListener; //! use tokio_core::reactor::Core; //! @@ -71,7 +73,7 @@ //! //! // ... after which we'll print what happened //! let handle_conn = bytes_copied.map(|amt| { -//! println!("wrote {} bytes", amt) +//! println!("wrote {:?} bytes", amt) //! }).map_err(|err| { //! println!("IO error {:?}", err) //! }); @@ -87,16 +89,21 @@ //! } //! ``` -#![doc(html_root_url = "https://docs.rs/tokio-core/0.1")] +#![doc(html_root_url = "https://docs.rs/tokio-core/0.1.17")] #![deny(missing_docs)] +#![deny(warnings)] +#![cfg_attr(test, allow(deprecated))] extern crate bytes; #[macro_use] extern crate futures; extern crate iovec; extern crate mio; -extern crate slab; +extern crate tokio; +extern crate tokio_executor; extern crate tokio_io; +extern crate tokio_reactor; +extern crate tokio_timer; #[macro_use] extern crate scoped_tls; @@ -105,16 +112,10 @@ extern crate scoped_tls; extern crate log; #[macro_use] +#[doc(hidden)] pub mod io; -mod heap; #[doc(hidden)] pub mod channel; pub mod net; pub mod reactor; - -use std::io as sio; - -fn would_block() -> sio::Error { - sio::Error::new(sio::ErrorKind::WouldBlock, "would block") -} diff --git a/third_party/rust/tokio-core/src/net/tcp.rs b/third_party/rust/tokio-core/src/net/tcp.rs index 06f8efd6ce40..8e43fd42a6b3 100644 --- a/third_party/rust/tokio-core/src/net/tcp.rs +++ b/third_party/rust/tokio-core/src/net/tcp.rs 
@@ -2,28 +2,28 @@ use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::net::{self, SocketAddr, Shutdown}; +use std::time::Duration; use bytes::{Buf, BufMut}; use futures::stream::Stream; -use futures::sync::oneshot; use futures::{Future, Poll, Async}; use iovec::IoVec; use mio; use tokio_io::{AsyncRead, AsyncWrite}; -use reactor::{Handle, PollEvented}; +use reactor::{Handle, PollEvented2}; /// An I/O object representing a TCP socket listening for incoming connections. /// /// This object can be converted into a stream of incoming connections for /// various forms of processing. pub struct TcpListener { - io: PollEvented, - pending_accept: Option>>, + io: PollEvented2, } /// Stream returned by the `TcpListener::incoming` function representing the /// stream of sockets received from a listener. +#[must_use = "streams do nothing unless polled"] pub struct Incoming { inner: TcpListener, } @@ -34,10 +34,19 @@ impl TcpListener { /// The TCP listener will bind to the provided `addr` address, if available. /// If the result is `Ok`, the socket has successfully bound. pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result { - let l = try!(mio::tcp::TcpListener::bind(addr)); + let l = try!(mio::net::TcpListener::bind(addr)); TcpListener::new(l, handle) } + /// Create a new TCP listener associated with this event loop. + /// + /// This is the same as `bind` but uses the default reactor instead of an + /// explicit `&Handle`. + pub fn bind2(addr: &SocketAddr) -> io::Result { + let l = try!(mio::net::TcpListener::bind(addr)); + TcpListener::new2(l) + } + /// Attempt to accept a connection and create a new connected `TcpStream` if /// successful. /// @@ -57,52 +66,32 @@ impl TcpListener { /// future's task. It's recommended to only call this from the /// implementation of a `Future::poll`, if necessary. 
pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { - loop { - if let Some(mut pending) = self.pending_accept.take() { - match pending.poll().expect("shouldn't be canceled") { - Async::NotReady => { - self.pending_accept = Some(pending); - return Err(::would_block()) - }, - Async::Ready(r) => return r, + let (io, addr) = self.accept_std()?; + + let io = mio::net::TcpStream::from_stream(io)?; + let io = PollEvented2::new(io); + let io = TcpStream { io }; + + Ok((io, addr)) + } + + /// Like `accept`, except that it returns a raw `std::net::TcpStream`. + /// + /// The stream is *in blocking mode*, and is not associated with the Tokio + /// event loop. + pub fn accept_std(&mut self) -> io::Result<(net::TcpStream, SocketAddr)> { + if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? { + return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")) + } + + match self.io.get_ref().accept_std() { + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + self.io.clear_read_ready(mio::Ready::readable())?; } - } - - if let Async::NotReady = self.io.poll_read() { - return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")) - } - - match self.io.get_ref().accept() { - Err(e) => { - if e.kind() == io::ErrorKind::WouldBlock { - self.io.need_read(); - } - return Err(e) - }, - Ok((sock, addr)) => { - // Fast path if we haven't left the event loop - if let Some(handle) = self.io.remote().handle() { - let io = try!(PollEvented::new(sock, &handle)); - return Ok((TcpStream { io: io }, addr)) - } - - // If we're off the event loop then send the socket back - // over there to get registered and then we'll get it back - // eventually. 
- let (tx, rx) = oneshot::channel(); - let remote = self.io.remote().clone(); - remote.spawn(move |handle| { - let res = PollEvented::new(sock, handle) - .map(move |io| { - (TcpStream { io: io }, addr) - }); - drop(tx.send(res)); - Ok(()) - }); - self.pending_accept = Some(rx); - // continue to polling the `rx` at the beginning of the loop - } - } + Err(e) + }, + Ok((sock, addr)) => Ok((sock, addr)), } } @@ -134,21 +123,35 @@ impl TcpListener { /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as /// well (same for IPv6). pub fn from_listener(listener: net::TcpListener, - addr: &SocketAddr, + _addr: &SocketAddr, handle: &Handle) -> io::Result { - let l = try!(mio::tcp::TcpListener::from_listener(listener, addr)); + let l = try!(mio::net::TcpListener::from_std(listener)); TcpListener::new(l, handle) } - fn new(listener: mio::tcp::TcpListener, handle: &Handle) + fn new(listener: mio::net::TcpListener, handle: &Handle) -> io::Result { - let io = try!(PollEvented::new(listener, handle)); - Ok(TcpListener { io: io, pending_accept: None }) + let io = try!(PollEvented2::new_with_handle(listener, handle.new_tokio_handle())); + Ok(TcpListener { io: io }) + } + + fn new2(listener: mio::net::TcpListener) + -> io::Result { + let io = PollEvented2::new(listener); + Ok(TcpListener { io: io }) } /// Test whether this socket is ready to be read or not. pub fn poll_read(&self) -> Async<()> { - self.io.poll_read() + self.io.poll_read_ready(mio::Ready::readable()) + .map(|r| { + if r.is_ready() { + Async::Ready(()) + } else { + Async::NotReady + } + }) + .unwrap_or(().into()) } /// Returns the local address that this listener is bound to. @@ -229,15 +232,17 @@ impl Stream for Incoming { /// raw underlying I/O object as well as streams for the read/write /// notifications on the stream itself. 
pub struct TcpStream { - io: PollEvented, + io: PollEvented2, } /// Future returned by `TcpStream::connect` which will resolve to a `TcpStream` /// when the stream is connected. +#[must_use = "futures do nothing unless polled"] pub struct TcpStreamNew { inner: TcpStreamNewState, } +#[must_use = "futures do nothing unless polled"] enum TcpStreamNewState { Waiting(TcpStream), Error(io::Error), @@ -253,21 +258,39 @@ impl TcpStream { /// connection or during the socket creation, that error will be returned to /// the future instead. pub fn connect(addr: &SocketAddr, handle: &Handle) -> TcpStreamNew { - let inner = match mio::tcp::TcpStream::connect(addr) { + let inner = match mio::net::TcpStream::connect(addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; TcpStreamNew { inner: inner } } - fn new(connected_stream: mio::tcp::TcpStream, handle: &Handle) + /// Create a new TCP stream connected to the specified address. + /// + /// This is the same as `connect`, but uses the default reactor instead of + /// taking an explicit `&Handle`. + pub fn connect2(addr: &SocketAddr) -> TcpStreamNew { + let inner = match mio::net::TcpStream::connect(addr) { + Ok(tcp) => TcpStream::new2(tcp), + Err(e) => TcpStreamNewState::Error(e), + }; + TcpStreamNew { inner: inner } + } + + fn new(connected_stream: mio::net::TcpStream, handle: &Handle) -> TcpStreamNewState { - match PollEvented::new(connected_stream, handle) { + match PollEvented2::new_with_handle(connected_stream, handle.new_tokio_handle()) { Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }), Err(e) => TcpStreamNewState::Error(e), } } + fn new2(connected_stream: mio::net::TcpStream) + -> TcpStreamNewState { + let io = PollEvented2::new(connected_stream); + TcpStreamNewState::Waiting(TcpStream { io: io }) + } + /// Create a new `TcpStream` from a `net::TcpStream`. 
/// /// This function will convert a TCP stream in the standard library to a TCP @@ -275,9 +298,9 @@ impl TcpStream { /// returned is associated with the event loop and ready to perform I/O. pub fn from_stream(stream: net::TcpStream, handle: &Handle) -> io::Result { - let inner = try!(mio::tcp::TcpStream::from_stream(stream)); + let inner = try!(mio::net::TcpStream::from_stream(stream)); Ok(TcpStream { - io: try!(PollEvented::new(inner, handle)), + io: try!(PollEvented2::new_with_handle(inner, handle.new_tokio_handle())), }) } @@ -303,11 +326,11 @@ impl TcpStream { addr: &SocketAddr, handle: &Handle) -> Box + Send> { - let state = match mio::tcp::TcpStream::connect_stream(stream, addr) { + let state = match mio::net::TcpStream::connect_stream(stream, addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; - state.boxed() + Box::new(state) } /// Test whether this socket is ready to be read or not. @@ -317,7 +340,15 @@ impl TcpStream { /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is readable again. pub fn poll_read(&self) -> Async<()> { - self.io.poll_read() + self.io.poll_read_ready(mio::Ready::readable()) + .map(|r| { + if r.is_ready() { + Async::Ready(()) + } else { + Async::NotReady + } + }) + .unwrap_or(().into()) } /// Test whether this socket is ready to be written to or not. @@ -327,7 +358,15 @@ impl TcpStream { /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is writable again. pub fn poll_write(&self) -> Async<()> { - self.io.poll_write() + self.io.poll_write_ready() + .map(|r| { + if r.is_ready() { + Async::Ready(()) + } else { + Async::NotReady + } + }) + .unwrap_or(().into()) } /// Returns the local address that this stream is bound to. 
@@ -340,6 +379,24 @@ impl TcpStream { self.io.get_ref().peer_addr() } + /// Receives data on the socket from the remote address to which it is + /// connected, without removing that data from the queue. On success, + /// returns the number of bytes peeked. + /// + /// Successive calls return the same data. This is accomplished by passing + /// `MSG_PEEK` as a flag to the underlying recv system call. + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.poll_read() { + return Err(io::ErrorKind::WouldBlock.into()) + } + let r = self.io.get_ref().peek(buf); + if is_wouldblock(&r) { + self.io.clear_read_ready(mio::Ready::readable())?; + } + return r + + } + /// Shuts down the read, write, or both halves of this connection. /// /// This function will cause all pending and future I/O on the specified @@ -369,6 +426,41 @@ impl TcpStream { self.io.get_ref().nodelay() } + /// Sets the value of the `SO_RCVBUF` option on this socket. + /// + /// Changes the size of the operating system's receive buffer associated + /// with the socket. + pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.get_ref().set_recv_buffer_size(size) + } + + /// Gets the value of the `SO_RCVBUF` option on this socket. + /// + /// For more information about this option, see + /// [`set_recv_buffer_size`][link]. + /// + /// [link]: #tymethod.set_recv_buffer_size + pub fn recv_buffer_size(&self) -> io::Result { + self.io.get_ref().recv_buffer_size() + } + + /// Sets the value of the `SO_SNDBUF` option on this socket. + /// + /// Changes the size of the operating system's send buffer associated with + /// the socket. + pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.get_ref().set_send_buffer_size(size) + } + + /// Gets the value of the `SO_SNDBUF` option on this socket. + /// + /// For more information about this option, see [`set_send_buffer`][link]. 
+ /// + /// [link]: #tymethod.set_send_buffer + pub fn send_buffer_size(&self) -> io::Result { + self.io.get_ref().send_buffer_size() + } + /// Sets whether keepalive messages are enabled to be sent on this socket. /// /// On Unix, this option will set the `SO_KEEPALIVE` as well as the @@ -376,23 +468,23 @@ impl TcpStream { /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option. /// /// If `None` is specified then keepalive messages are disabled, otherwise - /// the number of milliseconds specified will be the time to remain idle - /// before sending a TCP keepalive probe. + /// the duration specified will be the time to remain idle before sending a + /// TCP keepalive probe. /// - /// Some platforms specify this value in seconds, so sub-second millisecond + /// Some platforms specify this value in seconds, so sub-second /// specifications may be omitted. - pub fn set_keepalive_ms(&self, keepalive: Option) -> io::Result<()> { - self.io.get_ref().set_keepalive_ms(keepalive) + pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { + self.io.get_ref().set_keepalive(keepalive) } /// Returns whether keepalive messages are enabled on this socket, and if so - /// the amount of milliseconds between them. + /// the duration of time between them. /// - /// For more information about this option, see [`set_keepalive_ms`][link]. + /// For more information about this option, see [`set_keepalive`][link]. /// - /// [link]: #method.set_keepalive_ms - pub fn keepalive_ms(&self) -> io::Result> { - self.io.get_ref().keepalive_ms() + /// [link]: #tymethod.set_keepalive + pub fn keepalive(&self) -> io::Result> { + self.io.get_ref().keepalive() } /// Sets the value for the `IP_TTL` option on this socket. @@ -407,10 +499,55 @@ impl TcpStream { /// /// For more information about this option, see [`set_ttl`][link]. 
/// - /// [link]: #method.set_ttl + /// [link]: #tymethod.set_ttl pub fn ttl(&self) -> io::Result { self.io.get_ref().ttl() } + + /// Sets the value for the `IPV6_V6ONLY` option on this socket. + /// + /// If this is set to `true` then the socket is restricted to sending and + /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications + /// can bind the same port at the same time. + /// + /// If this is set to `false` then the socket can be used to send and + /// receive packets from an IPv4-mapped IPv6 address. + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.io.get_ref().set_only_v6(only_v6) + } + + /// Gets the value of the `IPV6_V6ONLY` option for this socket. + /// + /// For more information about this option, see [`set_only_v6`][link]. + /// + /// [link]: #tymethod.set_only_v6 + pub fn only_v6(&self) -> io::Result { + self.io.get_ref().only_v6() + } + + /// Sets the linger duration of this socket by setting the SO_LINGER option + pub fn set_linger(&self, dur: Option) -> io::Result<()> { + self.io.get_ref().set_linger(dur) + } + + /// reads the linger duration for this socket by getting the SO_LINGER option + pub fn linger(&self) -> io::Result> { + self.io.get_ref().linger() + } + + #[deprecated(since = "0.1.8", note = "use set_keepalive")] + #[doc(hidden)] + pub fn set_keepalive_ms(&self, keepalive: Option) -> io::Result<()> { + #[allow(deprecated)] + self.io.get_ref().set_keepalive_ms(keepalive) + } + + #[deprecated(since = "0.1.8", note = "use keepalive")] + #[doc(hidden)] + pub fn keepalive_ms(&self) -> io::Result> { + #[allow(deprecated)] + self.io.get_ref().keepalive_ms() + } } impl Read for TcpStream { @@ -424,7 +561,7 @@ impl Write for TcpStream { self.io.write(buf) } fn flush(&mut self) -> io::Result<()> { - self.io.flush() + Ok(()) } } @@ -460,22 +597,22 @@ impl ::io::Io for TcpStream { fn read_vec(&mut self, bufs: &mut [&mut IoVec]) -> io::Result { if let Async::NotReady = ::poll_read(self) { - return 
Err(::would_block()) + return Err(io::ErrorKind::WouldBlock.into()) } let r = self.io.get_ref().read_bufs(bufs); if is_wouldblock(&r) { - self.io.need_read(); + self.io.clear_read_ready(mio::Ready::readable())?; } return r } fn write_vec(&mut self, bufs: &[&IoVec]) -> io::Result { if let Async::NotReady = ::poll_write(self) { - return Err(::would_block()) + return Err(io::ErrorKind::WouldBlock.into()) } let r = self.io.get_ref().write_bufs(bufs); if is_wouldblock(&r) { - self.io.need_write(); + self.io.clear_write_ready()?; } return r } @@ -514,7 +651,31 @@ impl<'a> AsyncRead for &'a TcpStream { return Ok(Async::NotReady) } let r = unsafe { - let mut bufs: [_; 16] = Default::default(); + // The `IoVec` type can't have a 0-length size, so we create a bunch + // of dummy versions on the stack with 1 length which we'll quickly + // overwrite. + let b1: &mut [u8] = &mut [0]; + let b2: &mut [u8] = &mut [0]; + let b3: &mut [u8] = &mut [0]; + let b4: &mut [u8] = &mut [0]; + let b5: &mut [u8] = &mut [0]; + let b6: &mut [u8] = &mut [0]; + let b7: &mut [u8] = &mut [0]; + let b8: &mut [u8] = &mut [0]; + let b9: &mut [u8] = &mut [0]; + let b10: &mut [u8] = &mut [0]; + let b11: &mut [u8] = &mut [0]; + let b12: &mut [u8] = &mut [0]; + let b13: &mut [u8] = &mut [0]; + let b14: &mut [u8] = &mut [0]; + let b15: &mut [u8] = &mut [0]; + let b16: &mut [u8] = &mut [0]; + let mut bufs: [&mut IoVec; 16] = [ + b1.into(), b2.into(), b3.into(), b4.into(), + b5.into(), b6.into(), b7.into(), b8.into(), + b9.into(), b10.into(), b11.into(), b12.into(), + b13.into(), b14.into(), b15.into(), b16.into(), + ]; let n = buf.bytes_vec_mut(&mut bufs); self.io.get_ref().read_bufs(&mut bufs[..n]) }; @@ -525,7 +686,7 @@ impl<'a> AsyncRead for &'a TcpStream { Ok(Async::Ready(n)) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.need_read(); + self.io.clear_read_ready(mio::Ready::readable())?; Ok(Async::NotReady) } Err(e) => Err(e), @@ -543,7 +704,12 @@ impl<'a> AsyncWrite for &'a 
TcpStream { return Ok(Async::NotReady) } let r = { - let mut bufs: [_; 16] = Default::default(); + // The `IoVec` type can't have a zero-length size, so create a dummy + // version from a 1-length slice which we'll overwrite with the + // `bytes_vec` method. + static DUMMY: &[u8] = &[0]; + let iovec = <&IoVec>::from(DUMMY); + let mut bufs = [iovec; 64]; let n = buf.bytes_vec(&mut bufs); self.io.get_ref().write_bufs(&bufs[..n]) }; @@ -553,7 +719,7 @@ impl<'a> AsyncWrite for &'a TcpStream { Ok(Async::Ready(n)) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.need_write(); + self.io.clear_write_ready()?; Ok(Async::NotReady) } Err(e) => Err(e), @@ -611,7 +777,7 @@ impl Future for TcpStreamNewState { // actually hit an error or not. // // If all that succeeded then we ship everything on up. - if let Async::NotReady = stream.io.poll_write() { + if let Async::NotReady = stream.io.poll_write_ready()? { return Ok(Async::NotReady) } if let Some(e) = try!(stream.io.get_ref().take_error()) { @@ -625,7 +791,7 @@ impl Future for TcpStreamNewState { } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] mod sys { use std::os::unix::prelude::*; use super::{TcpStream, TcpListener}; diff --git a/third_party/rust/tokio-core/src/net/udp/frame.rs b/third_party/rust/tokio-core/src/net/udp/frame.rs index 28705f959e11..5ae41b7384cd 100644 --- a/third_party/rust/tokio-core/src/net/udp/frame.rs +++ b/third_party/rust/tokio-core/src/net/udp/frame.rs @@ -55,12 +55,14 @@ pub trait UdpCodec { /// /// You can acquire a `UdpFramed` instance by using the `UdpSocket::framed` /// adapter. 
+#[must_use = "sinks do nothing unless polled"] pub struct UdpFramed { socket: UdpSocket, codec: C, rd: Vec, wr: Vec, out_addr: SocketAddr, + flushed: bool, } impl Stream for UdpFramed { @@ -81,29 +83,35 @@ impl Sink for UdpFramed { type SinkError = io::Error; fn start_send(&mut self, item: C::Out) -> StartSend { - if self.wr.len() > 0 { - try!(self.poll_complete()); - if self.wr.len() > 0 { - return Ok(AsyncSink::NotReady(item)); + trace!("sending frame"); + + if !self.flushed { + match try!(self.poll_complete()) { + Async::Ready(()) => {}, + Async::NotReady => return Ok(AsyncSink::NotReady(item)), } } self.out_addr = self.codec.encode(item, &mut self.wr); + self.flushed = false; + trace!("frame encoded; length={}", self.wr.len()); + Ok(AsyncSink::Ready) } fn poll_complete(&mut self) -> Poll<(), io::Error> { - trace!("flushing framed transport"); - - if self.wr.is_empty() { + if self.flushed { return Ok(Async::Ready(())) } - trace!("writing; remaining={}", self.wr.len()); + trace!("flushing frame; length={}", self.wr.len()); let n = try_nb!(self.socket.send_to(&self.wr, &self.out_addr)); trace!("written {}", n); + let wrote_all = n == self.wr.len(); self.wr.clear(); + self.flushed = true; + if wrote_all { Ok(Async::Ready(())) } else { @@ -125,6 +133,7 @@ pub fn new(socket: UdpSocket, codec: C) -> UdpFramed { out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)), rd: vec![0; 64 * 1024], wr: Vec::with_capacity(8 * 1024), + flushed: true, } } diff --git a/third_party/rust/tokio-core/src/net/udp/mod.rs b/third_party/rust/tokio-core/src/net/udp/mod.rs index 7592ef21e8da..5f3b6e35aed8 100644 --- a/third_party/rust/tokio-core/src/net/udp/mod.rs +++ b/third_party/rust/tokio-core/src/net/udp/mod.rs @@ -1,16 +1,15 @@ use std::io; -use std::mem; use std::net::{self, SocketAddr, Ipv4Addr, Ipv6Addr}; use std::fmt; use futures::{Async, Future, Poll}; use mio; -use reactor::{Handle, PollEvented}; +use reactor::{Handle, PollEvented2}; /// An I/O object 
representing a UDP socket. pub struct UdpSocket { - io: PollEvented, + io: PollEvented2, } mod frame; @@ -22,12 +21,12 @@ impl UdpSocket { /// This function will create a new UDP socket and attempt to bind it to the /// `addr` provided. If the result is `Ok`, the socket has successfully bound. pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result { - let udp = try!(mio::udp::UdpSocket::bind(addr)); + let udp = try!(mio::net::UdpSocket::bind(addr)); UdpSocket::new(udp, handle) } - fn new(socket: mio::udp::UdpSocket, handle: &Handle) -> io::Result { - let io = try!(PollEvented::new(socket, handle)); + fn new(socket: mio::net::UdpSocket, handle: &Handle) -> io::Result { + let io = try!(PollEvented2::new_with_handle(socket, handle.new_tokio_handle())); Ok(UdpSocket { io: io }) } @@ -42,7 +41,7 @@ impl UdpSocket { /// `reuse_address` or binding to multiple addresses. pub fn from_socket(socket: net::UdpSocket, handle: &Handle) -> io::Result { - let udp = try!(mio::udp::UdpSocket::from_socket(socket)); + let udp = try!(mio::net::UdpSocket::from_socket(socket)); UdpSocket::new(udp, handle) } @@ -74,6 +73,46 @@ impl UdpSocket { self.io.get_ref().local_addr() } + /// Connects the UDP socket setting the default destination for send() and + /// limiting packets that are read via recv from the address specified in addr. + pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> { + self.io.get_ref().connect(*addr) + } + + /// Sends data on the socket to the address previously bound via connect(). + /// On success, returns the number of bytes written. + pub fn send(&self, buf: &[u8]) -> io::Result { + if let Async::NotReady = self.io.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + match self.io.get_ref().send(buf) { + Ok(n) => Ok(n), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + self.io.clear_write_ready()?; + } + Err(e) + } + } + } + + /// Receives data from the socket previously bound with connect(). 
+ /// On success, returns the number of bytes read. + pub fn recv(&self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? { + return Err(io::ErrorKind::WouldBlock.into()) + } + match self.io.get_ref().recv(buf) { + Ok(n) => Ok(n), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + self.io.clear_read_ready(mio::Ready::readable())?; + } + Err(e) + } + } + } + /// Test whether this socket is ready to be read or not. /// /// If the socket is *not* readable then the current task is scheduled to @@ -81,7 +120,15 @@ impl UdpSocket { /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is readable again. pub fn poll_read(&self) -> Async<()> { - self.io.poll_read() + self.io.poll_read_ready(mio::Ready::readable()) + .map(|r| { + if r.is_ready() { + Async::Ready(()) + } else { + Async::NotReady + } + }) + .unwrap_or(().into()) } /// Test whether this socket is ready to be written to or not. @@ -91,25 +138,34 @@ impl UdpSocket { /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is writable again. pub fn poll_write(&self) -> Async<()> { - self.io.poll_write() + self.io.poll_write_ready() + .map(|r| { + if r.is_ready() { + Async::Ready(()) + } else { + Async::NotReady + } + }) + .unwrap_or(().into()) } /// Sends data on the socket to the given address. On success, returns the /// number of bytes written. /// - /// Address type can be any implementor of `ToSocketAddrs` trait. See its + /// Address type can be any implementer of `ToSocketAddrs` trait. See its /// documentation for concrete examples. pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result { - if let Async::NotReady = self.io.poll_write() { - return Err(::would_block()) + if let Async::NotReady = self.io.poll_write_ready()? 
{ + return Err(io::ErrorKind::WouldBlock.into()) } match self.io.get_ref().send_to(buf, target) { - Ok(Some(n)) => Ok(n), - Ok(None) => { - self.io.need_write(); - Err(::would_block()) + Ok(n) => Ok(n), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + self.io.clear_write_ready()?; + } + Err(e) } - Err(e) => Err(e), } } @@ -131,28 +187,23 @@ impl UdpSocket { pub fn send_dgram(self, buf: T, addr: SocketAddr) -> SendDgram where T: AsRef<[u8]>, { - SendDgram { - state: SendState::Writing { - sock: self, - addr: addr, - buf: buf, - }, - } + SendDgram(Some((self, buf, addr))) } /// Receives data from the socket. On success, returns the number of bytes /// read and the address from whence the data came. pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - if let Async::NotReady = self.io.poll_read() { - return Err(::would_block()) + if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? { + return Err(io::ErrorKind::WouldBlock.into()) } match self.io.get_ref().recv_from(buf) { - Ok(Some(n)) => Ok(n), - Ok(None) => { - self.io.need_read(); - Err(::would_block()) + Ok(n) => Ok(n), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + self.io.clear_read_ready(mio::Ready::readable())?; + } + Err(e) } - Err(e) => Err(e), } } @@ -173,12 +224,7 @@ impl UdpSocket { pub fn recv_dgram(self, buf: T) -> RecvDgram where T: AsMut<[u8]>, { - RecvDgram { - state: RecvState::Reading { - sock: self, - buf: buf, - }, - } + RecvDgram(Some((self, buf))) } /// Gets the value of the `SO_BROADCAST` option for this socket. @@ -320,6 +366,27 @@ impl UdpSocket { interface: u32) -> io::Result<()> { self.io.get_ref().leave_multicast_v6(multiaddr, interface) } + + /// Sets the value for the `IPV6_V6ONLY` option on this socket. + /// + /// If this is set to `true` then the socket is restricted to sending and + /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications + /// can bind the same port at the same time. 
+ /// + /// If this is set to `false` then the socket can be used to send and + /// receive packets from an IPv4-mapped IPv6 address. + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + self.io.get_ref().set_only_v6(only_v6) + } + + /// Gets the value of the `IPV6_V6ONLY` option for this socket. + /// + /// For more information about this option, see [`set_only_v6`][link]. + /// + /// [link]: #method.set_only_v6 + pub fn only_v6(&self) -> io::Result { + self.io.get_ref().only_v6() + } } impl fmt::Debug for UdpSocket { @@ -331,18 +398,8 @@ impl fmt::Debug for UdpSocket { /// A future used to write the entire contents of some data to a UDP socket. /// /// This is created by the `UdpSocket::send_dgram` method. -pub struct SendDgram { - state: SendState, -} - -enum SendState { - Writing { - sock: UdpSocket, - buf: T, - addr: SocketAddr, - }, - Empty, -} +#[must_use = "futures do nothing unless polled"] +pub struct SendDgram(Option<(UdpSocket, T, SocketAddr)>); fn incomplete_write(reason: &str) -> io::Error { io::Error::new(io::ErrorKind::Other, reason) @@ -355,40 +412,26 @@ impl Future for SendDgram type Error = io::Error; fn poll(&mut self) -> Poll<(UdpSocket, T), io::Error> { - match self.state { - SendState::Writing { ref sock, ref buf, ref addr } => { - let n = try_nb!(sock.send_to(buf.as_ref(), addr)); - if n != buf.as_ref().len() { - return Err(incomplete_write("failed to send entire message \ - in datagram")) - } + { + let (ref sock, ref buf, ref addr) = + *self.0.as_ref().expect("SendDgram polled after completion"); + let n = try_nb!(sock.send_to(buf.as_ref(), addr)); + if n != buf.as_ref().len() { + return Err(incomplete_write("failed to send entire message \ + in datagram")) } - SendState::Empty => panic!("poll a SendDgram after it's done"), } - match mem::replace(&mut self.state, SendState::Empty) { - SendState::Writing { sock, buf, addr: _ } => { - Ok(Async::Ready((sock, buf))) - } - SendState::Empty => panic!(), - } + let (sock, buf, _addr) = 
self.0.take().unwrap(); + Ok(Async::Ready((sock, buf))) } } /// A future used to receive a datagram from a UDP socket. /// /// This is created by the `UdpSocket::recv_dgram` method. -pub struct RecvDgram { - state: RecvState, -} - -enum RecvState { - Reading { - sock: UdpSocket, - buf: T, - }, - Empty, -} +#[must_use = "futures do nothing unless polled"] +pub struct RecvDgram(Option<(UdpSocket, T)>); impl Future for RecvDgram where T: AsMut<[u8]>, @@ -397,23 +440,19 @@ impl Future for RecvDgram type Error = io::Error; fn poll(&mut self) -> Poll { - let (n, addr) = match self.state { - RecvState::Reading { ref sock, ref mut buf } => { - try_nb!(sock.recv_from(buf.as_mut())) - } - RecvState::Empty => panic!("poll a RecvDgram after it's done"), + let (n, addr) = { + let (ref socket, ref mut buf) = + *self.0.as_mut().expect("RecvDgram polled after completion"); + + try_nb!(socket.recv_from(buf.as_mut())) }; - match mem::replace(&mut self.state, RecvState::Empty) { - RecvState::Reading { sock, buf } => { - Ok(Async::Ready((sock, buf, n, addr))) - } - RecvState::Empty => panic!(), - } + let (socket, buf) = self.0.take().unwrap(); + Ok(Async::Ready((socket, buf, n, addr))) } } -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "fuchsia")))] mod sys { use std::os::unix::prelude::*; use super::UdpSocket; diff --git a/third_party/rust/tokio-core/src/reactor/interval.rs b/third_party/rust/tokio-core/src/reactor/interval.rs index 0bf3143d29d8..27b3f8b83b27 100644 --- a/third_party/rust/tokio-core/src/reactor/interval.rs +++ b/third_party/rust/tokio-core/src/reactor/interval.rs @@ -6,11 +6,11 @@ use std::io; use std::time::{Duration, Instant}; -use futures::{Poll, Async}; -use futures::stream::{Stream}; +use futures::Poll; +use futures::Stream; +use tokio_timer::Interval as NewInterval; -use reactor::{Remote, Handle}; -use reactor::timeout_token::TimeoutToken; +use reactor::Handle; /// A stream representing notifications at fixed interval /// @@ -21,11 +21,9 @@ use 
reactor::timeout_token::TimeoutToken; /// Note that timeouts are not intended for high resolution timers, but rather /// they will likely fire some granularity after the exact instant that they're /// otherwise indicated to fire at. +#[must_use = "streams do nothing unless polled"] pub struct Interval { - token: TimeoutToken, - next: Instant, - interval: Duration, - handle: Remote, + new: NewInterval } impl Interval { @@ -49,10 +47,7 @@ impl Interval { -> io::Result { Ok(Interval { - token: try!(TimeoutToken::new(at, &handle)), - next: at, - interval: dur, - handle: handle.remote().clone(), + new: handle.remote.timer_handle.interval(at, dur) }) } } @@ -62,110 +57,8 @@ impl Stream for Interval { type Error = io::Error; fn poll(&mut self) -> Poll, io::Error> { - // TODO: is this fast enough? - let now = Instant::now(); - if self.next <= now { - self.next = next_interval(self.next, now, self.interval); - self.token.reset_timeout(self.next, &self.handle); - Ok(Async::Ready(Some(()))) - } else { - self.token.update_timeout(&self.handle); - Ok(Async::NotReady) - } + self.new.poll() + .map(|async| async.map(|option| option.map(|_| ()))) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } } - -impl Drop for Interval { - fn drop(&mut self) { - self.token.cancel_timeout(&self.handle); - } -} - -/// Converts Duration object to raw nanoseconds if possible -/// -/// This is useful to divide intervals. -/// -/// While technically for large duration it's impossible to represent any -/// duration as nanoseconds, the largest duration we can represent is about -/// 427_000 years. Large enough for any interval we would use or calculate in -/// tokio. 
-fn duration_to_nanos(dur: Duration) -> Option { - dur.as_secs() - .checked_mul(1_000_000_000) - .and_then(|v| v.checked_add(dur.subsec_nanos() as u64)) -} - -fn next_interval(prev: Instant, now: Instant, interval: Duration) -> Instant { - let new = prev + interval; - if new > now { - return new; - } else { - let spent_ns = duration_to_nanos(now.duration_since(prev)) - .expect("interval should be expired"); - let interval_ns = duration_to_nanos(interval) - .expect("interval is less that 427 thousand years"); - let mult = spent_ns/interval_ns + 1; - assert!(mult < (1 << 32), - "can't skip more than 4 billion intervals of {:?} \ - (trying to skip {})", interval, mult); - return prev + interval * (mult as u32); - } -} - -#[cfg(test)] -mod test { - use std::time::{Instant, Duration}; - use super::next_interval; - - struct Timeline(Instant); - - impl Timeline { - fn new() -> Timeline { - Timeline(Instant::now()) - } - fn at(&self, millis: u64) -> Instant { - self.0 + Duration::from_millis(millis) - } - fn at_ns(&self, sec: u64, nanos: u32) -> Instant { - self.0 + Duration::new(sec, nanos) - } - } - - fn dur(millis: u64) -> Duration { - Duration::from_millis(millis) - } - - #[test] - fn norm_next() { - let tm = Timeline::new(); - assert_eq!(next_interval(tm.at(1), tm.at(2), dur(10)), tm.at(11)); - assert_eq!(next_interval(tm.at(7777), tm.at(7788), dur(100)), - tm.at(7877)); - assert_eq!(next_interval(tm.at(1), tm.at(1000), dur(2100)), - tm.at(2101)); - } - - #[test] - fn fast_forward() { - let tm = Timeline::new(); - assert_eq!(next_interval(tm.at(1), tm.at(1000), dur(10)), - tm.at(1001)); - assert_eq!(next_interval(tm.at(7777), tm.at(8888), dur(100)), - tm.at(8977)); - assert_eq!(next_interval(tm.at(1), tm.at(10000), dur(2100)), - tm.at(10501)); - } - - /// TODO: this test actually should be successful, but since we can't - /// multiply Duration on anything larger than u32 easily we decided - /// to allow it to fail for now - #[test] - #[should_panic(expected = "can't 
skip more than 4 billion intervals")] - fn large_skip() { - let tm = Timeline::new(); - assert_eq!(next_interval( - tm.at_ns(0, 1), tm.at_ns(25, 0), Duration::new(0, 2)), - tm.at_ns(25, 1)); - } - -} diff --git a/third_party/rust/tokio-core/src/reactor/io_token.rs b/third_party/rust/tokio-core/src/reactor/io_token.rs index e8c4880b1e00..3db6a265b728 100644 --- a/third_party/rust/tokio-core/src/reactor/io_token.rs +++ b/third_party/rust/tokio-core/src/reactor/io_token.rs @@ -20,7 +20,7 @@ impl IoToken { /// /// When a new I/O object is created it needs to be communicated to the /// event loop to ensure that it's registered and ready to receive - /// notifications. The event loop with then respond back with the I/O object + /// notifications. The event loop will then respond back with the I/O object /// and a token which can be used to send more messages to the event loop. /// /// The token returned is then passed in turn to each of the methods below @@ -83,7 +83,7 @@ impl IoToken { /// This function will also panic if there is not a currently running future /// task. pub fn schedule_read(&self, handle: &Remote) { - handle.send(Message::Schedule(self.token, task::park(), Direction::Read)); + handle.send(Message::Schedule(self.token, task::current(), Direction::Read)); } /// Schedule the current future task to receive a notification when the @@ -110,7 +110,7 @@ impl IoToken { /// This function will also panic if there is not a currently running future /// task. 
pub fn schedule_write(&self, handle: &Remote) { - handle.send(Message::Schedule(self.token, task::park(), Direction::Write)); + handle.send(Message::Schedule(self.token, task::current(), Direction::Write)); } /// Unregister all information associated with a token on an event loop, diff --git a/third_party/rust/tokio-core/src/reactor/mod.rs b/third_party/rust/tokio-core/src/reactor/mod.rs index c73108e0b760..cbd664547f08 100644 --- a/third_party/rust/tokio-core/src/reactor/mod.rs +++ b/third_party/rust/tokio-core/src/reactor/mod.rs @@ -5,33 +5,31 @@ //! futures, schedule tasks, issue I/O requests, etc. use std::cell::RefCell; -use std::cmp; use std::fmt; -use std::io::{self, ErrorKind}; -use std::mem; +use std::io; use std::rc::{Rc, Weak}; use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; +use std::sync::atomic::{AtomicUsize, AtomicBool, ATOMIC_USIZE_INIT, Ordering}; use std::time::{Instant, Duration}; +use tokio; +use tokio::executor::current_thread::{CurrentThread, TaskExecutor}; +use tokio_executor; +use tokio_executor::park::{Park, Unpark, ParkThread, UnparkThread}; +use tokio_timer::timer::{self, Timer}; + use futures::{Future, IntoFuture, Async}; -use futures::future; -use futures::executor::{self, Spawn, Unpark}; +use futures::future::{self, Executor, ExecuteError}; +use futures::executor::{self, Spawn, Notify}; use futures::sync::mpsc; -use futures::task::Task; use mio; -use mio::event::Evented; -use slab::Slab; - -use heap::{Heap, Slot}; - -mod io_token; -mod timeout_token; mod poll_evented; +mod poll_evented2; mod timeout; mod interval; pub use self::poll_evented::PollEvented; +pub(crate) use self::poll_evented2::PollEvented as PollEvented2; pub use self::timeout::Timeout; pub use self::interval::Interval; @@ -46,37 +44,37 @@ scoped_thread_local!(static CURRENT_LOOP: Core); /// various I/O objects to interact with the event loop in interesting ways. 
// TODO: expand this pub struct Core { - events: mio::Events, + /// Uniquely identifies the reactor + id: usize, + + /// Handle to the Tokio runtime + rt: tokio::runtime::Runtime, + + /// Executes tasks + executor: RefCell>>, + + /// Timer handle + timer_handle: timer::Handle, + + /// Wakes up the thread when the `run` future is notified + notify_future: Arc, + + /// Wakes up the thread when a message is posted to `rx` + notify_rx: Arc, + + /// Send messages across threads to the core tx: mpsc::UnboundedSender, + + /// Receive messages rx: RefCell>>, - _rx_registration: mio::Registration, - rx_readiness: Arc, + // Shared inner state inner: Rc>, - - // Used for determining when the future passed to `run` is ready. Once the - // registration is passed to `io` above we never touch it again, just keep - // it alive. - _future_registration: mio::Registration, - future_readiness: Arc, } struct Inner { - id: usize, - io: mio::Poll, - - // Dispatch slabs for I/O and futures events - io_dispatch: Slab, - task_dispatch: Slab, - - // Timer wheel keeping track of all timeouts. The `usize` stored in the - // timer wheel is an index into the slab below. - // - // The slab below keeps track of the timeouts themselves as well as the - // state of the timeout itself. The `TimeoutToken` type is an index into the - // `timeouts` slab. - timer_heap: Heap<(Instant, usize)>, - timeouts: Slab<(Option, TimeoutState)>, + // Tasks that need to be spawned onto the executor. 
+ pending_spawn: Vec>>, } /// An unique ID for a Core @@ -97,6 +95,8 @@ pub struct CoreId(usize); pub struct Remote { id: usize, tx: mpsc::UnboundedSender, + new_handle: tokio::reactor::Handle, + timer_handle: timer::Handle, } /// A non-sendable handle to an event loop, useful for manufacturing instances @@ -105,87 +105,53 @@ pub struct Remote { pub struct Handle { remote: Remote, inner: Weak>, -} - -struct ScheduledIo { - readiness: Arc, - reader: Option, - writer: Option, -} - -struct ScheduledTask { - _registration: mio::Registration, - spawn: Option>>>, - wake: Arc, -} - -enum TimeoutState { - NotFired, - Fired, - Waiting(Task), -} - -enum Direction { - Read, - Write, + thread_pool: ::tokio::runtime::TaskExecutor, } enum Message { - DropSource(usize), - Schedule(usize, Task, Direction), - UpdateTimeout(usize, Task), - ResetTimeout(usize, Instant), - CancelTimeout(usize), Run(Box), } -#[repr(usize)] -#[derive(Clone, Copy, Debug, PartialEq)] -enum Readiness { - Readable = 1, - Writable = 2, -} - -const TOKEN_MESSAGES: mio::Token = mio::Token(0); -const TOKEN_FUTURE: mio::Token = mio::Token(1); -const TOKEN_START: usize = 2; +// ===== impl Core ===== impl Core { /// Creates a new event loop, returning any error that happened during the /// creation. 
pub fn new() -> io::Result { - let io = try!(mio::Poll::new()); - let future_pair = mio::Registration::new2(); - try!(io.register(&future_pair.0, - TOKEN_FUTURE, - mio::Ready::readable(), - mio::PollOpt::level())); + // Create a new parker + let timer = Timer::new(ParkThread::new()); + + // Create notifiers + let notify_future = Arc::new(MyNotify::new(timer.unpark())); + let notify_rx = Arc::new(MyNotify::new(timer.unpark())); + + // New Tokio reactor + threadpool + let rt = tokio::runtime::Runtime::new()?; + + let timer_handle = timer.handle(); + + // Executor to run !Send futures + let executor = RefCell::new(CurrentThread::new_with_park(timer)); + + // Used to send messages across threads let (tx, rx) = mpsc::unbounded(); - let channel_pair = mio::Registration::new2(); - try!(io.register(&channel_pair.0, - TOKEN_MESSAGES, - mio::Ready::readable(), - mio::PollOpt::level())); - let rx_readiness = Arc::new(MySetReadiness(channel_pair.1)); - rx_readiness.unpark(); + + // Wrap the rx half with a future context and refcell + let rx = RefCell::new(executor::spawn(rx)); + + let id = NEXT_LOOP_ID.fetch_add(1, Ordering::Relaxed); Ok(Core { - events: mio::Events::with_capacity(1024), - tx: tx, - rx: RefCell::new(executor::spawn(rx)), - _rx_registration: channel_pair.0, - rx_readiness: rx_readiness, - - _future_registration: future_pair.0, - future_readiness: Arc::new(MySetReadiness(future_pair.1)), - + id, + rt, + notify_future, + notify_rx, + tx, + rx, + executor, + timer_handle, inner: Rc::new(RefCell::new(Inner { - id: NEXT_LOOP_ID.fetch_add(1, Ordering::Relaxed), - io: io, - io_dispatch: Slab::with_capacity(1), - task_dispatch: Slab::with_capacity(1), - timeouts: Slab::with_capacity(1), - timer_heap: Heap::new(), + pending_spawn: vec![], })), }) } @@ -200,15 +166,25 @@ impl Core { Handle { remote: self.remote(), inner: Rc::downgrade(&self.inner), + thread_pool: self.rt.executor().clone(), } } + /// Returns a reference to the runtime backing the instance + /// + /// 
This provides access to the newer features of Tokio. + pub fn runtime(&self) -> &tokio::runtime::Runtime { + &self.rt + } + /// Generates a remote handle to this event loop which can be used to spawn /// tasks from other threads into this event loop. pub fn remote(&self) -> Remote { Remote { - id: self.inner.borrow().id, + id: self.id, tx: self.tx.clone(), + new_handle: self.rt.reactor().clone(), + timer_handle: self.timer_handle.clone() } } @@ -234,19 +210,42 @@ impl Core { where F: Future, { let mut task = executor::spawn(f); - let ready = self.future_readiness.clone(); - let mut future_fired = true; + let handle1 = self.rt.reactor().clone(); + let handle2 = self.rt.reactor().clone(); + let mut executor1 = self.rt.executor().clone(); + let mut executor2 = self.rt.executor().clone(); + let timer_handle = self.timer_handle.clone(); + + // Make sure the future will run at least once on enter + self.notify_future.notify(0); loop { - if future_fired { + if self.notify_future.take() { + let mut enter = tokio_executor::enter() + .ok().expect("cannot recursively call into `Core`"); + + let notify = &self.notify_future; + let mut current_thread = self.executor.borrow_mut(); + let res = try!(CURRENT_LOOP.set(self, || { - task.poll_future(ready.clone()) + ::tokio_reactor::with_default(&handle1, &mut enter, |enter| { + tokio_executor::with_default(&mut executor1, enter, |enter| { + timer::with_default(&timer_handle, enter, |enter| { + current_thread.enter(enter) + .block_on(future::lazy(|| { + Ok::<_, ()>(task.poll_future_notify(notify, 0)) + })).unwrap() + }) + }) + }) })); + if let Async::Ready(e) = res { return Ok(e) } } - future_fired = self.poll(None); + + self.poll(None, &handle2, &mut executor2); } } @@ -259,161 +258,64 @@ impl Core { /// `loop { lp.turn(None) }` is equivalent to calling `run` with an /// empty future (one that never finishes). 
pub fn turn(&mut self, max_wait: Option) { - self.poll(max_wait); + let handle = self.rt.reactor().clone(); + let mut executor = self.rt.executor().clone(); + self.poll(max_wait, &handle, &mut executor); } - fn poll(&mut self, max_wait: Option) -> bool { - // Given the `max_wait` variable specified, figure out the actual - // timeout that we're going to pass to `poll`. This involves taking a - // look at active timers on our heap as well. - let start = Instant::now(); - let timeout = self.inner.borrow_mut().timer_heap.peek().map(|t| { - if t.0 < start { - Duration::new(0, 0) - } else { - t.0 - start - } + fn poll(&mut self, max_wait: Option, + handle: &tokio::reactor::Handle, + sender: &mut tokio::runtime::TaskExecutor) { + let mut enter = tokio_executor::enter() + .ok().expect("cannot recursively call into `Core`"); + let timer_handle = self.timer_handle.clone(); + + ::tokio_reactor::with_default(handle, &mut enter, |enter| { + tokio_executor::with_default(sender, enter, |enter| { + timer::with_default(&timer_handle, enter, |enter| { + let start = Instant::now(); + + // Process all the events that came in, dispatching appropriately + if self.notify_rx.take() { + CURRENT_LOOP.set(self, || self.consume_queue()); + } + + // Drain any futures pending spawn + { + let mut e = self.executor.borrow_mut(); + let mut i = self.inner.borrow_mut(); + + for f in i.pending_spawn.drain(..) 
{ + // Little hack + e.enter(enter).block_on(future::lazy(|| { + TaskExecutor::current().spawn_local(f).unwrap(); + Ok::<_, ()>(()) + })).unwrap(); + } + } + + CURRENT_LOOP.set(self, || { + self.executor.borrow_mut() + .enter(enter) + .turn(max_wait) + .ok().expect("error in `CurrentThread::turn`"); + }); + + let after_poll = Instant::now(); + debug!("loop poll - {:?}", after_poll - start); + debug!("loop time - {:?}", after_poll); + + debug!("loop process, {:?}", after_poll.elapsed()); + }) + }); }); - let timeout = match (max_wait, timeout) { - (Some(d1), Some(d2)) => Some(cmp::min(d1, d2)), - (max_wait, timeout) => max_wait.or(timeout), - }; - - // Block waiting for an event to happen, peeling out how many events - // happened. - let amt = match self.inner.borrow_mut().io.poll(&mut self.events, timeout) { - Ok(a) => a, - Err(ref e) if e.kind() == ErrorKind::Interrupted => return false, - Err(e) => panic!("error in poll: {}", e), - }; - - let after_poll = Instant::now(); - debug!("loop poll - {:?}", after_poll - start); - debug!("loop time - {:?}", after_poll); - - // Process all timeouts that may have just occurred, updating the - // current time since - self.consume_timeouts(after_poll); - - // Process all the events that came in, dispatching appropriately - let mut fired = false; - for i in 0..self.events.len() { - let event = self.events.get(i).unwrap(); - let token = event.token(); - trace!("event {:?} {:?}", event.readiness(), event.token()); - - if token == TOKEN_MESSAGES { - self.rx_readiness.0.set_readiness(mio::Ready::empty()).unwrap(); - CURRENT_LOOP.set(&self, || self.consume_queue()); - } else if token == TOKEN_FUTURE { - self.future_readiness.0.set_readiness(mio::Ready::empty()).unwrap(); - fired = true; - } else { - self.dispatch(token, event.readiness()); - } - } - debug!("loop process - {} events, {:?}", amt, after_poll.elapsed()); - return fired - } - - fn dispatch(&mut self, token: mio::Token, ready: mio::Ready) { - let token = 
usize::from(token) - TOKEN_START; - if token % 2 == 0 { - self.dispatch_io(token / 2, ready) - } else { - self.dispatch_task(token / 2) - } - } - - fn dispatch_io(&mut self, token: usize, ready: mio::Ready) { - let mut reader = None; - let mut writer = None; - let mut inner = self.inner.borrow_mut(); - if let Some(io) = inner.io_dispatch.get_mut(token) { - if ready.is_readable() || platform::is_hup(&ready) { - reader = io.reader.take(); - io.readiness.fetch_or(Readiness::Readable as usize, - Ordering::Relaxed); - } - if ready.is_writable() { - writer = io.writer.take(); - io.readiness.fetch_or(Readiness::Writable as usize, - Ordering::Relaxed); - } - } - drop(inner); - // TODO: don't notify the same task twice - if let Some(reader) = reader { - self.notify_handle(reader); - } - if let Some(writer) = writer { - self.notify_handle(writer); - } - } - - fn dispatch_task(&mut self, token: usize) { - let mut inner = self.inner.borrow_mut(); - let (task, wake) = match inner.task_dispatch.get_mut(token) { - Some(slot) => (slot.spawn.take(), slot.wake.clone()), - None => return, - }; - wake.0.set_readiness(mio::Ready::empty()).unwrap(); - let mut task = match task { - Some(task) => task, - None => return, - }; - drop(inner); - let res = CURRENT_LOOP.set(self, || task.poll_future(wake)); - let _task_to_drop; - inner = self.inner.borrow_mut(); - match res { - Ok(Async::NotReady) => { - assert!(inner.task_dispatch[token].spawn.is_none()); - inner.task_dispatch[token].spawn = Some(task); - } - Ok(Async::Ready(())) | - Err(()) => { - _task_to_drop = inner.task_dispatch.remove(token).unwrap(); - } - } - drop(inner); - } - - fn consume_timeouts(&mut self, now: Instant) { - loop { - let mut inner = self.inner.borrow_mut(); - match inner.timer_heap.peek() { - Some(head) if head.0 <= now => {} - Some(_) => break, - None => break, - }; - let (_, slab_idx) = inner.timer_heap.pop().unwrap(); - - trace!("firing timeout: {}", slab_idx); - inner.timeouts[slab_idx].0.take().unwrap(); - let 
handle = inner.timeouts[slab_idx].1.fire(); - drop(inner); - if let Some(handle) = handle { - self.notify_handle(handle); - } - } - } - - /// Method used to notify a task handle. - /// - /// Note that this should be used instead of `handle.unpark()` to ensure - /// that the `CURRENT_LOOP` variable is set appropriately. - fn notify_handle(&self, handle: Task) { - debug!("notifying a task handle"); - CURRENT_LOOP.set(&self, || handle.unpark()); } fn consume_queue(&self) { debug!("consuming notification queue"); // TODO: can we do better than `.unwrap()` here? - let unpark = self.rx_readiness.clone(); loop { - let msg = self.rx.borrow_mut().poll_stream(unpark.clone()).unwrap(); + let msg = self.rx.borrow_mut().poll_stream_notify(&self.notify_rx, 0).unwrap(); match msg { Async::Ready(Some(msg)) => self.notify(msg), Async::NotReady | @@ -423,33 +325,21 @@ impl Core { } fn notify(&self, msg: Message) { - match msg { - Message::DropSource(tok) => self.inner.borrow_mut().drop_source(tok), - Message::Schedule(tok, wake, dir) => { - let task = self.inner.borrow_mut().schedule(tok, wake, dir); - if let Some(task) = task { - self.notify_handle(task); - } - } - Message::UpdateTimeout(t, handle) => { - let task = self.inner.borrow_mut().update_timeout(t, handle); - if let Some(task) = task { - self.notify_handle(task); - } - } - Message::ResetTimeout(t, at) => { - self.inner.borrow_mut().reset_timeout(t, at); - } - Message::CancelTimeout(t) => { - self.inner.borrow_mut().cancel_timeout(t) - } - Message::Run(r) => r.call_box(self), - } + let Message::Run(r) = msg; + r.call_box(self); } /// Get the ID of this loop pub fn id(&self) -> CoreId { - CoreId(self.inner.borrow().id) + CoreId(self.id) + } +} + +impl Executor for Core + where F: Future + 'static, +{ + fn execute(&self, future: F) -> Result<(), ExecuteError> { + self.handle().execute(future) } } @@ -461,128 +351,31 @@ impl fmt::Debug for Core { } } -impl Inner { - fn add_source(&mut self, source: &Evented) - -> 
io::Result<(Arc, usize)> { - debug!("adding a new I/O source"); - let sched = ScheduledIo { - readiness: Arc::new(AtomicUsize::new(0)), - reader: None, - writer: None, - }; - if self.io_dispatch.vacant_entry().is_none() { - let amt = self.io_dispatch.len(); - self.io_dispatch.reserve_exact(amt); - } - let entry = self.io_dispatch.vacant_entry().unwrap(); - try!(self.io.register(source, - mio::Token(TOKEN_START + entry.index() * 2), - mio::Ready::readable() | - mio::Ready::writable() | - platform::hup(), - mio::PollOpt::edge())); - Ok((sched.readiness.clone(), entry.insert(sched).index())) - } - - fn deregister_source(&mut self, source: &Evented) -> io::Result<()> { - self.io.deregister(source) - } - - fn drop_source(&mut self, token: usize) { - debug!("dropping I/O source: {}", token); - self.io_dispatch.remove(token).unwrap(); - } - - fn schedule(&mut self, token: usize, wake: Task, dir: Direction) - -> Option { - debug!("scheduling direction for: {}", token); - let sched = self.io_dispatch.get_mut(token).unwrap(); - let (slot, bit) = match dir { - Direction::Read => (&mut sched.reader, Readiness::Readable as usize), - Direction::Write => (&mut sched.writer, Readiness::Writable as usize), - }; - if sched.readiness.load(Ordering::SeqCst) & bit != 0 { - *slot = None; - Some(wake) - } else { - *slot = Some(wake); - None - } - } - - fn add_timeout(&mut self, at: Instant) -> usize { - if self.timeouts.vacant_entry().is_none() { - let len = self.timeouts.len(); - self.timeouts.reserve_exact(len); - } - let entry = self.timeouts.vacant_entry().unwrap(); - let slot = self.timer_heap.push((at, entry.index())); - let entry = entry.insert((Some(slot), TimeoutState::NotFired)); - debug!("added a timeout: {}", entry.index()); - return entry.index(); - } - - fn update_timeout(&mut self, token: usize, handle: Task) -> Option { - debug!("updating a timeout: {}", token); - self.timeouts[token].1.block(handle) - } - - fn reset_timeout(&mut self, token: usize, at: Instant) { - let 
pair = &mut self.timeouts[token]; - // TODO: avoid remove + push and instead just do one sift of the heap? - // In theory we could update it in place and then do the percolation - // as necessary - if let Some(slot) = pair.0.take() { - self.timer_heap.remove(slot); - } - let slot = self.timer_heap.push((at, token)); - *pair = (Some(slot), TimeoutState::NotFired); - debug!("set a timeout: {}", token); - } - - fn cancel_timeout(&mut self, token: usize) { - debug!("cancel a timeout: {}", token); - let pair = self.timeouts.remove(token); - if let Some((Some(slot), _state)) = pair { - self.timer_heap.remove(slot); - } - } - - fn spawn(&mut self, future: Box>) { - if self.task_dispatch.vacant_entry().is_none() { - let len = self.task_dispatch.len(); - self.task_dispatch.reserve_exact(len); - } - let entry = self.task_dispatch.vacant_entry().unwrap(); - let token = TOKEN_START + 2 * entry.index() + 1; - let pair = mio::Registration::new2(); - self.io.register(&pair.0, - mio::Token(token), - mio::Ready::readable(), - mio::PollOpt::level()) - .expect("cannot fail future registration with mio"); - let unpark = Arc::new(MySetReadiness(pair.1)); - let entry = entry.insert(ScheduledTask { - spawn: Some(executor::spawn(future)), - wake: unpark, - _registration: pair.0, - }); - entry.get().wake.clone().unpark(); - } -} - impl Remote { fn send(&self, msg: Message) { self.with_loop(|lp| { match lp { Some(lp) => { - // Need to execute all existing requests first, to ensure - // that our message is processed "in order" - lp.consume_queue(); + // We want to make sure that all messages are received in + // order, so we need to consume pending messages before + // delivering this message to the core. The actually + // `consume_queue` function, however, can be somewhat slow + // right now where receiving on a channel will acquire a + // lock and block the current task. 
+ // + // To speed this up check the message queue's readiness as a + // sort of preflight check to see if we've actually got any + // messages. This should just involve some atomics and if it + // comes back false then we know for sure there are no + // pending messages, so we can immediately deliver our + // message. + if lp.notify_rx.take() { + lp.consume_queue(); + } lp.notify(msg); } None => { - match mpsc::UnboundedSender::send(&self.tx, msg) { + match self.tx.unbounded_send(msg) { Ok(()) => {} // TODO: this error should punt upwards and we should @@ -600,7 +393,7 @@ impl Remote { { if CURRENT_LOOP.is_set() { CURRENT_LOOP.with(|lp| { - let same = lp.inner.borrow().id == self.id; + let same = lp.id == self.id; if same { f(Some(lp)) } else { @@ -616,10 +409,16 @@ impl Remote { /// /// This function takes a closure which is executed within the context of /// the I/O loop itself. The future returned by the closure will be - /// scheduled on the event loop an run to completion. + /// scheduled on the event loop and run to completion. /// /// Note that while the closure, `F`, requires the `Send` bound as it might /// cross threads, the future `R` does not. + /// + /// # Panics + /// + /// This method will **not** catch panics from polling the future `f`. If + /// the future panics then it's the responsibility of the caller to catch + /// that panic and handle it as appropriate. 
pub fn spawn(&self, f: F) where F: FnOnce(&Handle) -> R + Send + 'static, R: IntoFuture, @@ -627,7 +426,7 @@ impl Remote { { self.send(Message::Run(Box::new(|lp: &Core| { let f = f(&lp.handle()); - lp.inner.borrow_mut().spawn(Box::new(f.into_future())); + lp.handle().spawn(f.into_future()); }))); } @@ -651,7 +450,7 @@ impl Remote { pub fn handle(&self) -> Option { if CURRENT_LOOP.is_set() { CURRENT_LOOP.with(|lp| { - let same = lp.inner.borrow().id == self.id; + let same = lp.id == self.id; if same { Some(lp.handle()) } else { @@ -664,6 +463,15 @@ impl Remote { } } +impl Executor for Remote + where F: Future + Send + 'static, +{ + fn execute(&self, future: F) -> Result<(), ExecuteError> { + self.spawn(|_| future); + Ok(()) + } +} + impl fmt::Debug for Remote { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Remote") @@ -673,20 +481,54 @@ impl fmt::Debug for Remote { } impl Handle { + /// Returns a reference to the new Tokio handle + pub fn new_tokio_handle(&self) -> &::tokio::reactor::Handle { + &self.remote.new_handle + } + /// Returns a reference to the underlying remote handle to the event loop. pub fn remote(&self) -> &Remote { &self.remote } /// Spawns a new future on the event loop this handle is associated with. + /// + /// # Panics + /// + /// This method will **not** catch panics from polling the future `f`. If + /// the future panics then it's the responsibility of the caller to catch + /// that panic and handle it as appropriate. pub fn spawn(&self, f: F) where F: Future + 'static, { let inner = match self.inner.upgrade() { Some(inner) => inner, - None => return, + None => { + return; + } }; - inner.borrow_mut().spawn(Box::new(f)); + + // Try accessing the executor directly + if let Ok(mut inner) = inner.try_borrow_mut() { + inner.pending_spawn.push(Box::new(f)); + return; + } + + // If that doesn't work, the executor is probably active, so spawn using + // the global fn. 
+ let _ = TaskExecutor::current().spawn_local(Box::new(f)); + } + + /// Spawns a new future onto the threadpool + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the executor + /// is currently at capacity and is unable to spawn a new future. + pub fn spawn_send(&self, f: F) + where F: Future + Send + 'static, + { + self.thread_pool.spawn(f); } /// Spawns a closure on this event loop. @@ -695,6 +537,12 @@ impl Handle { /// for running a closure wrapped in `futures::lazy`. It will spawn the /// function `f` provided onto the event loop, and continue to run the /// future returned by `f` on the event loop as well. + /// + /// # Panics + /// + /// This method will **not** catch panics from polling the future `f`. If + /// the future panics then it's the responsibility of the caller to catch + /// that panic and handle it as appropriate. pub fn spawn_fn(&self, f: F) where F: FnOnce() -> R + 'static, R: IntoFuture + 'static, @@ -708,6 +556,15 @@ impl Handle { } } +impl Executor for Handle + where F: Future + 'static, +{ + fn execute(&self, future: F) -> Result<(), ExecuteError> { + self.spawn(future); + Ok(()) + } +} + impl fmt::Debug for Handle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Handle") @@ -716,31 +573,28 @@ impl fmt::Debug for Handle { } } -impl TimeoutState { - fn block(&mut self, handle: Task) -> Option { - match *self { - TimeoutState::Fired => return Some(handle), - _ => {} +struct MyNotify { + unpark: UnparkThread, + notified: AtomicBool, +} + +impl MyNotify { + fn new(unpark: UnparkThread) -> Self { + MyNotify { + unpark, + notified: AtomicBool::new(true), } - *self = TimeoutState::Waiting(handle); - None } - fn fire(&mut self) -> Option { - match mem::replace(self, TimeoutState::Fired) { - TimeoutState::NotFired => None, - TimeoutState::Fired => panic!("fired twice?"), - TimeoutState::Waiting(handle) => Some(handle), - } + fn take(&self) -> bool { + self.notified.swap(false, 
Ordering::SeqCst) } } -struct MySetReadiness(mio::SetReadiness); - -impl Unpark for MySetReadiness { - fn unpark(&self) { - self.0.set_readiness(mio::Ready::readable()) - .expect("failed to set readiness"); +impl Notify for MyNotify { + fn notify(&self, _: usize) { + self.notified.store(true, Ordering::SeqCst); + self.unpark.unpark(); } } @@ -754,29 +608,101 @@ impl FnBox for F { } } -#[cfg(unix)] +const READ: usize = 1 << 0; +const WRITE: usize = 1 << 1; + +fn ready2usize(ready: mio::Ready) -> usize { + let mut bits = 0; + if ready.is_readable() { + bits |= READ; + } + if ready.is_writable() { + bits |= WRITE; + } + bits | platform::ready2usize(ready) +} + +fn usize2ready(bits: usize) -> mio::Ready { + let mut ready = mio::Ready::empty(); + if bits & READ != 0 { + ready.insert(mio::Ready::readable()); + } + if bits & WRITE != 0 { + ready.insert(mio::Ready::writable()); + } + ready | platform::usize2ready(bits) +} + +#[cfg(all(unix, not(target_os = "fuchsia")))] mod platform { use mio::Ready; use mio::unix::UnixReady; - pub fn is_hup(event: &Ready) -> bool { - UnixReady::from(*event).is_hup() + const HUP: usize = 1 << 2; + const ERROR: usize = 1 << 3; + const AIO: usize = 1 << 4; + + #[cfg(any(target_os = "dragonfly", target_os = "freebsd"))] + fn is_aio(ready: &Ready) -> bool { + UnixReady::from(*ready).is_aio() } - pub fn hup() -> Ready { - UnixReady::hup().into() - } -} - -#[cfg(windows)] -mod platform { - use mio::Ready; - - pub fn is_hup(_event: &Ready) -> bool { + #[cfg(not(any(target_os = "dragonfly", target_os = "freebsd")))] + fn is_aio(_ready: &Ready) -> bool { false } - pub fn hup() -> Ready { + pub fn ready2usize(ready: Ready) -> usize { + let ready = UnixReady::from(ready); + let mut bits = 0; + if is_aio(&ready) { + bits |= AIO; + } + if ready.is_error() { + bits |= ERROR; + } + if ready.is_hup() { + bits |= HUP; + } + bits + } + + #[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "ios", + target_os = "macos"))] + fn 
usize2ready_aio(ready: &mut UnixReady) { + ready.insert(UnixReady::aio()); + } + + #[cfg(not(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos")))] + fn usize2ready_aio(_ready: &mut UnixReady) { + // aio not available here → empty + } + + pub fn usize2ready(bits: usize) -> Ready { + let mut ready = UnixReady::from(Ready::empty()); + if bits & AIO != 0 { + usize2ready_aio(&mut ready); + } + if bits & HUP != 0 { + ready.insert(UnixReady::hup()); + } + if bits & ERROR != 0 { + ready.insert(UnixReady::error()); + } + ready.into() + } +} + +#[cfg(any(windows, target_os = "fuchsia"))] +mod platform { + use mio::Ready; + + pub fn ready2usize(_r: Ready) -> usize { + 0 + } + + pub fn usize2ready(_r: usize) -> Ready { Ready::empty() } } diff --git a/third_party/rust/tokio-core/src/reactor/poll_evented.rs b/third_party/rust/tokio-core/src/reactor/poll_evented.rs index 26934b69deb0..f8019ae01c75 100644 --- a/third_party/rust/tokio-core/src/reactor/poll_evented.rs +++ b/third_party/rust/tokio-core/src/reactor/poll_evented.rs @@ -8,15 +8,16 @@ use std::fmt; use std::io::{self, Read, Write}; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; -use futures::{Async, Poll}; +use futures::{task, Async, Poll}; use mio::event::Evented; +use mio::Ready; use tokio_io::{AsyncRead, AsyncWrite}; +use tokio::reactor::{Registration}; use reactor::{Handle, Remote}; -use reactor::Readiness::*; -use reactor::io_token::IoToken; /// A concrete implementation of a stream of readiness notifications for I/O /// objects that originates from an event loop. @@ -25,6 +26,10 @@ use reactor::io_token::IoToken; /// associated with a specific event loop and source of events that will be /// registered with an event loop. 
/// +/// An instance of `PollEvented` is essentially the bridge between the `mio` +/// world and the `tokio-core` world, providing abstractions to receive +/// notifications about changes to an object's `mio::Ready` state. +/// /// Each readiness stream has a number of methods to test whether the underlying /// object is readable or writable. Once the methods return that an object is /// readable/writable, then it will continue to do so until the `need_read` or @@ -38,19 +43,41 @@ use reactor::io_token::IoToken; /// You can find more information about creating a custom I/O object [online]. /// /// [online]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/#custom-io +/// +/// ## Readiness to read/write +/// +/// A `PollEvented` allows listening and waiting for an arbitrary `mio::Ready` +/// instance, including the platform-specific contents of `mio::Ready`. At most +/// two future tasks, however, can be waiting on a `PollEvented`. The +/// `need_read` and `need_write` methods can block two separate tasks, one on +/// reading and one on writing. Not all I/O events correspond to read/write, +/// however! +/// +/// To account for this a `PollEvented` gets a little interesting when working +/// with an arbitrary instance of `mio::Ready` that may not map precisely to +/// "write" and "read" tasks. Currently it is defined that instances of +/// `mio::Ready` that do *not* return true from `is_writable` are all notified +/// through `need_read`, or the read task. +/// +/// In other words, `poll_ready` with the `mio::UnixReady::hup` event will block +/// the read task of this `PollEvented` if the `hup` event isn't available. +/// Essentially a good rule of thumb is that if you're using the `poll_ready` +/// method you want to also use `need_read` to signal blocking and you should +/// otherwise probably avoid using two tasks on the same `PollEvented`. 
pub struct PollEvented { - token: IoToken, - handle: Remote, - readiness: AtomicUsize, io: E, + inner: Inner, + remote: Remote, } -impl fmt::Debug for PollEvented { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("PollEvented") - .field("io", &self.io) - .finish() - } +struct Inner { + registration: Registration, + + /// Currently visible read readiness + read_readiness: AtomicUsize, + + /// Currently visible write readiness + write_readiness: AtomicUsize, } impl PollEvented { @@ -60,11 +87,17 @@ impl PollEvented { /// This method returns a future which will resolve to the readiness stream /// when it's ready. pub fn new(io: E, handle: &Handle) -> io::Result> { + let registration = Registration::new(); + registration.register_with(&io, handle.new_tokio_handle())?; + Ok(PollEvented { - token: try!(IoToken::new(&io, handle)), - handle: handle.remote().clone(), - readiness: AtomicUsize::new(0), io: io, + inner: Inner { + registration, + read_readiness: AtomicUsize::new(0), + write_readiness: AtomicUsize::new(0), + }, + remote: handle.remote().clone(), }) } @@ -74,19 +107,16 @@ impl PollEvented { /// object with the event loop that the `handle` provided points to. /// Typically this method is not required as this automatically happens when /// `E` is dropped, but for some use cases the `E` object doesn't represent - /// an owned reference, so dropping it won't automatically unreigster with + /// an owned reference, so dropping it won't automatically unregister with /// the event loop. /// /// This consumes `self` as it will no longer provide events after the /// method is called, and will likely return an error if this `PollEvented` /// was created on a separate event loop from the `handle` specified. 
- pub fn deregister(self, handle: &Handle) -> io::Result<()> { - let inner = match handle.inner.upgrade() { - Some(inner) => inner, - None => return Ok(()), - }; - let ret = inner.borrow_mut().deregister_source(&self.io); - return ret + pub fn deregister(self, _: &Handle) -> io::Result<()> { + // Nothing has to happen here anymore as I/O objects are explicitly + // deregistered before dropped. + Ok(()) } } @@ -98,17 +128,45 @@ impl PollEvented { /// the stream is readable again. In other words, this method is only safe /// to call from within the context of a future's task, typically done in a /// `Future::poll` method. + /// + /// This is mostly equivalent to `self.poll_ready(Ready::readable())`. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. pub fn poll_read(&self) -> Async<()> { - if self.readiness.load(Ordering::SeqCst) & Readable as usize != 0 { - return Async::Ready(()) + if self.poll_read2().is_ready() { + return ().into(); } - self.readiness.fetch_or(self.token.take_readiness(), Ordering::SeqCst); - if self.readiness.load(Ordering::SeqCst) & Readable as usize != 0 { - Async::Ready(()) - } else { - self.token.schedule_read(&self.handle); - Async::NotReady + + Async::NotReady + } + + fn poll_read2(&self) -> Async { + // Load the cached readiness + match self.inner.read_readiness.load(Relaxed) { + 0 => {} + mut n => { + // Check what's new with the reactor. + if let Some(ready) = self.inner.registration.take_read_ready().unwrap() { + n |= super::ready2usize(ready); + self.inner.read_readiness.store(n, Relaxed); + } + + return super::usize2ready(n).into(); + } } + + let ready = match self.inner.registration.poll_read_ready().unwrap() { + Async::Ready(r) => r, + _ => return Async::NotReady, + }; + + // Cache the value + self.inner.read_readiness.store(super::ready2usize(ready), Relaxed); + + ready.into() } /// Tests to see if this source is ready to be written to or not. 
@@ -118,16 +176,94 @@ impl PollEvented { /// the stream is writable again. In other words, this method is only safe /// to call from within the context of a future's task, typically done in a /// `Future::poll` method. + /// + /// This is mostly equivalent to `self.poll_ready(Ready::writable())`. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. pub fn poll_write(&self) -> Async<()> { - if self.readiness.load(Ordering::SeqCst) & Writable as usize != 0 { - return Async::Ready(()) + match self.inner.write_readiness.load(Relaxed) { + 0 => {} + mut n => { + // Check what's new with the reactor. + if let Some(ready) = self.inner.registration.take_write_ready().unwrap() { + n |= super::ready2usize(ready); + self.inner.write_readiness.store(n, Relaxed); + } + + return ().into(); + } } - self.readiness.fetch_or(self.token.take_readiness(), Ordering::SeqCst); - if self.readiness.load(Ordering::SeqCst) & Writable as usize != 0 { - Async::Ready(()) - } else { - self.token.schedule_write(&self.handle); + + let ready = match self.inner.registration.poll_write_ready().unwrap() { + Async::Ready(r) => r, + _ => return Async::NotReady, + }; + + // Cache the value + self.inner.write_readiness.store(super::ready2usize(ready), Relaxed); + + ().into() + } + + /// Test to see whether this source fulfills any condition listed in `mask` + /// provided. + /// + /// The `mask` given here is a mio `Ready` set of possible events. This can + /// contain any events like read/write but also platform-specific events + /// such as hup and error. The `mask` indicates events that are interested + /// in being ready. + /// + /// If any event in `mask` is ready then it is returned through + /// `Async::Ready`. The `Ready` set returned is guaranteed to not be empty + /// and contains all events that are currently ready in the `mask` provided. 
+ /// + /// If no events are ready in the `mask` provided then the current task is + /// scheduled to receive a notification when any of them become ready. If + /// the `writable` event is contained within `mask` then this + /// `PollEvented`'s `write` task will be blocked and otherwise the `read` + /// task will be blocked. This is generally only relevant if you're working + /// with this `PollEvented` object on multiple tasks. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. + pub fn poll_ready(&self, mask: Ready) -> Async { + let mut ret = Ready::empty(); + + if mask.is_empty() { + return ret.into(); + } + + if mask.is_writable() { + if self.poll_write().is_ready() { + ret = Ready::writable(); + } + } + + let mask = mask - Ready::writable(); + + if !mask.is_empty() { + if let Async::Ready(v) = self.poll_read2() { + ret |= v & mask; + } + } + + if ret.is_empty() { + if mask.is_writable() { + self.need_write(); + } + + if mask.is_readable() { + self.need_read(); + } + Async::NotReady + } else { + ret.into() } } @@ -139,16 +275,28 @@ impl PollEvented { /// informs this readiness stream that the underlying object is no longer /// readable, typically because a "would block" error was seen. /// - /// The flag indicating that this stream is readable is unset and the - /// current task is scheduled to receive a notification when the stream is - /// then again readable. + /// *All* readiness bits associated with this stream except the writable bit + /// will be reset when this method is called. The current task is then + /// scheduled to receive a notification whenever anything changes other than + /// the writable bit. Note that this typically just means the readable bit + /// is used here, but if you're using a custom I/O object for events like + /// hup/error this may also be relevant. 
/// /// Note that it is also only valid to call this method if `poll_read` /// previously indicated that the object is readable. That is, this function /// must always be paired with calls to `poll_read` previously. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. pub fn need_read(&self) { - self.readiness.fetch_and(!(Readable as usize), Ordering::SeqCst); - self.token.schedule_read(&self.handle) + self.inner.read_readiness.store(0, Relaxed); + + if self.poll_read().is_ready() { + // Notify the current task + task::current().notify(); + } } /// Indicates to this source of events that the corresponding I/O object is @@ -166,15 +314,24 @@ impl PollEvented { /// Note that it is also only valid to call this method if `poll_write` /// previously indicated that the object is writable. That is, this function /// must always be paired with calls to `poll_write` previously. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. pub fn need_write(&self) { - self.readiness.fetch_and(!(Writable as usize), Ordering::SeqCst); - self.token.schedule_write(&self.handle) + self.inner.write_readiness.store(0, Relaxed); + + if self.poll_write().is_ready() { + // Notify the current task + task::current().notify(); + } } /// Returns a reference to the event loop handle that this readiness stream /// is associated with. 
pub fn remote(&self) -> &Remote { - &self.handle + &self.remote } /// Returns a shared reference to the underlying I/O object this readiness @@ -192,38 +349,47 @@ impl PollEvented { impl Read for PollEvented { fn read(&mut self, buf: &mut [u8]) -> io::Result { - if let Async::NotReady = self.poll_read() { - return Err(::would_block()) + if let Async::NotReady = PollEvented::poll_read(self) { + return Err(io::ErrorKind::WouldBlock.into()) } + let r = self.get_mut().read(buf); + if is_wouldblock(&r) { self.need_read(); } - return r + + r } } impl Write for PollEvented { fn write(&mut self, buf: &[u8]) -> io::Result { - if let Async::NotReady = self.poll_write() { - return Err(::would_block()) + if let Async::NotReady = PollEvented::poll_write(self) { + return Err(io::ErrorKind::WouldBlock.into()) } + let r = self.get_mut().write(buf); + if is_wouldblock(&r) { self.need_write(); } - return r + + r } fn flush(&mut self) -> io::Result<()> { - if let Async::NotReady = self.poll_write() { - return Err(::would_block()) + if let Async::NotReady = PollEvented::poll_write(self) { + return Err(io::ErrorKind::WouldBlock.into()) } + let r = self.get_mut().flush(); + if is_wouldblock(&r) { self.need_write(); } - return r + + r } } @@ -251,14 +417,17 @@ impl<'a, E> Read for &'a PollEvented where &'a E: Read, { fn read(&mut self, buf: &mut [u8]) -> io::Result { - if let Async::NotReady = self.poll_read() { - return Err(::would_block()) + if let Async::NotReady = PollEvented::poll_read(self) { + return Err(io::ErrorKind::WouldBlock.into()) } + let r = self.get_ref().read(buf); + if is_wouldblock(&r) { self.need_read(); } - return r + + r } } @@ -266,25 +435,31 @@ impl<'a, E> Write for &'a PollEvented where &'a E: Write, { fn write(&mut self, buf: &[u8]) -> io::Result { - if let Async::NotReady = self.poll_write() { - return Err(::would_block()) + if let Async::NotReady = PollEvented::poll_write(self) { + return Err(io::ErrorKind::WouldBlock.into()) } + let r = 
self.get_ref().write(buf); + if is_wouldblock(&r) { self.need_write(); } - return r + + r } fn flush(&mut self) -> io::Result<()> { - if let Async::NotReady = self.poll_write() { - return Err(::would_block()) + if let Async::NotReady = PollEvented::poll_write(self) { + return Err(io::ErrorKind::WouldBlock.into()) } + let r = self.get_ref().flush(); + if is_wouldblock(&r) { self.need_write(); } - return r + + r } } @@ -321,8 +496,10 @@ fn is_wouldblock(r: &io::Result) -> bool { } } -impl Drop for PollEvented { - fn drop(&mut self) { - self.token.drop_source(&self.handle); +impl fmt::Debug for PollEvented { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PollEvented") + .field("io", &self.io) + .finish() } } diff --git a/third_party/rust/tokio-core/src/reactor/poll_evented2.rs b/third_party/rust/tokio-core/src/reactor/poll_evented2.rs new file mode 100644 index 000000000000..ed42258be21a --- /dev/null +++ b/third_party/rust/tokio-core/src/reactor/poll_evented2.rs @@ -0,0 +1,485 @@ +use tokio::reactor::{Handle, Registration}; + +use futures::{task, Async, Poll}; +use mio; +use mio::event::Evented; +use tokio_io::{AsyncRead, AsyncWrite}; + +use std::fmt; +use std::io::{self, Read, Write}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; + +/// Associates an I/O resource that implements the [`std::Read`] and / or +/// [`std::Write`] traits with the reactor that drives it. +/// +/// `PollEvented` uses [`Registration`] internally to take a type that +/// implements [`mio::Evented`] as well as [`std::Read`] and or [`std::Write`] +/// and associate it with a reactor that will drive it. +/// +/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be +/// used from within the future's execution model. As such, the `PollEvented` +/// type provides [`AsyncRead`] and [`AsyncWrite`] implementations using the +/// underlying I/O resource as well as readiness events provided by the reactor. 
+/// +/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is +/// `Sync`), the caller must ensure that there are at most two tasks that use a +/// `PollEvented` instance concurrenty. One for reading and one for writing. +/// While violating this requirement is "safe" from a Rust memory model point of +/// view, it will result in unexpected behavior in the form of lost +/// notifications and tasks hanging. +/// +/// ## Readiness events +/// +/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, +/// this type also supports access to the underlying readiness event stream. +/// While similar in function to what [`Registration`] provides, the semantics +/// are a bit different. +/// +/// Two functions are provided to access the readiness events: +/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the +/// current readiness state of the `PollEvented` instance. If +/// [`poll_read_ready`] indicates read readiness, immediately calling +/// [`poll_read_ready`] again will also indicate read readiness. +/// +/// When the operation is attempted and is unable to succeed due to the I/O +/// resource not being ready, the caller must call [`clear_read_ready`] or +/// [`clear_write_ready`]. This clears the readiness state until a new readiness +/// event is received. +/// +/// This allows the caller to implement additional funcitons. For example, +/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and +/// [`clear_write_ready`]. 
+/// +/// ```rust,ignore +/// pub fn poll_accept(&mut self) -> Poll<(net::TcpStream, SocketAddr), io::Error> { +/// let ready = Ready::readable(); +/// +/// try_ready!(self.poll_evented.poll_read_ready(ready)); +/// +/// match self.poll_evented.get_ref().accept_std() { +/// Ok(pair) => Ok(Async::Ready(pair)), +/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { +/// self.poll_evented.clear_read_ready(ready); +/// Ok(Async::NotReady) +/// } +/// Err(e) => Err(e), +/// } +/// } +/// ``` +/// +/// ## Platform-specific events +/// +/// `PollEvented` also allows receiving platform-specific `mio::Ready` events. +/// These events are included as part of the read readiness event stream. The +/// write readiness event stream is only for `Ready::writable()` events. +/// +/// [`std::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// [`std::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +/// [`AsyncRead`]: ../io/trait.AsyncRead.html +/// [`AsyncWrite`]: ../io/trait.AsyncWrite.html +/// [`mio::Evented`]: https://docs.rs/mio/0.6/mio/trait.Evented.html +/// [`Registration`]: struct.Registration.html +/// [`TcpListener`]: ../net/struct.TcpListener.html +pub struct PollEvented { + io: Option, + inner: Inner, +} + +struct Inner { + registration: Registration, + + /// Currently visible read readiness + read_readiness: AtomicUsize, + + /// Currently visible write readiness + write_readiness: AtomicUsize, +} + +// ===== impl PollEvented ===== + +macro_rules! poll_ready { + ($me:expr, $mask:expr, $cache:ident, $poll:ident, $take:ident) => {{ + $me.register()?; + + // Load cached & encoded readiness. + let mut cached = $me.inner.$cache.load(Relaxed); + let mask = $mask | platform::hup(); + + // See if the current readiness matches any bits. + let mut ret = mio::Ready::from_usize(cached) & $mask; + + if ret.is_empty() { + // Readiness does not match, consume the registration's readiness + // stream. 
This happens in a loop to ensure that the stream gets + // drained. + loop { + let ready = try_ready!($me.inner.registration.$poll()); + cached |= ready.as_usize(); + + // Update the cache store + $me.inner.$cache.store(cached, Relaxed); + + ret |= ready & mask; + + if !ret.is_empty() { + return Ok(ret.into()); + } + } + } else { + // Check what's new with the registration stream. This will not + // request to be notified + if let Some(ready) = $me.inner.registration.$take()? { + cached |= ready.as_usize(); + $me.inner.$cache.store(cached, Relaxed); + } + + Ok(mio::Ready::from_usize(cached).into()) + } + }} +} + +impl PollEvented +where E: Evented +{ + /// Creates a new `PollEvented` associated with the default reactor. + pub fn new(io: E) -> PollEvented { + PollEvented { + io: Some(io), + inner: Inner { + registration: Registration::new(), + read_readiness: AtomicUsize::new(0), + write_readiness: AtomicUsize::new(0), + } + } + } + + /// Creates a new `PollEvented` associated with the specified reactor. + pub fn new_with_handle(io: E, handle: &Handle) -> io::Result { + let ret = PollEvented::new(io); + ret.inner.registration.register_with(ret.io.as_ref().unwrap(), handle)?; + Ok(ret) + } + + /// Returns a shared reference to the underlying I/O object this readiness + /// stream is wrapping. + pub fn get_ref(&self) -> &E { + self.io.as_ref().unwrap() + } + + /// Returns a mutable reference to the underlying I/O object this readiness + /// stream is wrapping. + pub fn get_mut(&mut self) -> &mut E { + self.io.as_mut().unwrap() + } + + /// Consumes self, returning the inner I/O object + /// + /// This function will deregister the I/O resource from the reactor before + /// returning. If the deregistration operation fails, an error is returned. + /// + /// Note that deregistering does not guarantee that the I/O resource can be + /// registered with a different reactor. Some I/O resource types can only be + /// associated with a single reactor instance for their lifetime. 
+ pub fn into_inner(mut self) -> io::Result { + let io = self.io.take().unwrap(); + self.inner.registration.deregister(&io)?; + Ok(io) + } + + /// Check the I/O resource's read readiness state. + /// + /// The mask argument allows specifying what readiness to notify on. This + /// can be any value, including platform specific readiness, **except** + /// `writable`. HUP is always implicitly included on platforms that support + /// it. + /// + /// If the resource is not ready for a read then `Async::NotReady` is + /// returned and the current task is notified once a new event is received. + /// + /// The I/O resource will remain in a read-ready state until readiness is + /// cleared by calling [`clear_read_ready`]. + /// + /// [`clear_read_ready`]: #method.clear_read_ready + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` includes writable. + /// * called from outside of a task context. + pub fn poll_read_ready(&self, mask: mio::Ready) -> Poll { + assert!(!mask.is_writable(), "cannot poll for write readiness"); + poll_ready!(self, mask, read_readiness, poll_read_ready, take_read_ready) + } + + /// Clears the I/O resource's read readiness state and registers the current + /// task to be notified once a read readiness event is received. + /// + /// After calling this function, `poll_read_ready` will return `NotReady` + /// until a new read readiness event has been received. + /// + /// The `mask` argument specifies the readiness bits to clear. This may not + /// include `writable` or `hup`. + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` includes writable or HUP + /// * called from outside of a task context. 
+ pub fn clear_read_ready(&self, ready: mio::Ready) -> io::Result<()> { + // Cannot clear write readiness + assert!(!ready.is_writable(), "cannot clear write readiness"); + assert!(!platform::is_hup(&ready), "cannot clear HUP readiness"); + + self.inner.read_readiness.fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_read_ready(ready)?.is_ready() { + // Notify the current task + task::current().notify(); + } + + Ok(()) + } + + /// Check the I/O resource's write readiness state. + /// + /// This always checks for writable readiness and also checks for HUP + /// readiness on platforms that support it. + /// + /// If the resource is not ready for a write then `Async::NotReady` is + /// returned and the current task is notified once a new event is received. + /// + /// The I/O resource will remain in a write-ready state until readiness is + /// cleared by calling [`clear_write_ready`]. + /// + /// [`clear_write_ready`]: #method.clear_write_ready + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` contains bits besides `writable` and `hup`. + /// * called from outside of a task context. + pub fn poll_write_ready(&self) -> Poll { + poll_ready!(self, + mio::Ready::writable(), + write_readiness, + poll_write_ready, + take_write_ready) + } + + /// Resets the I/O resource's write readiness state and registers the current + /// task to be notified once a write readiness event is received. + /// + /// This only clears writable readiness. HUP (on platforms that support HUP) + /// cannot be cleared as it is a final state. + /// + /// After calling this function, `poll_write_ready(Ready::writable())` will + /// return `NotReady` until a new read readiness event has been received. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. 
+ pub fn clear_write_ready(&self) -> io::Result<()> { + let ready = mio::Ready::writable(); + + self.inner.write_readiness.fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_write_ready()?.is_ready() { + // Notify the current task + task::current().notify(); + } + + Ok(()) + } + + /// Ensure that the I/O resource is registered with the reactor. + fn register(&self) -> io::Result<()> { + self.inner.registration.register(self.io.as_ref().unwrap())?; + Ok(()) + } +} + +// ===== Read / Write impls ===== + +impl Read for PollEvented +where E: Evented + Read, +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().read(buf); + + if is_wouldblock(&r) { + self.clear_read_ready(mio::Ready::readable())?; + } + + return r + } +} + +impl Write for PollEvented +where E: Evented + Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().write(buf); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } + + fn flush(&mut self) -> io::Result<()> { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().flush(); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } +} + +impl AsyncRead for PollEvented +where E: Evented + Read, +{ +} + +impl AsyncWrite for PollEvented +where E: Evented + Write, +{ + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +// ===== &'a Read / &'a Write impls ===== + +impl<'a, E> Read for &'a PollEvented +where E: Evented, &'a E: Read, +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? 
{ + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_ref().read(buf); + + if is_wouldblock(&r) { + self.clear_read_ready(mio::Ready::readable())?; + } + + return r + } +} + +impl<'a, E> Write for &'a PollEvented +where E: Evented, &'a E: Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_ref().write(buf); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } + + fn flush(&mut self) -> io::Result<()> { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_ref().flush(); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } +} + +impl<'a, E> AsyncRead for &'a PollEvented +where E: Evented, &'a E: Read, +{ +} + +impl<'a, E> AsyncWrite for &'a PollEvented +where E: Evented, &'a E: Write, +{ + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +fn is_wouldblock(r: &io::Result) -> bool { + match *r { + Ok(_) => false, + Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, + } +} + + +impl fmt::Debug for PollEvented { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PollEvented") + .field("io", &self.io) + .finish() + } +} + +impl Drop for PollEvented { + fn drop(&mut self) { + if let Some(io) = self.io.as_ref() { + // Ignore errors + let _ = self.inner.registration.deregister(io); + } + } +} + +#[cfg(all(unix, not(target_os = "fuchsia")))] +mod platform { + use mio::Ready; + use mio::unix::UnixReady; + + pub fn hup() -> Ready { + UnixReady::hup().into() + } + + pub fn is_hup(ready: &Ready) -> bool { + UnixReady::from(*ready).is_hup() + } +} + +#[cfg(any(windows, target_os = "fuchsia"))] +mod platform { + use mio::Ready; + + pub fn hup() -> Ready { + Ready::empty() + } + + pub fn is_hup(_: &Ready) -> bool { + false + } +} diff --git 
a/third_party/rust/tokio-core/src/reactor/timeout.rs b/third_party/rust/tokio-core/src/reactor/timeout.rs index 76fba9fd416e..2b6bafa6fcc5 100644 --- a/third_party/rust/tokio-core/src/reactor/timeout.rs +++ b/third_party/rust/tokio-core/src/reactor/timeout.rs @@ -6,10 +6,10 @@ use std::io; use std::time::{Duration, Instant}; -use futures::{Future, Poll, Async}; +use futures::{Future, Poll}; +use tokio_timer::Delay; -use reactor::{Remote, Handle}; -use reactor::timeout_token::TimeoutToken; +use reactor::Handle; /// A future representing the notification that a timeout has occurred. /// @@ -18,17 +18,17 @@ use reactor::timeout_token::TimeoutToken; /// Note that timeouts are not intended for high resolution timers, but rather /// they will likely fire some granularity after the exact instant that they're /// otherwise indicated to fire at. +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] pub struct Timeout { - token: TimeoutToken, - when: Instant, - handle: Remote, + delay: Delay } impl Timeout { /// Creates a new timeout which will fire at `dur` time into the future. /// - /// This function will return a future that will resolve to the actual - /// timeout object. The timeout object itself is then a future which will be + /// This function will return a Result with the actual timeout object or an + /// error. The timeout object itself is then a future which will be /// set to fire at the specified point in the future. pub fn new(dur: Duration, handle: &Handle) -> io::Result { Timeout::new_at(Instant::now() + dur, handle) @@ -36,16 +36,31 @@ impl Timeout { /// Creates a new timeout which will fire at the time specified by `at`. /// - /// This function will return a future that will resolve to the actual - /// timeout object. The timeout object itself is then a future which will be + /// This function will return a Result with the actual timeout object or an + /// error. 
The timeout object itself is then a future which will be /// set to fire at the specified point in the future. pub fn new_at(at: Instant, handle: &Handle) -> io::Result { Ok(Timeout { - token: try!(TimeoutToken::new(at, &handle)), - when: at, - handle: handle.remote().clone(), + delay: handle.remote.timer_handle.delay(at) }) } + + /// Resets this timeout to an new timeout which will fire at the time + /// specified by `at`. + /// + /// This method is usable even of this instance of `Timeout` has "already + /// fired". That is, if this future has resolved, calling this method means + /// that the future will still re-resolve at the specified instant. + /// + /// If `at` is in the past then this future will immediately be resolved + /// (when `poll` is called). + /// + /// Note that if any task is currently blocked on this future then that task + /// will be dropped. It is required to call `poll` again after this method + /// has been called to ensure that a task is blocked on this future. + pub fn reset(&mut self, at: Instant) { + self.delay.reset(at) + } } impl Future for Timeout { @@ -53,19 +68,7 @@ impl Future for Timeout { type Error = io::Error; fn poll(&mut self) -> Poll<(), io::Error> { - // TODO: is this fast enough? 
- let now = Instant::now(); - if self.when <= now { - Ok(Async::Ready(())) - } else { - self.token.update_timeout(&self.handle); - Ok(Async::NotReady) - } - } -} - -impl Drop for Timeout { - fn drop(&mut self) { - self.token.cancel_timeout(&self.handle); + self.delay.poll() + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } } diff --git a/third_party/rust/tokio-core/src/reactor/timeout_token.rs b/third_party/rust/tokio-core/src/reactor/timeout_token.rs deleted file mode 100644 index ca82bde2a547..000000000000 --- a/third_party/rust/tokio-core/src/reactor/timeout_token.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::io; -use std::time::Instant; - -use futures::task; - -use reactor::{Message, Handle, Remote}; - -/// A token that identifies an active timeout. -pub struct TimeoutToken { - token: usize, -} - -impl TimeoutToken { - /// Adds a new timeout to get fired at the specified instant, notifying the - /// specified task. - pub fn new(at: Instant, handle: &Handle) -> io::Result { - match handle.inner.upgrade() { - Some(inner) => { - let token = inner.borrow_mut().add_timeout(at); - Ok(TimeoutToken { token: token }) - } - None => Err(io::Error::new(io::ErrorKind::Other, "event loop gone")), - } - } - - /// Updates a previously added timeout to notify a new task instead. - /// - /// # Panics - /// - /// This method will panic if the timeout specified was not created by this - /// loop handle's `add_timeout` method. - pub fn update_timeout(&self, handle: &Remote) { - handle.send(Message::UpdateTimeout(self.token, task::park())) - } - - /// Resets previously added (or fired) timeout to an new timeout - /// - /// # Panics - /// - /// This method will panic if the timeout specified was not created by this - /// loop handle's `add_timeout` method. - pub fn reset_timeout(&mut self, at: Instant, handle: &Remote) { - handle.send(Message::ResetTimeout(self.token, at)); - } - - /// Cancel a previously added timeout. 
- /// - /// # Panics - /// - /// This method will panic if the timeout specified was not created by this - /// loop handle's `add_timeout` method. - pub fn cancel_timeout(&self, handle: &Remote) { - debug!("cancel timeout {}", self.token); - handle.send(Message::CancelTimeout(self.token)) - } -} diff --git a/third_party/rust/tokio-core/tests/interval.rs b/third_party/rust/tokio-core/tests/interval.rs index 90ece77bfd58..3d056c361b91 100644 --- a/third_party/rust/tokio-core/tests/interval.rs +++ b/third_party/rust/tokio-core/tests/interval.rs @@ -19,8 +19,8 @@ fn single() { drop(env_logger::init()); let mut l = t!(Core::new()); let dur = Duration::from_millis(10); - let interval = t!(Interval::new(dur, &l.handle())); let start = Instant::now(); + let interval = t!(Interval::new(dur, &l.handle())); t!(l.run(interval.take(1).collect())); assert!(start.elapsed() >= dur); } @@ -30,8 +30,8 @@ fn two_times() { drop(env_logger::init()); let mut l = t!(Core::new()); let dur = Duration::from_millis(10); - let interval = t!(Interval::new(dur, &l.handle())); let start = Instant::now(); + let interval = t!(Interval::new(dur, &l.handle())); let result = t!(l.run(interval.take(2).collect())); assert!(start.elapsed() >= dur*2); assert_eq!(result, vec![(), ()]); diff --git a/third_party/rust/tokio-core/tests/line-frames.rs b/third_party/rust/tokio-core/tests/line-frames.rs index 3270d685b69a..8f68ad09034f 100644 --- a/third_party/rust/tokio-core/tests/line-frames.rs +++ b/third_party/rust/tokio-core/tests/line-frames.rs @@ -23,7 +23,10 @@ impl Decoder for LineCodec { fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { match buf.iter().position(|&b| b == b'\n') { - Some(i) => Ok(Some(buf.split_to(i + 1).into())), + Some(i) => { + let ret = buf.split_to(i + 1); + Ok(Some(ret)) + } None => Ok(None), } } @@ -59,7 +62,11 @@ fn echo() { let addr = listener.local_addr().unwrap(); let srv = listener.incoming().for_each(move |(socket, _)| { let (sink, stream) = 
socket.framed(LineCodec).split(); - handle.spawn(sink.send_all(stream).map(|_| ()).map_err(|_| ())); + handle.spawn({ + ::futures::future::lazy(|| { + sink.send_all(stream).map(|_| ()).map_err(|_| ()) + }) + }); Ok(()) }); diff --git a/third_party/rust/tokio-core/tests/spawn.rs b/third_party/rust/tokio-core/tests/spawn.rs index 98434a818031..aafae85f8eef 100644 --- a/third_party/rust/tokio-core/tests/spawn.rs +++ b/third_party/rust/tokio-core/tests/spawn.rs @@ -1,3 +1,4 @@ +extern crate tokio; extern crate tokio_core; extern crate env_logger; extern crate futures; @@ -33,6 +34,66 @@ fn simple() { assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2)); } +#[test] +fn simple_send() { + drop(env_logger::init()); + let mut lp = Core::new().unwrap(); + + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + lp.handle().spawn_send(future::lazy(|| { + tx1.send(1).unwrap(); + Ok(()) + })); + lp.remote().spawn(|_| { + future::lazy(|| { + tx2.send(2).unwrap(); + Ok(()) + }) + }); + + assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2)); +} + +#[test] +fn simple_send_current_thread() { + drop(env_logger::init()); + let mut lp = Core::new().unwrap(); + + let (tx, rx) = oneshot::channel(); + + lp.run(future::lazy(move || { + tokio::executor::current_thread::spawn(future::lazy(move || { + tx.send(1).unwrap(); + Ok(()) + })); + + rx.map_err(|_| panic!()) + .and_then(|v| { + assert_eq!(v, 1); + Ok(()) + }) + })).unwrap(); +} + +#[test] +fn tokio_spawn_from_fut() { + drop(env_logger::init()); + let mut lp = Core::new().unwrap(); + + let (tx1, rx1) = oneshot::channel(); + + lp.run(future::lazy(|| { + tokio::spawn(future::lazy(|| { + tx1.send(1).unwrap(); + Ok(()) + })); + Ok::<_, ()>(()) + })).unwrap(); + + assert_eq!(lp.run(rx1).unwrap(), 1); +} + #[test] fn simple_core_poll() { drop(env_logger::init()); @@ -79,6 +140,26 @@ fn spawn_in_poll() { assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2)); } +#[test] +fn spawn_in_poll2() { + drop(env_logger::init()); + let 
mut lp = Core::new().unwrap(); + + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + lp.handle().spawn(future::lazy(move || { + tx1.send(1).unwrap(); + tokio::spawn(future::lazy(|| { + tx2.send(2).unwrap(); + Ok(()) + })); + + Ok(()) + })); + + assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2)); +} + #[test] fn drop_timeout_in_spawn() { drop(env_logger::init()); diff --git a/third_party/rust/tokio-core/tests/tcp.rs b/third_party/rust/tokio-core/tests/tcp.rs index 929c5962ede4..d0b9a9ce7b5e 100644 --- a/third_party/rust/tokio-core/tests/tcp.rs +++ b/third_party/rust/tokio-core/tests/tcp.rs @@ -36,6 +36,24 @@ fn connect() { assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); } +#[test] +fn connect2() { + drop(env_logger::init()); + let mut l = t!(Core::new()); + let srv = t!(net::TcpListener::bind("127.0.0.1:0")); + let addr = t!(srv.local_addr()); + let t = thread::spawn(move || { + t!(srv.accept()).0 + }); + + let stream = TcpStream::connect2(&addr); + let mine = t!(l.run(stream)); + let theirs = t.join().unwrap(); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); +} + #[test] fn accept() { drop(env_logger::init()); @@ -83,3 +101,28 @@ fn accept2() { mine.unwrap(); t.join().unwrap(); } + +#[test] +fn accept_2() { + drop(env_logger::init()); + let mut l = t!(Core::new()); + let srv = t!(TcpListener::bind2(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let (tx, rx) = channel(); + let client = srv.incoming().map(move |t| { + tx.send(()).unwrap(); + t.0 + }).into_future().map_err(|e| e.0); + assert!(rx.try_recv().is_err()); + let t = thread::spawn(move || { + net::TcpStream::connect(&addr).unwrap() + }); + + let (mine, _remaining) = t!(l.run(client)); + let mine = mine.unwrap(); + let theirs = t.join().unwrap(); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); +} 
diff --git a/third_party/rust/tokio-core/tests/timeout.rs b/third_party/rust/tokio-core/tests/timeout.rs index 50cebf052d9a..c1e22417cc8c 100644 --- a/third_party/rust/tokio-core/tests/timeout.rs +++ b/third_party/rust/tokio-core/tests/timeout.rs @@ -18,8 +18,8 @@ fn smoke() { drop(env_logger::init()); let mut l = t!(Core::new()); let dur = Duration::from_millis(10); - let timeout = t!(Timeout::new(dur, &l.handle())); let start = Instant::now(); + let timeout = t!(Timeout::new(dur, &l.handle())); t!(l.run(timeout)); assert!(start.elapsed() >= (dur / 2)); } diff --git a/third_party/rust/tokio-core/tests/udp.rs b/third_party/rust/tokio-core/tests/udp.rs index c5b0ef57dbd0..7c6627a9be3b 100644 --- a/third_party/rust/tokio-core/tests/udp.rs +++ b/third_party/rust/tokio-core/tests/udp.rs @@ -5,8 +5,8 @@ extern crate tokio_core; use std::io; use std::net::SocketAddr; -use futures::{Future, Poll}; -use tokio_core::net::UdpSocket; +use futures::{Future, Poll, Stream, Sink}; +use tokio_core::net::{UdpSocket, UdpCodec}; use tokio_core::reactor::Core; macro_rules! t { @@ -16,50 +16,232 @@ macro_rules! 
t { }) } -#[test] -fn send_messages() { +fn send_messages(send: S, recv: R) { let mut l = t!(Core::new()); - let a = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle())); - let b = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle())); + let mut a = t!(UdpSocket::bind(&([127, 0, 0, 1], 0).into(), &l.handle())); + let mut b = t!(UdpSocket::bind(&([127, 0, 0, 1], 0).into(), &l.handle())); let a_addr = t!(a.local_addr()); let b_addr = t!(b.local_addr()); - let send = SendMessage { socket: a, addr: b_addr }; - let recv = RecvMessage { socket: b, expected_addr: a_addr }; - t!(l.run(send.join(recv))); + { + let send = SendMessage::new(a, send.clone(), b_addr, b"1234"); + let recv = RecvMessage::new(b, recv.clone(), a_addr, b"1234"); + let (sendt, received) = t!(l.run(send.join(recv))); + a = sendt; + b = received; + } + + { + let send = SendMessage::new(a, send, b_addr, b""); + let recv = RecvMessage::new(b, recv, a_addr, b""); + t!(l.run(send.join(recv))); + } } -struct SendMessage { - socket: UdpSocket, +#[test] +fn send_to_and_recv_from() { + send_messages(SendTo {}, RecvFrom {}); +} + +#[test] +fn send_and_recv() { + send_messages(Send {}, Recv {}); +} + +trait SendFn { + fn send(&self, &UdpSocket, &[u8], &SocketAddr) -> Result; +} + +#[derive(Debug, Clone)] +struct SendTo {} + +impl SendFn for SendTo { + fn send(&self, socket: &UdpSocket, buf: &[u8], addr: &SocketAddr) -> Result { + socket.send_to(buf, addr) + } +} + +#[derive(Debug, Clone)] +struct Send {} + +impl SendFn for Send { + fn send(&self, socket: &UdpSocket, buf: &[u8], addr: &SocketAddr) -> Result { + socket.connect(addr).expect("could not connect"); + socket.send(buf) + } +} + +struct SendMessage { + socket: Option, + send: S, addr: SocketAddr, + data: &'static [u8], } -impl Future for SendMessage { - type Item = (); - type Error = io::Error; - - fn poll(&mut self) -> Poll<(), io::Error> { - let n = try_nb!(self.socket.send_to(b"1234", &self.addr)); - assert_eq!(n, 4); - Ok(().into()) 
+impl SendMessage { + fn new(socket: UdpSocket, send: S, addr: SocketAddr, data: &'static [u8]) -> SendMessage { + SendMessage { + socket: Some(socket), + send: send, + addr: addr, + data: data, + } } } -struct RecvMessage { - socket: UdpSocket, +impl Future for SendMessage { + type Item = UdpSocket; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let n = try_nb!(self.send.send(self.socket.as_ref().unwrap(), &self.data[..], &self.addr)); + + assert_eq!(n, self.data.len()); + + Ok(self.socket.take().unwrap().into()) + } +} + +trait RecvFn { + fn recv(&self, &UdpSocket, &mut [u8], &SocketAddr) -> Result; +} + +#[derive(Debug, Clone)] +struct RecvFrom {} + +impl RecvFn for RecvFrom { + fn recv(&self, socket: &UdpSocket, buf: &mut [u8], + expected_addr: &SocketAddr) -> Result { + socket.recv_from(buf).map(|(s, addr)| { + assert_eq!(addr, *expected_addr); + s + }) + } +} + +#[derive(Debug, Clone)] +struct Recv {} + +impl RecvFn for Recv { + fn recv(&self, socket: &UdpSocket, buf: &mut [u8], _: &SocketAddr) -> Result { + socket.recv(buf) + } +} + +struct RecvMessage { + socket: Option, + recv: R, expected_addr: SocketAddr, + expected_data: &'static [u8], } -impl Future for RecvMessage { - type Item = (); - type Error = io::Error; - - fn poll(&mut self) -> Poll<(), io::Error> { - let mut buf = [0; 32]; - let (n, addr) = try_nb!(self.socket.recv_from(&mut buf)); - assert_eq!(n, 4); - assert_eq!(&buf[..4], b"1234"); - assert_eq!(addr, self.expected_addr); - Ok(().into()) +impl RecvMessage { + fn new(socket: UdpSocket, recv: R, expected_addr: SocketAddr, + expected_data: &'static [u8]) -> RecvMessage { + RecvMessage { + socket: Some(socket), + recv: recv, + expected_addr: expected_addr, + expected_data: expected_data, + } } } + +impl Future for RecvMessage { + type Item = UdpSocket; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let mut buf = vec![0u8; 10 + self.expected_data.len() * 10]; + let n = 
try_nb!(self.recv.recv(&self.socket.as_ref().unwrap(), &mut buf[..], + &self.expected_addr)); + + assert_eq!(n, self.expected_data.len()); + assert_eq!(&buf[..self.expected_data.len()], &self.expected_data[..]); + + Ok(self.socket.take().unwrap().into()) + } +} + +#[test] +fn send_dgrams() { + let mut l = t!(Core::new()); + let mut a = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle())); + let mut b = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle())); + let mut buf = [0u8; 50]; + let b_addr = t!(b.local_addr()); + + { + let send = a.send_dgram(&b"4321"[..], b_addr); + let recv = b.recv_dgram(&mut buf[..]); + let (sendt, received) = t!(l.run(send.join(recv))); + assert_eq!(received.2, 4); + assert_eq!(&received.1[..4], b"4321"); + a = sendt.0; + b = received.0; + } + + { + let send = a.send_dgram(&b""[..], b_addr); + let recv = b.recv_dgram(&mut buf[..]); + let received = t!(l.run(send.join(recv))).1; + assert_eq!(received.2, 0); + } +} + +#[derive(Debug, Clone)] +struct Codec { + data: &'static [u8], + from: SocketAddr, + to: SocketAddr, +} + +impl UdpCodec for Codec { + type In = (); + type Out = &'static [u8]; + + fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result { + assert_eq!(src, &self.from); + assert_eq!(buf, self.data); + Ok(()) + } + + fn encode(&mut self, msg: Self::Out, buf: &mut Vec) -> SocketAddr { + assert_eq!(msg, self.data); + buf.extend_from_slice(msg); + self.to + } +} + +#[test] +fn send_framed() { + let mut l = t!(Core::new()); + let mut a_soc = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle())); + let mut b_soc = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle())); + let a_addr = t!(a_soc.local_addr()); + let b_addr = t!(b_soc.local_addr()); + + { + let a = a_soc.framed(Codec { data: &b"4567"[..], from: a_addr, to: b_addr}); + let b = b_soc.framed(Codec { data: &b"4567"[..], from: a_addr, to: b_addr}); + + let send = a.send(&b"4567"[..]); + let recv = b.into_future().map_err(|e| e.0); + let 
(sendt, received) = t!(l.run(send.join(recv))); + assert_eq!(received.0, Some(())); + + a_soc = sendt.into_inner(); + b_soc = received.1.into_inner(); + } + + { + let a = a_soc.framed(Codec { data: &b""[..], from: a_addr, to: b_addr}); + let b = b_soc.framed(Codec { data: &b""[..], from: a_addr, to: b_addr}); + + let send = a.send(&b""[..]); + let recv = b.into_future().map_err(|e| e.0); + let received = t!(l.run(send.join(recv))).1; + assert_eq!(received.0, Some(())); + } +} + diff --git a/third_party/rust/tokio-executor/.cargo-checksum.json b/third_party/rust/tokio-executor/.cargo-checksum.json new file mode 100644 index 000000000000..17ad9cf25a45 --- /dev/null +++ b/third_party/rust/tokio-executor/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"93011402fabbdbb889cc773a80955db34c328c45932d78b27c61c69a4b39af47","Cargo.toml":"b1393748c39eb5c392a77a81818751e0e575dee8466ee36638f69db53b5f4cc9","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"d44d2ddf3859e108f0cf747160f498e648ab5994addf2413935aa8f63f88c3fb","src/enter.rs":"1ae1c57afedec5935ae6bf31f5349dceb426dcf3d937024e5c36dab1296c9228","src/global.rs":"86fc1779b34e0a51f50f21e04c338a044d166b276ef858c8dc82801521e8fc3b","src/lib.rs":"eef29b494ffebf50c557d2f2cfcbe70b9cd9363086941655c54209bb2d397c99","src/park.rs":"6f4af9815a76c72593b83e1e0d0f2106bf261ad9f8334a2ad95c80c80e3af073","tests/executor.rs":"43f618a461e0c02cefe2661ce6597c0ff387a20d657c1c6b6bce8348a14ffc96"},"package":"424f0c87ecd66b863045d84e384cb7ce0ae384d8b065b9f0363d29c0d1b30b2f"} \ No newline at end of file diff --git a/third_party/rust/tokio-executor/CHANGELOG.md b/third_party/rust/tokio-executor/CHANGELOG.md new file mode 100644 index 000000000000..3fb039a78253 --- /dev/null +++ b/third_party/rust/tokio-executor/CHANGELOG.md @@ -0,0 +1,19 @@ +# 0.1.3 (August 6, 2018) + +* Implement `Executor` for `Box` (#420). +* Improve `EnterError` debug message (#410). 
+* Implement `status`, `Send`, and `Sync` for `DefaultExecutor` (#463, #472). +* Fix race in `ParkThread` (#507). +* Handle recursive calls into `DefaultExecutor` (#473). + +# 0.1.2 (March 30, 2018) + +* Implement `Unpark` for `Box`. + +# 0.1.1 (March 22, 2018) + +* Optionally support futures 0.2. + +# 0.1.0 (March 09, 2018) + +* Initial release diff --git a/third_party/rust/tokio-executor/Cargo.toml b/third_party/rust/tokio-executor/Cargo.toml new file mode 100644 index 000000000000..73b223cca142 --- /dev/null +++ b/third_party/rust/tokio-executor/Cargo.toml @@ -0,0 +1,25 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-executor" +version = "0.1.3" +authors = ["Carl Lerche "] +description = "Future execution primitives\n" +homepage = "https://github.com/tokio-rs/tokio" +documentation = "https://docs.rs/tokio-executor" +keywords = ["futures", "tokio"] +categories = ["concurrency", "asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.futures] +version = "0.1.19" diff --git a/third_party/rust/tokio-executor/LICENSE b/third_party/rust/tokio-executor/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-executor/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, 
including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-executor/README.md b/third_party/rust/tokio-executor/README.md new file mode 100644 index 000000000000..988a46b4e1b7 --- /dev/null +++ b/third_party/rust/tokio-executor/README.md @@ -0,0 +1,47 @@ +# tokio-executor + +Task execution related traits and utilities. + +[Documentation](https://tokio-rs.github.io/tokio/tokio_executor/) + +## Overview + +In the Tokio execution model, futures are lazy. When a future is created, no +work is performed. In order for the work defined by the future to happen, the +future must be submitted to an executor. A future that is submitted to an +executor is called a "task". + +The executor is responsible for ensuring that [`Future::poll`] is called +whenever the task is [notified]. Notification happens when the internal state of +a task transitions from "not ready" to ready. For example, a socket might have +received data and a call to `read` will now be able to succeed. + +This crate provides traits and utilities that are necessary for building an +executor, including: + +* The [`Executor`] trait describes the API for spawning a future onto an + executor. 
+ +* [`enter`] marks that the the current thread is entering an execution + context. This prevents a second executor from accidentally starting from + within the context of one that is already running. + +* [`DefaultExecutor`] spawns tasks onto the default executor for the current + context. + +* [`Park`] abstracts over blocking and unblocking the current thread. + +[`Executor`]: https://tokio-rs.github.io/tokio/tokio_executor/trait.Executor.html +[`enter`]: https://tokio-rs.github.io/tokio/tokio_executor/fn.enter.html +[`DefaultExecutor`]: https://tokio-rs.github.io/tokio/tokio_executor/struct.DefaultExecutor.html +[`Park`]: https://tokio-rs.github.io/tokio/tokio_executor/park/index.html + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-executor/src/enter.rs b/third_party/rust/tokio-executor/src/enter.rs new file mode 100644 index 000000000000..3da0d796bed8 --- /dev/null +++ b/third_party/rust/tokio-executor/src/enter.rs @@ -0,0 +1,113 @@ +use std::prelude::v1::*; +use std::cell::Cell; +use std::fmt; + +#[cfg(feature = "unstable-futures")] +use futures2; + +thread_local!(static ENTERED: Cell = Cell::new(false)); + +/// Represents an executor context. +/// +/// For more details, see [`enter` documentation](fn.enter.html) +pub struct Enter { + on_exit: Vec>, + permanent: bool, + + #[cfg(feature = "unstable-futures")] + _enter2: futures2::executor::Enter, +} + +/// An error returned by `enter` if an execution scope has already been +/// entered. 
+pub struct EnterError { + _a: (), +} + +impl fmt::Debug for EnterError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EnterError") + .field("reason", &"attempted to run an executor while another executor is already running") + .finish() + } +} + +/// Marks the current thread as being within the dynamic extent of an +/// executor. +/// +/// Executor implementations should call this function before blocking the +/// thread. If `None` is returned, the executor should fail by panicking or +/// taking some other action without blocking the current thread. This prevents +/// deadlocks due to multiple executors competing for the same thread. +/// +/// # Error +/// +/// Returns an error if the current thread is already marked +pub fn enter() -> Result { + ENTERED.with(|c| { + if c.get() { + Err(EnterError { _a: () }) + } else { + c.set(true); + + Ok(Enter { + on_exit: Vec::new(), + permanent: false, + + #[cfg(feature = "unstable-futures")] + _enter2: futures2::executor::enter().unwrap(), + }) + } + }) +} + +impl Enter { + /// Register a callback to be invoked if and when the thread + /// ceased to act as an executor. + pub fn on_exit(&mut self, f: F) where F: FnOnce() + 'static { + self.on_exit.push(Box::new(f)); + } + + /// Treat the remainder of execution on this thread as part of an + /// executor; used mostly for thread pool worker threads. + /// + /// All registered `on_exit` callbacks are *dropped* without being + /// invoked. + pub fn make_permanent(mut self) { + self.permanent = true; + } +} + +impl fmt::Debug for Enter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Enter").finish() + } +} + +impl Drop for Enter { + fn drop(&mut self) { + ENTERED.with(|c| { + assert!(c.get()); + + if self.permanent { + return + } + + for callback in self.on_exit.drain(..) 
{ + callback.call(); + } + + c.set(false); + }); + } +} + +trait Callback: 'static { + fn call(self: Box); +} + +impl Callback for F { + fn call(self: Box) { + (*self)() + } +} diff --git a/third_party/rust/tokio-executor/src/global.rs b/third_party/rust/tokio-executor/src/global.rs new file mode 100644 index 000000000000..ca7dbcfaf616 --- /dev/null +++ b/third_party/rust/tokio-executor/src/global.rs @@ -0,0 +1,224 @@ +use super::{Executor, Enter, SpawnError}; + +use futures::Future; + +use std::cell::Cell; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Executes futures on the default executor for the current execution context. +/// +/// `DefaultExecutor` implements `Executor` and can be used to spawn futures +/// without referencing a specific executor. +/// +/// When an executor starts, it sets the `DefaultExecutor` handle to point to an +/// executor (usually itself) that is used to spawn new tasks. +/// +/// The current `DefaultExecutor` reference is tracked using a thread-local +/// variable and is set using `tokio_executor::with_default` +#[derive(Debug, Clone)] +pub struct DefaultExecutor { + _dummy: (), +} + +impl DefaultExecutor { + /// Returns a handle to the default executor for the current context. + /// + /// Futures may be spawned onto the default executor using this handle. + /// + /// The returned handle will reference whichever executor is configured as + /// the default **at the time `spawn` is called**. This enables + /// `DefaultExecutor::current()` to be called before an execution context is + /// setup, then passed **into** an execution context before it is used. + /// + /// This is also true for sending the handle across threads, so calling + /// `DefaultExecutor::current()` on thread A and then sending the result to + /// thread B will _not_ reference the default executor that was set on thread A. 
+ pub fn current() -> DefaultExecutor { + DefaultExecutor { + _dummy: (), + } + } + + #[inline] + fn with_current R, R>(f: F) -> Option { + EXECUTOR.with(|current_executor| { + match current_executor.replace(State::Active) { + State::Ready(executor_ptr) => { + let executor = unsafe { &mut *executor_ptr }; + let result = f(executor); + current_executor.set(State::Ready(executor_ptr)); + Some(result) + }, + State::Empty | State::Active => None, + } + }) + } +} + +#[derive(Clone, Copy)] +enum State { + // default executor not defined + Empty, + // default executor is defined and ready to be used + Ready(*mut Executor), + // default executor is currently active (used to detect recursive calls) + Active +} + +/// Thread-local tracking the current executor +thread_local!(static EXECUTOR: Cell = Cell::new(State::Empty)); + +// ===== impl DefaultExecutor ===== + +impl super::Executor for DefaultExecutor { + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError> + { + DefaultExecutor::with_current(|executor| executor.spawn(future)) + .unwrap_or_else(|| Err(SpawnError::shutdown())) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, future: Box + Send>) + -> Result<(), futures2::executor::SpawnError> + { + DefaultExecutor::with_current(|executor| executor.spawn2(future)) + .unwrap_or_else(|| Err(futures2::executor::SpawnError::shutdown())) + } + + fn status(&self) -> Result<(), SpawnError> { + DefaultExecutor::with_current(|executor| executor.status()) + .unwrap_or_else(|| Err(SpawnError::shutdown())) + } +} + +// ===== global spawn fns ===== + +/// Submits a future for execution on the default executor -- usually a +/// threadpool. +/// +/// Futures are lazy constructs. When they are defined, no work happens. In +/// order for the logic defined by the future to be run, the future must be +/// spawned on an executor. This function is the easiest way to do so. +/// +/// This function must be called from an execution context, i.e. 
from a future +/// that has been already spawned onto an executor. +/// +/// Once spawned, the future will execute. The details of how that happens is +/// left up to the executor instance. If the executor is a thread pool, the +/// future will be pushed onto a queue that a worker thread polls from. If the +/// executor is a "current thread" executor, the future might be polled +/// immediately from within the call to `spawn` or it might be pushed onto an +/// internal queue. +/// +/// # Panics +/// +/// This function will panic if the default executor is not set or if spawning +/// onto the default executor returns an error. To avoid the panic, use the +/// `DefaultExecutor` handle directly. +/// +/// # Examples +/// +/// ```rust +/// # extern crate futures; +/// # extern crate tokio_executor; +/// # use tokio_executor::spawn; +/// # pub fn dox() { +/// use futures::future::lazy; +/// +/// spawn(lazy(|| { +/// println!("running on the default executor"); +/// Ok(()) +/// })); +/// # } +/// # pub fn main() {} +/// ``` +pub fn spawn(future: T) + where T: Future + Send + 'static, +{ + DefaultExecutor::current().spawn(Box::new(future)) + .unwrap() +} + +/// Like `spawn` but compatible with futures 0.2 +#[cfg(feature = "unstable-futures")] +pub fn spawn2(future: T) + where T: futures2::Future + Send + 'static, +{ + DefaultExecutor::current().spawn2(Box::new(future)) + .unwrap() +} + +/// Set the default executor for the duration of the closure +/// +/// # Panics +/// +/// This function panics if there already is a default executor set. +pub fn with_default(executor: &mut T, enter: &mut Enter, f: F) -> R +where T: Executor, + F: FnOnce(&mut Enter) -> R +{ + EXECUTOR.with(|cell| { + match cell.get() { + State::Ready(_) | State::Active => + panic!("default executor already set for execution context"), + _ => {} + } + + // Ensure that the executor is removed from the thread-local context + // when leaving the scope. This handles cases that involve panicking. 
+ struct Reset<'a>(&'a Cell); + + impl<'a> Drop for Reset<'a> { + fn drop(&mut self) { + self.0.set(State::Empty); + } + } + + let _reset = Reset(cell); + + // While scary, this is safe. The function takes a + // `&mut Executor`, which guarantees that the reference lives for the + // duration of `with_default`. + // + // Because we are always clearing the TLS value at the end of the + // function, we can cast the reference to 'static which thread-local + // cells require. + let executor = unsafe { hide_lt(executor as &mut _ as *mut _) }; + + cell.set(State::Ready(executor)); + + f(enter) + }) +} + +unsafe fn hide_lt<'a>(p: *mut (Executor + 'a)) -> *mut (Executor + 'static) { + use std::mem; + mem::transmute(p) +} + +#[cfg(test)] +mod tests { + use super::{Executor, DefaultExecutor, with_default}; + + #[test] + fn default_executor_is_send_and_sync() { + fn assert_send_sync() {} + + assert_send_sync::(); + } + + #[test] + fn nested_default_executor_status() { + let mut enter = super::super::enter().unwrap(); + let mut executor = DefaultExecutor::current(); + + let result = with_default(&mut executor, &mut enter, |_| { + DefaultExecutor::current().status() + }); + + assert!(result.err().unwrap().is_shutdown()) + } +} diff --git a/third_party/rust/tokio-executor/src/lib.rs b/third_party/rust/tokio-executor/src/lib.rs new file mode 100644 index 000000000000..e4a886907244 --- /dev/null +++ b/third_party/rust/tokio-executor/src/lib.rs @@ -0,0 +1,240 @@ +//! Task execution related traits and utilities. +//! +//! In the Tokio execution model, futures are lazy. When a future is created, no +//! work is performed. In order for the work defined by the future to happen, +//! the future must be submitted to an executor. A future that is submitted to +//! an executor is called a "task". +//! +//! The executor is responsible for ensuring that [`Future::poll`] is called +//! whenever the task is notified. Notification happens when the internal +//! 
state of a task transitions from *not ready* to *ready*. For example, a +//! socket might have received data and a call to `read` will now be able to +//! succeed. +//! +//! This crate provides traits and utilities that are necessary for building an +//! executor, including: +//! +//! * The [`Executor`] trait describes the API for spawning a future onto an +//! executor. +//! +//! * [`enter`] marks that the the current thread is entering an execution +//! context. This prevents a second executor from accidentally starting from +//! within the context of one that is already running. +//! +//! * [`DefaultExecutor`] spawns tasks onto the default executor for the current +//! context. +//! +//! * [`Park`] abstracts over blocking and unblocking the current thread. +//! +//! [`Executor`]: trait.Executor.html +//! [`enter`]: fn.enter.html +//! [`DefaultExecutor`]: struct.DefaultExecutor.html +//! [`Park`]: park/index.html +//! [`Future::poll`]: https://docs.rs/futures/0.1/futures/future/trait.Future.html#tymethod.poll + +#![deny(missing_docs, missing_debug_implementations, warnings)] +#![doc(html_root_url = "https://docs.rs/tokio-executor/0.1.3")] + +extern crate futures; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +mod enter; +mod global; +pub mod park; + +pub use enter::{enter, Enter, EnterError}; +pub use global::{spawn, with_default, DefaultExecutor}; + +#[cfg(feature = "unstable-futures")] +pub use global::spawn2; + +use futures::Future; + +/// A value that executes futures. +/// +/// The [`spawn`] function is used to submit a future to an executor. Once +/// submitted, the executor takes ownership of the future and becomes +/// responsible for driving the future to completion. +/// +/// The strategy employed by the executor to handle the future is less defined +/// and is left up to the `Executor` implementation. 
The `Executor` instance is +/// expected to call [`poll`] on the future once it has been notified, however +/// the "when" and "how" can vary greatly. +/// +/// For example, the executor might be a thread pool, in which case a set of +/// threads have already been spawned up and the future is inserted into a +/// queue. A thread will acquire the future and poll it. +/// +/// The `Executor` trait is only for futures that **are** `Send`. These are most +/// common. There currently is no trait that describes executors that operate +/// entirely on the current thread (i.e., are able to spawn futures that are not +/// `Send`). Note that single threaded executors can still implement `Executor`, +/// but only futures that are `Send` can be spawned via the trait. +/// +/// # Errors +/// +/// The [`spawn`] function returns `Result` with an error type of `SpawnError`. +/// This error type represents the reason that the executor was unable to spawn +/// the future. The two current represented scenarios are: +/// +/// * An executor being at capacity or full. As such, the executor is not able +/// to accept a new future. This error state is expected to be transient. +/// * An executor has been shutdown and can no longer accept new futures. This +/// error state is expected to be permanent. +/// +/// If a caller encounters an at capacity error, the caller should try to shed +/// load. This can be as simple as dropping the future that was spawned. +/// +/// If the caller encounters a shutdown error, the caller should attempt to +/// gracefully shutdown. 
+/// +/// # Examples +/// +/// ```rust +/// # extern crate futures; +/// # extern crate tokio_executor; +/// # use tokio_executor::Executor; +/// # fn docs(my_executor: &mut Executor) { +/// use futures::future::lazy; +/// my_executor.spawn(Box::new(lazy(|| { +/// println!("running on the executor"); +/// Ok(()) +/// }))).unwrap(); +/// # } +/// # fn main() {} +/// ``` +/// +/// [`spawn`]: #tymethod.spawn +/// [`poll`]: https://docs.rs/futures/0.1/futures/future/trait.Future.html#tymethod.poll +pub trait Executor { + /// Spawns a future object to run on this executor. + /// + /// `future` is passed to the executor, which will begin running it. The + /// future may run on the current thread or another thread at the discretion + /// of the `Executor` implementation. + /// + /// # Panics + /// + /// Implementors are encouraged to avoid panics. However, a panic is + /// permitted and the caller should check the implementation specific + /// documentation for more details on possible panics. + /// + /// # Examples + /// + /// ```rust + /// # extern crate futures; + /// # extern crate tokio_executor; + /// # use tokio_executor::Executor; + /// # fn docs(my_executor: &mut Executor) { + /// use futures::future::lazy; + /// my_executor.spawn(Box::new(lazy(|| { + /// println!("running on the executor"); + /// Ok(()) + /// }))).unwrap(); + /// # } + /// # fn main() {} + /// ``` + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError>; + + /// Like `spawn`, but compatible with futures 0.2 + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, future: Box + Send>) + -> Result<(), futures2::executor::SpawnError>; + + /// Provides a best effort **hint** to whether or not `spawn` will succeed. + /// + /// This function may return both false positives **and** false negatives. + /// If `status` returns `Ok`, then a call to `spawn` will *probably* + /// succeed, but may fail. 
If `status` returns `Err`, a call to `spawn` will + /// *probably* fail, but may succeed. + /// + /// This allows a caller to avoid creating the task if the call to `spawn` + /// has a high likelihood of failing. + /// + /// # Panics + /// + /// This function must not panic. Implementors must ensure that panics do + /// not happen. + /// + /// # Examples + /// + /// ```rust + /// # extern crate futures; + /// # extern crate tokio_executor; + /// # use tokio_executor::Executor; + /// # fn docs(my_executor: &mut Executor) { + /// use futures::future::lazy; + /// + /// if my_executor.status().is_ok() { + /// my_executor.spawn(Box::new(lazy(|| { + /// println!("running on the executor"); + /// Ok(()) + /// }))).unwrap(); + /// } else { + /// println!("the executor is not in a good state"); + /// } + /// # } + /// # fn main() {} + /// ``` + fn status(&self) -> Result<(), SpawnError> { + Ok(()) + } +} + +impl Executor for Box { + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError> + { + (**self).spawn(future) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, future: Box + Send>) + -> Result<(), futures2::executor::SpawnError> + { + (**self).spawn2(future) + } + + fn status(&self) -> Result<(), SpawnError> { + (**self).status() + } +} + +/// Errors returned by `Executor::spawn`. +/// +/// Spawn errors should represent relatively rare scenarios. Currently, the two +/// scenarios represented by `SpawnError` are: +/// +/// * An executor being at capacity or full. As such, the executor is not able +/// to accept a new future. This error state is expected to be transient. +/// * An executor has been shutdown and can no longer accept new futures. This +/// error state is expected to be permanent. +#[derive(Debug)] +pub struct SpawnError { + is_shutdown: bool, +} + +impl SpawnError { + /// Return a new `SpawnError` reflecting a shutdown executor failure. 
+ pub fn shutdown() -> Self { + SpawnError { is_shutdown: true } + } + + /// Return a new `SpawnError` reflecting an executor at capacity failure. + pub fn at_capacity() -> Self { + SpawnError { is_shutdown: false } + } + + /// Returns `true` if the error reflects a shutdown executor failure. + pub fn is_shutdown(&self) -> bool { + self.is_shutdown + } + + /// Returns `true` if the error reflects an executor at capacity failure. + pub fn is_at_capacity(&self) -> bool { + !self.is_shutdown + } +} diff --git a/third_party/rust/tokio-executor/src/park.rs b/third_party/rust/tokio-executor/src/park.rs new file mode 100644 index 000000000000..4f278fdd3af8 --- /dev/null +++ b/third_party/rust/tokio-executor/src/park.rs @@ -0,0 +1,302 @@ +//! Abstraction over blocking and unblocking the current thread. +//! +//! Provides an abstraction over blocking the current thread. This is similar to +//! the park / unpark constructs provided by [`std`] but made generic. This +//! allows embedding custom functionality to perform when the thread is blocked. +//! +//! A blocked [`Park`][p] instance is unblocked by calling [`unpark`] on its +//! [`Unpark`][up] handle. +//! +//! The [`ParkThread`] struct implements [`Park`][p] using +//! [`thread::park`][`std`] to put the thread to sleep. The Tokio reactor also +//! implements park, but uses [`mio::Poll`][mio] to block the thread instead. +//! +//! The [`Park`][p] trait is composable. A timer implementation might decorate a +//! [`Park`][p] implementation by checking if any timeouts have elapsed after +//! the inner [`Park`][p] implementation unblocks. +//! +//! # Model +//! +//! Conceptually, each [`Park`][p] instance has an associated token, which is +//! initially not present: +//! +//! * The [`park`] method blocks the current thread unless or until the token +//! is available, at which point it atomically consumes the token. +//! * The [`unpark`] method atomically makes the token available if it wasn't +//! already. +//! +//! 
Some things to note: +//! +//! * If [`unpark`] is called before [`park`], the next call to [`park`] will +//! **not** block the thread. +//! * **Spurious** wakeups are permitted, i.e., the [`park`] method may unblock +//! even if [`unpark`] was not called. +//! * [`park_timeout`] does the same as [`park`] but allows specifying a maximum +//! time to block the thread for. +//! +//! [`std`]: https://doc.rust-lang.org/std/thread/fn.park.html +//! [`thread::park`]: https://doc.rust-lang.org/std/thread/fn.park.html +//! [`ParkThread`]: struct.ParkThread.html +//! [p]: trait.Park.html +//! [`park`]: trait.Park.html#tymethod.park +//! [`park_timeout`]: trait.Park.html#tymethod.park_timeout +//! [`unpark`]: trait.Unpark.html#tymethod.unpark +//! [up]: trait.Unpark.html +//! [mio]: https://docs.rs/mio/0.6/mio/struct.Poll.html + +use std::marker::PhantomData; +use std::rc::Rc; +use std::sync::{Arc, Mutex, Condvar}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::time::Duration; + +/// Block the current thread. +/// +/// See [module documentation][mod] for more details. +/// +/// [mod]: ../index.html +pub trait Park { + /// Unpark handle type for the `Park` implementation. + type Unpark: Unpark; + + /// Error returned by `park` + type Error; + + /// Get a new `Unpark` handle associated with this `Park` instance. + fn unpark(&self) -> Self::Unpark; + + /// Block the current thread unless or until the token is available. + /// + /// A call to `park` does not guarantee that the thread will remain blocked + /// forever, and callers should be prepared for this possibility. This + /// function may wakeup spuriously for any reason. + /// + /// See [module documentation][mod] for more details. + /// + /// # Panics + /// + /// This function **should** not panic, but ultimately, panics are left as + /// an implementation detail. 
Refer to the documentation for the specific + /// `Park` implementation + /// + /// [mod]: ../index.html + fn park(&mut self) -> Result<(), Self::Error>; + + /// Park the current thread for at most `duration`. + /// + /// This function is the same as `park` but allows specifying a maximum time + /// to block the thread for. + /// + /// Same as `park`, there is no guarantee that the thread will remain + /// blocked for any amount of time. Spurious wakeups are permitted for any + /// reason. + /// + /// See [module documentation][mod] for more details. + /// + /// # Panics + /// + /// This function **should** not panic, but ultimately, panics are left as + /// an implementation detail. Refer to the documentation for the specific + /// `Park` implementation + /// + /// [mod]: ../index.html + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error>; +} + +/// Unblock a thread blocked by the associated [`Park`] instance. +/// +/// See [module documentation][mod] for more details. +/// +/// [mod]: ../index.html +/// [`Park`]: trait.Park.html +pub trait Unpark: Sync + Send + 'static { + /// Unblock a thread that is blocked by the associated `Park` handle. + /// + /// Calling `unpark` atomically makes available the unpark token, if it is + /// not already available. + /// + /// See [module documentation][mod] for more details. + /// + /// # Panics + /// + /// This function **should** not panic, but ultimately, panics are left as + /// an implementation detail. Refer to the documentation for the specific + /// `Unpark` implementation + /// + /// [mod]: ../index.html + fn unpark(&self); +} + +impl Unpark for Box { + fn unpark(&self) { + (**self).unpark() + } +} + +/// Blocks the current thread using a condition variable. +/// +/// Implements the [`Park`] functionality by using a condition variable. An +/// atomic variable is also used to avoid using the condition variable if +/// possible. 
+/// +/// The condition variable is cached in a thread-local variable and is shared +/// across all `ParkThread` instances created on the same thread. This also +/// means that an instance of `ParkThread` might be unblocked by a handle +/// associated with a different `ParkThread` instance. +#[derive(Debug)] +pub struct ParkThread { + _anchor: PhantomData>, +} + +/// Error returned by [`ParkThread`] +/// +/// This currently is never returned, but might at some point in the future. +/// +/// [`ParkThread`]: struct.ParkThread.html +#[derive(Debug)] +pub struct ParkError { + _p: (), +} + +/// Unblocks a thread that was blocked by `ParkThread`. +#[derive(Clone, Debug)] +pub struct UnparkThread { + inner: Arc, +} + +#[derive(Debug)] +struct Inner { + state: AtomicUsize, + mutex: Mutex<()>, + condvar: Condvar, +} + +const IDLE: usize = 0; +const NOTIFY: usize = 1; +const SLEEP: usize = 2; + +thread_local! { + static CURRENT_PARK_THREAD: Arc = Arc::new(Inner { + state: AtomicUsize::new(IDLE), + mutex: Mutex::new(()), + condvar: Condvar::new(), + }); +} + +// ===== impl ParkThread ===== + +impl ParkThread { + /// Create a new `ParkThread` handle for the current thread. + /// + /// This type cannot be moved to other threads, so it should be created on + /// the thread that the caller intends to park. + pub fn new() -> ParkThread { + ParkThread { + _anchor: PhantomData, + } + } + + /// Get a reference to the `ParkThread` handle for this thread. 
+ fn with_current(&self, f: F) -> R + where F: FnOnce(&Arc) -> R, + { + CURRENT_PARK_THREAD.with(|inner| f(inner)) + } +} + +impl Park for ParkThread { + type Unpark = UnparkThread; + type Error = ParkError; + + fn unpark(&self) -> Self::Unpark { + let inner = self.with_current(|inner| inner.clone()); + UnparkThread { inner } + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.with_current(|inner| inner.park(None)) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.with_current(|inner| inner.park(Some(duration))) + } +} + +// ===== impl UnparkThread ===== + +impl Unpark for UnparkThread { + fn unpark(&self) { + self.inner.unpark(); + } +} + +// ===== impl Inner ===== + +impl Inner { + /// Park the current thread for at most `dur`. + fn park(&self, timeout: Option) -> Result<(), ParkError> { + // If currently notified, then we skip sleeping. This is checked outside + // of the lock to avoid acquiring a mutex if not necessary. + match self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) { + NOTIFY => return Ok(()), + IDLE => {}, + _ => unreachable!(), + } + + // The state is currently idle, so obtain the lock and then try to + // transition to a sleeping state. + let mut m = self.mutex.lock().unwrap(); + + // Transition to sleeping + match self.state.compare_and_swap(IDLE, SLEEP, Ordering::SeqCst) { + NOTIFY => { + // Notified before we could sleep, consume the notification and + // exit + self.state.store(IDLE, Ordering::SeqCst); + return Ok(()); + } + IDLE => {}, + _ => unreachable!(), + } + + m = match timeout { + Some(timeout) => self.condvar.wait_timeout(m, timeout).unwrap().0, + None => self.condvar.wait(m).unwrap(), + }; + + // Transition back to idle. If the state has transitioned to `NOTIFY`, + // this will consume that notification + self.state.store(IDLE, Ordering::SeqCst); + + // Explicitly drop the mutex guard. 
There is no real point in doing it + // except that I find it helpful to make it explicit where we want the + // mutex to unlock. + drop(m); + + Ok(()) + } + + fn unpark(&self) { + // First, try transitioning from IDLE -> NOTIFY, this does not require a + // lock. + match self.state.compare_and_swap(IDLE, NOTIFY, Ordering::SeqCst) { + IDLE | NOTIFY => return, + SLEEP => {} + _ => unreachable!(), + } + + // The other half is sleeping, this requires a lock + let _m = self.mutex.lock().unwrap(); + + // Transition to NOTIFY + match self.state.swap(NOTIFY, Ordering::SeqCst) { + SLEEP => {} + NOTIFY => return, + IDLE => return, + _ => unreachable!(), + } + + // Wakeup the sleeper + self.condvar.notify_one(); + } +} diff --git a/third_party/rust/tokio-executor/tests/executor.rs b/third_party/rust/tokio-executor/tests/executor.rs new file mode 100644 index 000000000000..77436ec92f17 --- /dev/null +++ b/third_party/rust/tokio-executor/tests/executor.rs @@ -0,0 +1,11 @@ +extern crate tokio_executor; +extern crate futures; + +use tokio_executor::*; +use futures::future::lazy; + +#[test] +fn spawn_out_of_executor_context() { + let res = DefaultExecutor::current().spawn(Box::new(lazy(|| Ok(())))); + assert!(res.is_err()); +} diff --git a/third_party/rust/tokio-fs/.cargo-checksum.json b/third_party/rust/tokio-fs/.cargo-checksum.json new file mode 100644 index 000000000000..7b7e36abd76d --- /dev/null +++ b/third_party/rust/tokio-fs/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"d07abfd675dfa12dda0f46b7ad56fc0f6da677a26a6cfa0531e623e103b68abd","Cargo.toml":"69399fbac7423aa14484b8e89cb8fd187ae7277a024378fd7d74fb7eff033f1c","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"2b714a9a29f09a4555b1ee7b7ef27bfd12522c98589eb94f59ef9c315700781b","examples/std-echo.rs":"66927bb61b29565deedbb4fe4a6271a624fed4baf5a5b41bb040059c71b2ad7b","src/create_dir.rs":"e2c4f913fa8d89d10bc2c549dc25c70ec1a119fc7e63909729c19d74a672a80d","src/create_dir_all.rs":"c873c34f00a7075cfeece7fdf76fc85e70871f776ff3811c6d3e8a6b84dd799a","src/file/create.rs":"ad7ef003a65a5bbcd16f28104f19bd7187fa3617c488dd0edb4070074919300b","src/file/metadata.rs":"bddcee02d00a05c272d5d9e2def359f51270d4a25be6630a9c52e3179ed192da","src/file/mod.rs":"fcfb0a882a08600be841d0a40d6746d6c82cf394de8b57e1c21803ea1f1bfcb4","src/file/open.rs":"5012c37d58048606e009bc4b7f7c5095167ce1f5c3dfc9a03e4b1762e83a6de3","src/file/open_options.rs":"0a6ae78653ae200b385b524fe5bbdf069f53a9d7fb087daa11f0bc28759dfc05","src/file/seek.rs":"c1cfe4a228b518e9a7fc3e09567b6ff290b116a7d717f99cdafcd2de13581411","src/hard_link.rs":"3cd1baeed52abc1cb45801d83872ce77df9ea8d037735829b07a1bea07e69fc1","src/lib.rs":"6b8573d70b4b7968d30964803791cf14dfcd4386a74a6170943b3c848f61ab5a","src/metadata.rs":"3882111af4cdbed2eec93ce7fa66b392867833bb55267015e4956d7a2aaa7442","src/os/mod.rs":"9165d5841d1503608049459154c9e2f020ec8d70dde6c00e25833db12addb572","src/os/unix.rs":"44588f808a79a99614812a181ad20d978a1db8b86bf2d95b4fbf77036f15f4ef","src/os/windows/mod.rs":"09c28e0926a2a99ed46fa53f878b1bd84ea73f522acbb40ca63983477922f365","src/os/windows/symlink_dir.rs":"cd41c3d31e9e1a17cf1c21a9c64741d02181242407410ebd65dc4ce81f5c4b8e","src/os/windows/symlink_file.rs":"b9cb16bd85ab90af67baafe6243615b8b3d82348771bd49956a232e9b97b0560","src/read_dir.rs":"138e9d05ec30326ad151b4c1cf1f134f8c3337d9fcf4f0175f51560eb6ee5b97","src/read_link.rs":"ad85fdee77a8ee2ffc1beb35a4a7e7571fc637e1a367d7cf3346ea67673f1
edd","src/remove_dir.rs":"21e1e346b53f50d8e8a82c9143bba566a418e09eb13ba8d6c9a6d9155689355b","src/remove_file.rs":"9474d73a6f26ccd1b70da120d2751a1115cc5431983c72f69346b092993b5acc","src/rename.rs":"2eb8fdfcf5b38c059a12abe0443ff65d200b2b44a9b0130b774d417e8949e313","src/set_permissions.rs":"bde74d8d83f8113ef8a1883e1b09a67c26259ddf749f1e11ad2ab2c284222118","src/stderr.rs":"0a84ebdab07500a0b71c03076ccaaf511e093ef77af3b722a0f0acdf91b94034","src/stdin.rs":"ba7b0e4d08dc10743f1139a4aff13d74c564ae9e5dfef8a07388b4b9f75e2c3b","src/stdout.rs":"6f5e807c1bfc90655d364dd2a2d49f8895d86af9ad5cfdf8196a00bdf6d247fb","src/symlink_metadata.rs":"ce4345c5b35f48bcc40b0c0e7c84f3574540a044576fd9837f15724d266480de","tests/file.rs":"1b74b9e1ee6ab62d0db9d900ad497fd42464bb40bcce95453fad67969d9d1ddc"},"package":"b5cbe4ca6e71cb0b62a66e4e6f53a8c06a6eefe46cc5f665ad6f274c9906f135"} \ No newline at end of file diff --git a/third_party/rust/tokio-fs/CHANGELOG.md b/third_party/rust/tokio-fs/CHANGELOG.md new file mode 100644 index 000000000000..197d4a89377c --- /dev/null +++ b/third_party/rust/tokio-fs/CHANGELOG.md @@ -0,0 +1,18 @@ +# 0.1.3 (August 6, 2018) + +* Add async equivalents to most of `std::fs` (#494). 
+ +# 0.1.2 (July 11, 2018) + +* Add `metadata` and `File::metadata` ([#433](https://github.com/tokio-rs/tokio/pull/433), [#385](https://github.com/tokio-rs/tokio/pull/385)) +* Add `File::seek` ([#434](https://github.com/tokio-rs/tokio/pull/434)) + +# 0.1.1 (June 13, 2018) + +* Add `OpenOptions` ([#390](https://github.com/tokio-rs/tokio/pull/390)) +* Add `into_std` to `File` ([#403](https://github.com/tokio-rs/tokio/pull/403)) +* Use `tokio-codec` in examples + +# 0.1.0 (May 2, 2018) + +* Initial release diff --git a/third_party/rust/tokio-fs/Cargo.toml b/third_party/rust/tokio-fs/Cargo.toml new file mode 100644 index 000000000000..fd5e59434e50 --- /dev/null +++ b/third_party/rust/tokio-fs/Cargo.toml @@ -0,0 +1,46 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-fs" +version = "0.1.3" +authors = ["Carl Lerche "] +description = "Filesystem API for Tokio.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio-fs/0.1" +readme = "README.md" +keywords = ["tokio", "futures", "fs", "file", "async"] +categories = ["asynchronous", "network-programming", "filesystem"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.futures] +version = "0.1.21" + +[dependencies.tokio-io] +version = "0.1.6" + +[dependencies.tokio-threadpool] +version = "0.1.3" +[dev-dependencies.rand] +version = "0.4.2" + +[dev-dependencies.tempdir] +version = "0.3.7" + +[dev-dependencies.tokio] +version = "0.1.7" + +[dev-dependencies.tokio-codec] +version = "0.1.0" + +[dev-dependencies.tokio-io] +version = "0.1.6" diff --git a/third_party/rust/tokio-fs/LICENSE b/third_party/rust/tokio-fs/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-fs/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-fs/README.md b/third_party/rust/tokio-fs/README.md new file mode 100644 index 000000000000..f359d7676aa7 --- /dev/null +++ b/third_party/rust/tokio-fs/README.md @@ -0,0 +1,19 @@ +# Tokio FS + +Asynchronous filesystem manipulation operations (and stdin, stdout, stderr). + +[Documentation](https://tokio-rs.github.io/tokio/tokio_fs/) + +## Overview + +This crate provides filesystem manipulation facilities for usage with Tokio. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-fs/examples/std-echo.rs b/third_party/rust/tokio-fs/examples/std-echo.rs new file mode 100644 index 000000000000..83efa66e5839 --- /dev/null +++ b/third_party/rust/tokio-fs/examples/std-echo.rs @@ -0,0 +1,48 @@ +//! Echo everything received on STDIN to STDOUT. 
+#![deny(deprecated, warnings)] + +extern crate futures; +extern crate tokio_fs; +extern crate tokio_codec; +extern crate tokio_threadpool; + +use tokio_fs::{stdin, stdout, stderr}; +use tokio_codec::{FramedRead, FramedWrite, LinesCodec}; +use tokio_threadpool::Builder; + +use futures::{Future, Stream, Sink}; + +use std::io; + +pub fn main() { + let pool = Builder::new() + .pool_size(1) + .build(); + + pool.spawn({ + let input = FramedRead::new(stdin(), LinesCodec::new()); + + let output = FramedWrite::new(stdout(), LinesCodec::new()) + .with(|line: String| { + let mut out = "OUT: ".to_string(); + out.push_str(&line); + Ok::<_, io::Error>(out) + }); + + let error = FramedWrite::new(stderr(), LinesCodec::new()) + .with(|line: String| { + let mut out = "ERR: ".to_string(); + out.push_str(&line); + Ok::<_, io::Error>(out) + }); + + let dst = output.fanout(error); + + input + .forward(dst) + .map(|_| ()) + .map_err(|e| panic!("io error = {:?}", e)) + }); + + pool.shutdown_on_idle().wait().unwrap(); +} diff --git a/third_party/rust/tokio-fs/src/create_dir.rs b/third_party/rust/tokio-fs/src/create_dir.rs new file mode 100644 index 000000000000..a174a5d97f09 --- /dev/null +++ b/third_party/rust/tokio-fs/src/create_dir.rs @@ -0,0 +1,46 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Creates a new, empty directory at the provided path +/// +/// This is an async version of [`std::fs::create_dir`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.create_dir.html +pub fn create_dir>(path: P) -> CreateDirFuture

    { + CreateDirFuture::new(path) +} + +/// Future returned by `create_dir`. +#[derive(Debug)] +pub struct CreateDirFuture

    +where + P: AsRef +{ + path: P, +} + +impl

    CreateDirFuture

    +where + P: AsRef +{ + fn new(path: P) -> CreateDirFuture

    { + CreateDirFuture { + path: path, + } + } +} + +impl

    Future for CreateDirFuture

    +where + P: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::create_dir(&self.path) ) + } +} diff --git a/third_party/rust/tokio-fs/src/create_dir_all.rs b/third_party/rust/tokio-fs/src/create_dir_all.rs new file mode 100644 index 000000000000..3e32480673e3 --- /dev/null +++ b/third_party/rust/tokio-fs/src/create_dir_all.rs @@ -0,0 +1,47 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Recursively create a directory and all of its parent components if they +/// are missing. +/// +/// This is an async version of [`std::fs::create_dir_all`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.create_dir_all.html +pub fn create_dir_all>(path: P) -> CreateDirAllFuture

    { + CreateDirAllFuture::new(path) +} + +/// Future returned by `create_dir_all`. +#[derive(Debug)] +pub struct CreateDirAllFuture

    +where + P: AsRef +{ + path: P, +} + +impl

    CreateDirAllFuture

    +where + P: AsRef +{ + fn new(path: P) -> CreateDirAllFuture

    { + CreateDirAllFuture { + path: path, + } + } +} + +impl

    Future for CreateDirAllFuture

    +where + P: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::create_dir_all(&self.path) ) + } +} diff --git a/third_party/rust/tokio-fs/src/file/create.rs b/third_party/rust/tokio-fs/src/file/create.rs new file mode 100644 index 000000000000..40274185631b --- /dev/null +++ b/third_party/rust/tokio-fs/src/file/create.rs @@ -0,0 +1,37 @@ +use super::File; + +use futures::{Future, Poll}; + +use std::fs::File as StdFile; +use std::io; +use std::path::Path; + +/// Future returned by `File::create` and resolves to a `File` instance. +#[derive(Debug)] +pub struct CreateFuture

    { + path: P, +} + +impl

    CreateFuture

    +where P: AsRef + Send + 'static, +{ + pub(crate) fn new(path: P) -> Self { + CreateFuture { path } + } +} + +impl

    Future for CreateFuture

    +where P: AsRef + Send + 'static, +{ + type Item = File; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let std = try_ready!(::blocking_io(|| { + StdFile::create(&self.path) + })); + + let file = File::from_std(std); + Ok(file.into()) + } +} diff --git a/third_party/rust/tokio-fs/src/file/metadata.rs b/third_party/rust/tokio-fs/src/file/metadata.rs new file mode 100644 index 000000000000..b55ca103c13d --- /dev/null +++ b/third_party/rust/tokio-fs/src/file/metadata.rs @@ -0,0 +1,39 @@ +use super::File; + +use futures::{Future, Poll}; + +use std::fs::File as StdFile; +use std::fs::Metadata; +use std::io; + +const POLL_AFTER_RESOLVE: &str = "Cannot poll MetadataFuture after it resolves"; + +/// Future returned by `File::metadata` and resolves to a `(Metadata, File)` instance. +#[derive(Debug)] +pub struct MetadataFuture { + file: Option, +} + +impl MetadataFuture { + pub(crate) fn new(file: File) -> Self { + MetadataFuture { file: Some(file) } + } + + fn std(&mut self) -> &mut StdFile { + self.file.as_mut().expect(POLL_AFTER_RESOLVE).std() + } +} + +impl Future for MetadataFuture { + type Item = (File, Metadata); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let metadata = try_ready!(::blocking_io(|| { + StdFile::metadata(self.std()) + })); + + let file = self.file.take().expect(POLL_AFTER_RESOLVE); + Ok((file, metadata).into()) + } +} diff --git a/third_party/rust/tokio-fs/src/file/mod.rs b/third_party/rust/tokio-fs/src/file/mod.rs new file mode 100644 index 000000000000..c3ae431121d4 --- /dev/null +++ b/third_party/rust/tokio-fs/src/file/mod.rs @@ -0,0 +1,243 @@ +//! Types for working with [`File`]. +//! +//! 
[`File`]: file/struct.File.html + +mod create; +mod metadata; +mod open; +mod open_options; +mod seek; + +pub use self::create::CreateFuture; +pub use self::metadata::MetadataFuture; +pub use self::open::OpenFuture; +pub use self::open_options::OpenOptions; +pub use self::seek::SeekFuture; + +use tokio_io::{AsyncRead, AsyncWrite}; + +use futures::Poll; + +use std::fs::{File as StdFile, Metadata, Permissions}; +use std::io::{self, Read, Write, Seek}; +use std::path::Path; + +/// A reference to an open file on the filesystem. +/// +/// This is a specialized version of [`std::fs::File`][std] for usage from the +/// Tokio runtime. +/// +/// An instance of a `File` can be read and/or written depending on what options +/// it was opened with. Files also implement Seek to alter the logical cursor +/// that the file contains internally. +/// +/// Files are automatically closed when they go out of scope. +/// +/// [std]: https://doc.rust-lang.org/std/fs/struct.File.html +#[derive(Debug)] +pub struct File { + std: Option, +} + +impl File { + /// Attempts to open a file in read-only mode. + /// + /// See [`OpenOptions`] for more details. + /// + /// [`OpenOptions`]: struct.OpenOptions.html + /// + /// # Errors + /// + /// `OpenFuture` results in an error if called from outside of the Tokio + /// runtime or if the underlying [`open`] call results in an error. + /// + /// [`open`]: https://doc.rust-lang.org/std/fs/struct.File.html#method.open + pub fn open

    (path: P) -> OpenFuture

    + where P: AsRef + Send + 'static, + { + OpenOptions::new().read(true).open(path) + } + + /// Opens a file in write-only mode. + /// + /// This function will create a file if it does not exist, and will truncate + /// it if it does. + /// + /// See [`OpenOptions`] for more details. + /// + /// [`OpenOptions`]: struct.OpenOptions.html + /// + /// # Errors + /// + /// `CreateFuture` results in an error if called from outside of the Tokio + /// runtime or if the underlying [`create`] call results in an error. + /// + /// [`create`]: https://doc.rust-lang.org/std/fs/struct.File.html#method.create + pub fn create

    (path: P) -> CreateFuture

    + where P: AsRef + Send + 'static, + { + CreateFuture::new(path) + } + + /// Convert a [`std::fs::File`][std] to a `tokio_fs::File`. + /// + /// [std]: https://doc.rust-lang.org/std/fs/struct.File.html + pub(crate) fn from_std(std: StdFile) -> File { + File { std: Some(std) } + } + + /// Seek to an offset, in bytes, in a stream. + /// + /// A seek beyond the end of a stream is allowed, but implementation + /// defined. + /// + /// If the seek operation completed successfully, this method returns the + /// new position from the start of the stream. That position can be used + /// later with `SeekFrom::Start`. + /// + /// # Errors + /// + /// Seeking to a negative offset is considered an error. + pub fn poll_seek(&mut self, pos: io::SeekFrom) -> Poll { + ::blocking_io(|| self.std().seek(pos)) + } + + /// Seek to an offset, in bytes, in a stream. + /// + /// Similar to `poll_seek`, but returning a `Future`. + /// + /// This method consumes the `File` and returns it back when the future + /// completes. + pub fn seek(self, pos: io::SeekFrom) -> SeekFuture { + SeekFuture::new(self, pos) + } + + /// Attempts to sync all OS-internal metadata to disk. + /// + /// This function will attempt to ensure that all in-core data reaches the + /// filesystem before returning. + pub fn poll_sync_all(&mut self) -> Poll<(), io::Error> { + ::blocking_io(|| self.std().sync_all()) + } + + /// This function is similar to `poll_sync_all`, except that it may not + /// synchronize file metadata to the filesystem. + /// + /// This is intended for use cases that must synchronize content, but don't + /// need the metadata on disk. The goal of this method is to reduce disk + /// operations. + /// + /// Note that some platforms may simply implement this in terms of `poll_sync_all`. + pub fn poll_sync_data(&mut self) -> Poll<(), io::Error> { + ::blocking_io(|| self.std().sync_data()) + } + + /// Truncates or extends the underlying file, updating the size of this file to become size. 
+ /// + /// If the size is less than the current file's size, then the file will be + /// shrunk. If it is greater than the current file's size, then the file + /// will be extended to size and have all of the intermediate data filled in + /// with 0s. + /// + /// # Errors + /// + /// This function will return an error if the file is not opened for + /// writing. + pub fn poll_set_len(&mut self, size: u64) -> Poll<(), io::Error> { + ::blocking_io(|| self.std().set_len(size)) + } + + /// Queries metadata about the underlying file. + pub fn metadata(self) -> MetadataFuture { + MetadataFuture::new(self) + } + + /// Queries metadata about the underlying file. + pub fn poll_metadata(&mut self) -> Poll { + ::blocking_io(|| self.std().metadata()) + } + + /// Create a new `File` instance that shares the same underlying file handle + /// as the existing `File` instance. Reads, writes, and seeks will affect both + /// File instances simultaneously. + pub fn poll_try_clone(&mut self) -> Poll { + ::blocking_io(|| { + let std = self.std().try_clone()?; + Ok(File::from_std(std)) + }) + } + + /// Changes the permissions on the underlying file. + /// + /// # Platform-specific behavior + /// + /// This function currently corresponds to the `fchmod` function on Unix and + /// the `SetFileInformationByHandle` function on Windows. Note that, this + /// [may change in the future][changes]. + /// + /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior + /// + /// # Errors + /// + /// This function will return an error if the user lacks permission change + /// attributes on the underlying file. It may also return an error in other + /// os-specific unspecified cases. + pub fn poll_set_permissions(&mut self, perm: Permissions) -> Poll<(), io::Error> { + ::blocking_io(|| self.std().set_permissions(perm)) + } + + /// Destructures the `tokio_fs::File` into a [`std::fs::File`][std]. 
+ /// + /// # Panics + /// + /// This function will panic if `shutdown` has been called. + /// + /// [std]: https://doc.rust-lang.org/std/fs/struct.File.html + pub fn into_std(mut self) -> StdFile { + self.std.take().expect("`File` instance already shutdown") + } + + fn std(&mut self) -> &mut StdFile { + self.std.as_mut().expect("`File` instance already shutdown") + } +} + +impl Read for File { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + ::would_block(|| self.std().read(buf)) + } +} + +impl AsyncRead for File { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { + false + } +} + +impl Write for File { + fn write(&mut self, buf: &[u8]) -> io::Result { + ::would_block(|| self.std().write(buf)) + } + + fn flush(&mut self) -> io::Result<()> { + ::would_block(|| self.std().flush()) + } +} + +impl AsyncWrite for File { + fn shutdown(&mut self) -> Poll<(), io::Error> { + ::blocking_io(|| { + self.std = None; + Ok(()) + }) + } +} + +impl Drop for File { + fn drop(&mut self) { + if let Some(_std) = self.std.take() { + // This is probably fine as closing a file *shouldn't* be a blocking + // operation. That said, ideally `shutdown` is called first. + } + } +} diff --git a/third_party/rust/tokio-fs/src/file/open.rs b/third_party/rust/tokio-fs/src/file/open.rs new file mode 100644 index 000000000000..197ec237af78 --- /dev/null +++ b/third_party/rust/tokio-fs/src/file/open.rs @@ -0,0 +1,38 @@ +use super::File; + +use futures::{Future, Poll}; + +use std::fs::OpenOptions as StdOpenOptions; +use std::io; +use std::path::Path; + +/// Future returned by `File::open` and resolves to a `File` instance. +#[derive(Debug)] +pub struct OpenFuture

    { + options: StdOpenOptions, + path: P, +} + +impl

    OpenFuture

    +where P: AsRef + Send + 'static, +{ + pub(crate) fn new(options: StdOpenOptions, path: P) -> Self { + OpenFuture { options, path } + } +} + +impl

    Future for OpenFuture

    +where P: AsRef + Send + 'static, +{ + type Item = File; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let std = try_ready!(::blocking_io(|| { + self.options.open(&self.path) + })); + + let file = File::from_std(std); + Ok(file.into()) + } +} diff --git a/third_party/rust/tokio-fs/src/file/open_options.rs b/third_party/rust/tokio-fs/src/file/open_options.rs new file mode 100644 index 000000000000..99cc71c5b008 --- /dev/null +++ b/third_party/rust/tokio-fs/src/file/open_options.rs @@ -0,0 +1,103 @@ +use super::OpenFuture; + +use std::convert::From; +use std::fs::OpenOptions as StdOpenOptions; +use std::path::Path; + +/// Options and flags which can be used to configure how a file is opened. +/// +/// This is a specialized version of [`std::fs::OpenOptions`] for usage from +/// the Tokio runtime. +/// +/// `From` is implemented for more advanced configuration +/// than the methods provided here. +/// +/// [`std::fs::OpenOptions`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html +#[derive(Clone, Debug)] +pub struct OpenOptions(StdOpenOptions); + +impl OpenOptions { + /// Creates a blank new set of options ready for configuration. + /// + /// All options are initially set to `false`. + /// + /// # Examples + /// + /// ```ignore + /// use tokio::fs::OpenOptions; + /// + /// let mut options = OpenOptions::new(); + /// let future = options.read(true).open("foo.txt"); + /// ``` + pub fn new() -> OpenOptions { + OpenOptions(StdOpenOptions::new()) + } + + /// See the underlying [`read`] call for details. + /// + /// [`read`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.read + pub fn read(&mut self, read: bool) -> &mut OpenOptions { + self.0.read(read); + self + } + + /// See the underlying [`write`] call for details. 
+ /// + /// [`write`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.write + pub fn write(&mut self, write: bool) -> &mut OpenOptions { + self.0.write(write); + self + } + + /// See the underlying [`append`] call for details. + /// + /// [`append`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.append + pub fn append(&mut self, append: bool) -> &mut OpenOptions { + self.0.append(append); + self + } + + /// See the underlying [`truncate`] call for details. + /// + /// [`truncate`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.truncate + pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions { + self.0.truncate(truncate); + self + } + + /// See the underlying [`create`] call for details. + /// + /// [`create`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.create + pub fn create(&mut self, create: bool) -> &mut OpenOptions { + self.0.create(create); + self + } + + /// See the underlying [`create_new`] call for details. + /// + /// [`create_new`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.create_new + pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions { + self.0.create_new(create_new); + self + } + + /// Opens a file at `path` with the options specified by `self`. + /// + /// # Errors + /// + /// `OpenOptionsFuture` results in an error if called from outside of the + /// Tokio runtime or if the underlying [`open`] call results in an error. + /// + /// [`open`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html#method.open + pub fn open

    (&self, path: P) -> OpenFuture

    + where P: AsRef + Send + 'static + { + OpenFuture::new(self.0.clone(), path) + } +} + +impl From for OpenOptions { + fn from(options: StdOpenOptions) -> OpenOptions { + OpenOptions(options) + } +} diff --git a/third_party/rust/tokio-fs/src/file/seek.rs b/third_party/rust/tokio-fs/src/file/seek.rs new file mode 100644 index 000000000000..0765d3db9032 --- /dev/null +++ b/third_party/rust/tokio-fs/src/file/seek.rs @@ -0,0 +1,37 @@ +use super::File; + +use futures::{Future, Poll}; + +use std::io; + +/// Future returned by `File::seek`. +#[derive(Debug)] +pub struct SeekFuture { + inner: Option, + pos: io::SeekFrom, +} + +impl SeekFuture { + pub(crate) fn new(file: File, pos: io::SeekFrom) -> Self { + Self { + pos, + inner: Some(file), + } + } +} + +impl Future for SeekFuture { + type Item = (File, u64); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let pos = try_ready!( + self.inner + .as_mut() + .expect("Cannot poll `SeekFuture` after it resolves") + .poll_seek(self.pos) + ); + let inner = self.inner.take().unwrap(); + Ok((inner, pos).into()) + } +} diff --git a/third_party/rust/tokio-fs/src/hard_link.rs b/third_party/rust/tokio-fs/src/hard_link.rs new file mode 100644 index 000000000000..e8ea5115207a --- /dev/null +++ b/third_party/rust/tokio-fs/src/hard_link.rs @@ -0,0 +1,54 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Creates a new hard link on the filesystem. +/// +/// The `dst` path will be a link pointing to the `src` path. Note that systems +/// often require these two paths to both be located on the same filesystem. +/// +/// This is an async version of [`std::fs::hard_link`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.hard_link.html +pub fn hard_link, Q: AsRef>(src: P, dst: Q) -> HardLinkFuture { + HardLinkFuture::new(src, dst) +} + +/// Future returned by `hard_link`. 
+#[derive(Debug)] +pub struct HardLinkFuture +where + P: AsRef, + Q: AsRef +{ + src: P, + dst: Q, +} + +impl HardLinkFuture +where + P: AsRef, + Q: AsRef +{ + fn new(src: P, dst: Q) -> HardLinkFuture { + HardLinkFuture { + src: src, + dst: dst, + } + } +} + +impl Future for HardLinkFuture +where + P: AsRef, + Q: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::hard_link(&self.src, &self.dst) ) + } +} diff --git a/third_party/rust/tokio-fs/src/lib.rs b/third_party/rust/tokio-fs/src/lib.rs new file mode 100644 index 000000000000..2beec8c0c690 --- /dev/null +++ b/third_party/rust/tokio-fs/src/lib.rs @@ -0,0 +1,104 @@ +//! Asynchronous file and standard stream adaptation. +//! +//! This module contains utility methods and adapter types for input/output to +//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and +//! filesystem manipulation, for use within (and only within) a Tokio runtime. +//! +//! Tasks run by *worker* threads should not block, as this could delay +//! servicing reactor events. Portable filesystem operations are blocking, +//! however. This module offers adapters which use a [`blocking`] annotation +//! to inform the runtime that a blocking operation is required. When +//! necessary, this allows the runtime to convert the current thread from a +//! *worker* to a *backup* thread, where blocking is acceptable. +//! +//! ## Usage +//! +//! Where possible, users should prefer the provided asynchronous-specific +//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll` +//! type. Adaptions also extend to traits like `std::io::Read` where methods +//! return `std::io::Result`. Be warned that these adapted methods may return +//! `std::io::ErrorKind::WouldBlock` if a *worker* thread can not be converted +//! to a *backup* thread immediately. See [tokio-threadpool] for more details +//! of the threading model and [`blocking`]. +//! +//! 
[`blocking`]: https://docs.rs/tokio-threadpool/0.1/tokio_threadpool/fn.blocking.html +//! [`AsyncRead`]: https://docs.rs/tokio-io/0.1/tokio_io/trait.AsyncRead.html +//! [tokio-threadpool]: https://docs.rs/tokio-threadpool/0.1/tokio_threadpool + +#![deny(missing_docs, missing_debug_implementations, warnings)] +#![doc(html_root_url = "https://docs.rs/tokio-fs/0.1.3")] + +#[macro_use] +extern crate futures; +extern crate tokio_io; +extern crate tokio_threadpool; + +mod create_dir; +mod create_dir_all; +pub mod file; +mod hard_link; +mod metadata; +pub mod os; +mod read_dir; +mod read_link; +mod remove_dir; +mod remove_file; +mod rename; +mod set_permissions; +mod stdin; +mod stdout; +mod stderr; +mod symlink_metadata; + +pub use create_dir::{create_dir, CreateDirFuture}; +pub use create_dir_all::{create_dir_all, CreateDirAllFuture}; +pub use file::File; +pub use file::OpenOptions; +pub use hard_link::{hard_link, HardLinkFuture}; +pub use metadata::{metadata, MetadataFuture}; +pub use read_dir::{read_dir, ReadDirFuture, ReadDir, DirEntry}; +pub use read_link::{read_link, ReadLinkFuture}; +pub use remove_dir::{remove_dir, RemoveDirFuture}; +pub use remove_file::{remove_file, RemoveFileFuture}; +pub use rename::{rename, RenameFuture}; +pub use set_permissions::{set_permissions, SetPermissionsFuture}; +pub use stdin::{stdin, Stdin}; +pub use stdout::{stdout, Stdout}; +pub use stderr::{stderr, Stderr}; +pub use symlink_metadata::{symlink_metadata, SymlinkMetadataFuture}; + +use futures::Poll; +use futures::Async::*; + +use std::io; +use std::io::ErrorKind::{Other, WouldBlock}; + +fn blocking_io(f: F) -> Poll +where F: FnOnce() -> io::Result, +{ + match tokio_threadpool::blocking(f) { + Ok(Ready(Ok(v))) => Ok(v.into()), + Ok(Ready(Err(err))) => Err(err), + Ok(NotReady) => Ok(NotReady), + Err(_) => Err(blocking_err()), + } +} + +fn would_block(f: F) -> io::Result +where F: FnOnce() -> io::Result, +{ + match tokio_threadpool::blocking(f) { + Ok(Ready(Ok(v))) => Ok(v), + 
Ok(Ready(Err(err))) => { + debug_assert_ne!(err.kind(), WouldBlock); + Err(err) + } + Ok(NotReady) => Err(WouldBlock.into()), + Err(_) => Err(blocking_err()), + } +} + +fn blocking_err() -> io::Error { + io::Error::new(Other, "`blocking` annotated I/O must be called \ + from the context of the Tokio runtime.") +} diff --git a/third_party/rust/tokio-fs/src/metadata.rs b/third_party/rust/tokio-fs/src/metadata.rs new file mode 100644 index 000000000000..200d58e08078 --- /dev/null +++ b/third_party/rust/tokio-fs/src/metadata.rs @@ -0,0 +1,45 @@ +use super::blocking_io; + +use futures::{Future, Poll}; + +use std::fs::{self, Metadata}; +use std::io; +use std::path::Path; + +/// Queries the file system metadata for a path. +pub fn metadata

    (path: P) -> MetadataFuture

    +where + P: AsRef + Send + 'static, +{ + MetadataFuture::new(path) +} + +/// Future returned by `metadata`. +#[derive(Debug)] +pub struct MetadataFuture

    +where + P: AsRef + Send + 'static, +{ + path: P, +} + +impl

    MetadataFuture

    +where + P: AsRef + Send + 'static, +{ + pub(crate) fn new(path: P) -> Self { + Self { path } + } +} + +impl

    Future for MetadataFuture

    +where + P: AsRef + Send + 'static, +{ + type Item = Metadata; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + blocking_io(|| fs::metadata(&self.path)) + } +} diff --git a/third_party/rust/tokio-fs/src/os/mod.rs b/third_party/rust/tokio-fs/src/os/mod.rs new file mode 100644 index 000000000000..ae57c4847618 --- /dev/null +++ b/third_party/rust/tokio-fs/src/os/mod.rs @@ -0,0 +1,6 @@ +//! OS-specific functionality. + +#[cfg(unix)] +pub mod unix; +#[cfg(windows)] +pub mod windows; diff --git a/third_party/rust/tokio-fs/src/os/unix.rs b/third_party/rust/tokio-fs/src/os/unix.rs new file mode 100644 index 000000000000..5f8eedeba39b --- /dev/null +++ b/third_party/rust/tokio-fs/src/os/unix.rs @@ -0,0 +1,55 @@ +//! Unix-specific extensions to primitives in the `tokio_fs` module. + +use std::io; +use std::path::Path; +use std::os::unix::fs; + +use futures::{Future, Poll}; + +/// Creates a new symbolic link on the filesystem. +/// +/// The `dst` path will be a symbolic link pointing to the `src` path. +/// +/// This is an async version of [`std::os::unix::fs::symlink`][std] +/// +/// [std]: https://doc.rust-lang.org/std/os/unix/fs/fn.symlink.html +pub fn symlink, Q: AsRef>(src: P, dst: Q) -> SymlinkFuture { + SymlinkFuture::new(src, dst) +} + +/// Future returned by `symlink`. 
+#[derive(Debug)] +pub struct SymlinkFuture +where + P: AsRef, + Q: AsRef +{ + src: P, + dst: Q, +} + +impl SymlinkFuture +where + P: AsRef, + Q: AsRef +{ + fn new(src: P, dst: Q) -> SymlinkFuture { + SymlinkFuture { + src: src, + dst: dst, + } + } +} + +impl Future for SymlinkFuture +where + P: AsRef, + Q: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::symlink(&self.src, &self.dst) ) + } +} diff --git a/third_party/rust/tokio-fs/src/os/windows/mod.rs b/third_party/rust/tokio-fs/src/os/windows/mod.rs new file mode 100644 index 000000000000..eaeed043eb3b --- /dev/null +++ b/third_party/rust/tokio-fs/src/os/windows/mod.rs @@ -0,0 +1,7 @@ +//! Windows-specific extensions for the primitives in the `tokio_fs` module. + +mod symlink_dir; +mod symlink_file; + +pub use self::symlink_dir::{symlink_dir, SymlinkDirFuture}; +pub use self::symlink_file::{symlink_file, SymlinkFileFuture}; diff --git a/third_party/rust/tokio-fs/src/os/windows/symlink_dir.rs b/third_party/rust/tokio-fs/src/os/windows/symlink_dir.rs new file mode 100644 index 000000000000..9806ff3a3fab --- /dev/null +++ b/third_party/rust/tokio-fs/src/os/windows/symlink_dir.rs @@ -0,0 +1,54 @@ +use std::io; +use std::path::Path; +use std::os::windows::fs; + +use futures::{Future, Poll}; + +/// Creates a new directory symlink on the filesystem. +/// +/// The `dst` path will be a directory symbolic link pointing to the `src` +/// path. +/// +/// This is an async version of [`std::os::windows::fs::symlink_dir`][std] +/// +/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_dir.html +pub fn symlink_dir, Q: AsRef>(src: P, dst: Q) -> SymlinkDirFuture { + SymlinkDirFuture::new(src, dst) +} + +/// Future returned by `symlink_dir`. 
+#[derive(Debug)] +pub struct SymlinkDirFuture +where + P: AsRef, + Q: AsRef +{ + src: P, + dst: Q, +} + +impl SymlinkDirFuture +where + P: AsRef, + Q: AsRef +{ + fn new(src: P, dst: Q) -> SymlinkDirFuture { + SymlinkDirFuture { + src: src, + dst: dst, + } + } +} + +impl Future for SymlinkDirFuture +where + P: AsRef, + Q: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::symlink_dir(&self.src, &self.dst) ) + } +} diff --git a/third_party/rust/tokio-fs/src/os/windows/symlink_file.rs b/third_party/rust/tokio-fs/src/os/windows/symlink_file.rs new file mode 100644 index 000000000000..583b61587443 --- /dev/null +++ b/third_party/rust/tokio-fs/src/os/windows/symlink_file.rs @@ -0,0 +1,54 @@ +use std::io; +use std::path::Path; +use std::os::windows::fs; + +use futures::{Future, Poll}; + +/// Creates a new file symbolic link on the filesystem. +/// +/// The `dst` path will be a file symbolic link pointing to the `src` +/// path. +/// +/// This is an async version of [`std::os::windows::fs::symlink_file`][std] +/// +/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_file.html +pub fn symlink_file, Q: AsRef>(src: P, dst: Q) -> SymlinkFileFuture { + SymlinkFileFuture::new(src, dst) +} + +/// Future returned by `symlink_file`. 
+#[derive(Debug)] +pub struct SymlinkFileFuture +where + P: AsRef, + Q: AsRef +{ + src: P, + dst: Q, +} + +impl SymlinkFileFuture +where + P: AsRef, + Q: AsRef +{ + fn new(src: P, dst: Q) -> SymlinkFileFuture { + SymlinkFileFuture { + src: src, + dst: dst, + } + } +} + +impl Future for SymlinkFileFuture +where + P: AsRef, + Q: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::symlink_file(&self.src, &self.dst) ) + } +} diff --git a/third_party/rust/tokio-fs/src/read_dir.rs b/third_party/rust/tokio-fs/src/read_dir.rs new file mode 100644 index 000000000000..3818c7102b2d --- /dev/null +++ b/third_party/rust/tokio-fs/src/read_dir.rs @@ -0,0 +1,247 @@ +use std::ffi::OsString; +use std::fs::{self, DirEntry as StdDirEntry, ReadDir as StdReadDir, FileType, Metadata}; +use std::io; +#[cfg(unix)] +use std::os::unix::fs::DirEntryExt; +use std::path::{Path, PathBuf}; + +use futures::{Future, Poll, Stream}; + +/// Returns a stream over the entries within a directory. +/// +/// This is an async version of [`std::fs::read_dir`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.read_dir.html +pub fn read_dir

    (path: P) -> ReadDirFuture

    +where + P: AsRef + Send + 'static, +{ + ReadDirFuture::new(path) +} + +/// Future returned by `read_dir`. +#[derive(Debug)] +pub struct ReadDirFuture

    +where + P: AsRef + Send + 'static, +{ + path: P, +} + +impl

    ReadDirFuture

    +where + P: AsRef + Send + 'static +{ + fn new(path: P) -> ReadDirFuture

    { + ReadDirFuture { + path: path, + } + } +} + +impl

    Future for ReadDirFuture

    +where + P: AsRef + Send + 'static, +{ + type Item = ReadDir; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| Ok(ReadDir(fs::read_dir(&self.path)?))) + } +} + +/// Stream of the entries in a directory. +/// +/// This stream is returned from the [`read_dir`] function of this module and +/// will yield instances of [`DirEntry`]. Through a [`DirEntry`] +/// information like the entry's path and possibly other metadata can be +/// learned. +/// +/// # Errors +/// +/// This [`Stream`] will return an [`Err`] if there's some sort of intermittent +/// IO error during iteration. +/// +/// [`read_dir`]: fn.read_dir.html +/// [`DirEntry`]: struct.DirEntry.html +/// [`Stream`]: ../futures/stream/trait.Stream.html +/// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err +#[derive(Debug)] +pub struct ReadDir(StdReadDir); + +impl Stream for ReadDir { + type Item = DirEntry; + type Error = io::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + ::blocking_io(|| { + match self.0.next() { + Some(Err(err)) => Err(err), + Some(Ok(item)) => Ok(Some(DirEntry(item))), + None => Ok(None) + } + }) + } +} + +/// Entries returned by the [`ReadDir`] stream. +/// +/// [`ReadDir`]: struct.ReadDir.html +/// +/// This is a specialized version of [`std::fs::DirEntry`][std] for usage from the +/// Tokio runtime. +/// +/// An instance of `DirEntry` represents an entry inside of a directory on the +/// filesystem. Each entry can be inspected via methods to learn about the full +/// path or possibly other metadata through per-platform extension traits. +/// +/// [std]: https://doc.rust-lang.org/std/fs/struct.DirEntry.html +#[derive(Debug)] +pub struct DirEntry(StdDirEntry); + +impl DirEntry { + /// Destructures the `tokio_fs::DirEntry` into a [`std::fs::DirEntry`][std]. 
+ /// + /// [std]: https://doc.rust-lang.org/std/fs/struct.DirEntry.html + pub fn into_std(self) -> StdDirEntry { + self.0 + } + + /// Returns the full path to the file that this entry represents. + /// + /// The full path is created by joining the original path to `read_dir` + /// with the filename of this entry. + /// + /// # Examples + /// + /// ``` + /// # extern crate futures; + /// # extern crate tokio; + /// # extern crate tokio_fs; + /// use futures::{Future, Stream}; + /// + /// fn main() { + /// let fut = tokio_fs::read_dir(".").flatten_stream().for_each(|dir| { + /// println!("{:?}", dir.path()); + /// Ok(()) + /// }).map_err(|err| { eprintln!("Error: {:?}", err); () }); + /// tokio::run(fut); + /// } + /// ``` + /// + /// This prints output like: + /// + /// ```text + /// "./whatever.txt" + /// "./foo.html" + /// "./hello_world.rs" + /// ``` + /// + /// The exact text, of course, depends on what files you have in `.`. + pub fn path(&self) -> PathBuf { + self.0.path() + } + + /// Returns the bare file name of this directory entry without any other + /// leading path component. + /// + /// # Examples + /// + /// ``` + /// # extern crate futures; + /// # extern crate tokio; + /// # extern crate tokio_fs; + /// use futures::{Future, Stream}; + /// + /// fn main() { + /// let fut = tokio_fs::read_dir(".").flatten_stream().for_each(|dir| { + /// // Here, `dir` is a `DirEntry`. + /// println!("{:?}", dir.file_name()); + /// Ok(()) + /// }).map_err(|err| { eprintln!("Error: {:?}", err); () }); + /// tokio::run(fut); + /// } + /// ``` + pub fn file_name(&self) -> OsString { + self.0.file_name() + } + + /// Return the metadata for the file that this entry points at. + /// + /// This function will not traverse symlinks if this entry points at a + /// symlink. 
+ /// + /// # Platform-specific behavior + /// + /// On Windows this function is cheap to call (no extra system calls + /// needed), but on Unix platforms this function is the equivalent of + /// calling `symlink_metadata` on the path. + /// + /// # Examples + /// + /// ``` + /// # extern crate futures; + /// # extern crate tokio; + /// # extern crate tokio_fs; + /// use futures::{Future, Stream}; + /// use futures::future::poll_fn; + /// + /// fn main() { + /// let fut = tokio_fs::read_dir(".").flatten_stream().for_each(|dir| { + /// // Here, `dir` is a `DirEntry`. + /// let path = dir.path(); + /// poll_fn(move || dir.poll_metadata()).map(move |metadata| { + /// println!("{:?}: {:?}", path, metadata.permissions()); + /// }) + /// }).map_err(|err| { eprintln!("Error: {:?}", err); () }); + /// tokio::run(fut); + /// } + /// ``` + pub fn poll_metadata(&self) -> Poll { + ::blocking_io(|| self.0.metadata()) + } + + /// Return the file type for the file that this entry points at. + /// + /// This function will not traverse symlinks if this entry points at a + /// symlink. + /// + /// # Platform-specific behavior + /// + /// On Windows and most Unix platforms this function is free (no extra + /// system calls needed), but some Unix platforms may require the equivalent + /// call to `symlink_metadata` to learn about the target file type. + /// + /// # Examples + /// + /// ``` + /// # extern crate futures; + /// # extern crate tokio; + /// # extern crate tokio_fs; + /// use futures::{Future, Stream}; + /// use futures::future::poll_fn; + /// + /// fn main() { + /// let fut = tokio_fs::read_dir(".").flatten_stream().for_each(|dir| { + /// // Here, `dir` is a `DirEntry`. + /// let path = dir.path(); + /// poll_fn(move || dir.poll_file_type()).map(move |file_type| { + /// // Now let's show our entry's file type! 
+ /// println!("{:?}: {:?}", path, file_type); + /// }) + /// }).map_err(|err| { eprintln!("Error: {:?}", err); () }); + /// tokio::run(fut); + /// } + /// ``` + pub fn poll_file_type(&self) -> Poll { + ::blocking_io(|| self.0.file_type()) + } +} + +#[cfg(unix)] +impl DirEntryExt for DirEntry { + fn ino(&self) -> u64 { + self.0.ino() + } +} diff --git a/third_party/rust/tokio-fs/src/read_link.rs b/third_party/rust/tokio-fs/src/read_link.rs new file mode 100644 index 000000000000..5e5bf2a30e39 --- /dev/null +++ b/third_party/rust/tokio-fs/src/read_link.rs @@ -0,0 +1,46 @@ +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +use futures::{Future, Poll}; + +/// Reads a symbolic link, returning the file that the link points to. +/// +/// This is an async version of [`std::fs::read_link`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.read_link.html +pub fn read_link>(path: P) -> ReadLinkFuture

    { + ReadLinkFuture::new(path) +} + +/// Future returned by `read_link`. +#[derive(Debug)] +pub struct ReadLinkFuture

    +where + P: AsRef +{ + path: P, +} + +impl

    ReadLinkFuture

    +where + P: AsRef +{ + fn new(path: P) -> ReadLinkFuture

    { + ReadLinkFuture { + path: path, + } + } +} + +impl

    Future for ReadLinkFuture

    +where + P: AsRef +{ + type Item = PathBuf; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::read_link(&self.path) ) + } +} diff --git a/third_party/rust/tokio-fs/src/remove_dir.rs b/third_party/rust/tokio-fs/src/remove_dir.rs new file mode 100644 index 000000000000..5aa73bfb5f47 --- /dev/null +++ b/third_party/rust/tokio-fs/src/remove_dir.rs @@ -0,0 +1,46 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Removes an existing, empty directory. +/// +/// This is an async version of [`std::fs::remove_dir`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.remove_dir.html +pub fn remove_dir>(path: P) -> RemoveDirFuture

    { + RemoveDirFuture::new(path) +} + +/// Future returned by `remove_dir`. +#[derive(Debug)] +pub struct RemoveDirFuture

    +where + P: AsRef +{ + path: P, +} + +impl

    RemoveDirFuture

    +where + P: AsRef +{ + fn new(path: P) -> RemoveDirFuture

    { + RemoveDirFuture { + path: path, + } + } +} + +impl

    Future for RemoveDirFuture

    +where + P: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::remove_dir(&self.path) ) + } +} diff --git a/third_party/rust/tokio-fs/src/remove_file.rs b/third_party/rust/tokio-fs/src/remove_file.rs new file mode 100644 index 000000000000..f61741857918 --- /dev/null +++ b/third_party/rust/tokio-fs/src/remove_file.rs @@ -0,0 +1,50 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Removes a file from the filesystem. +/// +/// Note that there is no +/// guarantee that the file is immediately deleted (e.g. depending on +/// platform, other open file descriptors may prevent immediate removal). +/// +/// This is an async version of [`std::fs::remove_file`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.remove_file.html +pub fn remove_file>(path: P) -> RemoveFileFuture

    { + RemoveFileFuture::new(path) +} + +/// Future returned by `remove_file`. +#[derive(Debug)] +pub struct RemoveFileFuture

    +where + P: AsRef +{ + path: P, +} + +impl

    RemoveFileFuture

    +where + P: AsRef +{ + fn new(path: P) -> RemoveFileFuture

    { + RemoveFileFuture { + path: path, + } + } +} + +impl

    Future for RemoveFileFuture

    +where + P: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::remove_file(&self.path) ) + } +} diff --git a/third_party/rust/tokio-fs/src/rename.rs b/third_party/rust/tokio-fs/src/rename.rs new file mode 100644 index 000000000000..210f53bb5a22 --- /dev/null +++ b/third_party/rust/tokio-fs/src/rename.rs @@ -0,0 +1,54 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Rename a file or directory to a new name, replacing the original file if +/// `to` already exists. +/// +/// This will not work if the new name is on a different mount point. +/// +/// This is an async version of [`std::fs::rename`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.rename.html +pub fn rename, Q: AsRef>(from: P, to: Q) -> RenameFuture { + RenameFuture::new(from, to) +} + +/// Future returned by `rename`. +#[derive(Debug)] +pub struct RenameFuture +where + P: AsRef, + Q: AsRef +{ + from: P, + to: Q, +} + +impl RenameFuture +where + P: AsRef, + Q: AsRef +{ + fn new(from: P, to: Q) -> RenameFuture { + RenameFuture { + from: from, + to: to, + } + } +} + +impl Future for RenameFuture +where + P: AsRef, + Q: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::rename(&self.from, &self.to) ) + } +} diff --git a/third_party/rust/tokio-fs/src/set_permissions.rs b/third_party/rust/tokio-fs/src/set_permissions.rs new file mode 100644 index 000000000000..a85044862632 --- /dev/null +++ b/third_party/rust/tokio-fs/src/set_permissions.rs @@ -0,0 +1,48 @@ +use std::fs; +use std::io; +use std::path::Path; + +use futures::{Future, Poll}; + +/// Changes the permissions found on a file or a directory. +/// +/// This is an async version of [`std::fs::set_permissions`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.set_permissions.html +pub fn set_permissions>(path: P, perm: fs::Permissions) -> SetPermissionsFuture

    { + SetPermissionsFuture::new(path, perm) +} + +/// Future returned by `set_permissions`. +#[derive(Debug)] +pub struct SetPermissionsFuture

    +where + P: AsRef +{ + path: P, + perm: fs::Permissions, +} + +impl

    SetPermissionsFuture

    +where + P: AsRef +{ + fn new(path: P, perm: fs::Permissions) -> SetPermissionsFuture

    { + SetPermissionsFuture { + path: path, + perm: perm, + } + } +} + +impl

    Future for SetPermissionsFuture

    +where + P: AsRef +{ + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + ::blocking_io(|| fs::set_permissions(&self.path, self.perm.clone()) ) + } +} diff --git a/third_party/rust/tokio-fs/src/stderr.rs b/third_party/rust/tokio-fs/src/stderr.rs new file mode 100644 index 000000000000..cf643907577c --- /dev/null +++ b/third_party/rust/tokio-fs/src/stderr.rs @@ -0,0 +1,45 @@ +use tokio_io::{AsyncWrite}; + +use futures::Poll; + +use std::io::{self, Write, Stderr as StdStderr}; + +/// A handle to the standard error stream of a process. +/// +/// The handle implements the [`AsyncWrite`] trait, but beware that concurrent +/// writes to `Stderr` must be executed with care. +/// +/// Created by the [`stderr`] function. +/// +/// [`stderr`]: fn.stderr.html +/// [`AsyncWrite`]: trait.AsyncWrite.html +#[derive(Debug)] +pub struct Stderr { + std: StdStderr, +} + +/// Constructs a new handle to the standard error of the current process. +/// +/// The returned handle allows writing to standard error from the within the +/// Tokio runtime. +pub fn stderr() -> Stderr { + let std = io::stderr(); + Stderr { std } +} + +impl Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result { + ::would_block(|| self.std.write(buf)) + } + + fn flush(&mut self) -> io::Result<()> { + ::would_block(|| self.std.flush()) + } +} + +impl AsyncWrite for Stderr { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + diff --git a/third_party/rust/tokio-fs/src/stdin.rs b/third_party/rust/tokio-fs/src/stdin.rs new file mode 100644 index 000000000000..f41b8dc84c22 --- /dev/null +++ b/third_party/rust/tokio-fs/src/stdin.rs @@ -0,0 +1,38 @@ +use tokio_io::{AsyncRead}; + +use std::io::{self, Read, Stdin as StdStdin}; + +/// A handle to the standard input stream of a process. +/// +/// The handle implements the [`AsyncRead`] trait, but beware that concurrent +/// reads of `Stdin` must be executed with care. 
+/// +/// Created by the [`stdin`] function. +/// +/// [`stdin`]: fn.stdin.html +/// [`AsyncRead`]: trait.AsyncRead.html +#[derive(Debug)] +pub struct Stdin { + std: StdStdin, +} + +/// Constructs a new handle to the standard input of the current process. +/// +/// The returned handle allows reading from standard input from the within the +/// Tokio runtime. +pub fn stdin() -> Stdin { + let std = io::stdin(); + Stdin { std } +} + +impl Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + ::would_block(|| self.std.read(buf)) + } +} + +impl AsyncRead for Stdin { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { + false + } +} diff --git a/third_party/rust/tokio-fs/src/stdout.rs b/third_party/rust/tokio-fs/src/stdout.rs new file mode 100644 index 000000000000..1c4cd5ad05be --- /dev/null +++ b/third_party/rust/tokio-fs/src/stdout.rs @@ -0,0 +1,44 @@ +use tokio_io::{AsyncWrite}; + +use futures::Poll; + +use std::io::{self, Write, Stdout as StdStdout}; + +/// A handle to the standard output stream of a process. +/// +/// The handle implements the [`AsyncWrite`] trait, but beware that concurrent +/// writes to `Stdout` must be executed with care. +/// +/// Created by the [`stdout`] function. +/// +/// [`stdout`]: fn.stdout.html +/// [`AsyncWrite`]: trait.AsyncWrite.html +#[derive(Debug)] +pub struct Stdout { + std: StdStdout, +} + +/// Constructs a new handle to the standard output of the current process. +/// +/// The returned handle allows writing to standard out from the within the Tokio +/// runtime. 
+pub fn stdout() -> Stdout { + let std = io::stdout(); + Stdout { std } +} + +impl Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result { + ::would_block(|| self.std.write(buf)) + } + + fn flush(&mut self) -> io::Result<()> { + ::would_block(|| self.std.flush()) + } +} + +impl AsyncWrite for Stdout { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} diff --git a/third_party/rust/tokio-fs/src/symlink_metadata.rs b/third_party/rust/tokio-fs/src/symlink_metadata.rs new file mode 100644 index 000000000000..db02769e205d --- /dev/null +++ b/third_party/rust/tokio-fs/src/symlink_metadata.rs @@ -0,0 +1,49 @@ +use super::blocking_io; + +use futures::{Future, Poll}; + +use std::fs::{self, Metadata}; +use std::io; +use std::path::Path; + +/// Queries the file system metadata for a path. +/// +/// This is an async version of [`std::fs::symlink_metadata`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.symlink_metadata.html +pub fn symlink_metadata

    (path: P) -> SymlinkMetadataFuture

    +where + P: AsRef + Send + 'static, +{ + SymlinkMetadataFuture::new(path) +} + +/// Future returned by `symlink_metadata`. +#[derive(Debug)] +pub struct SymlinkMetadataFuture

    +where + P: AsRef + Send + 'static, +{ + path: P, +} + +impl

    SymlinkMetadataFuture

    +where + P: AsRef + Send + 'static, +{ + pub(crate) fn new(path: P) -> Self { + Self { path } + } +} + +impl

    Future for SymlinkMetadataFuture

    +where + P: AsRef + Send + 'static, +{ + type Item = Metadata; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + blocking_io(|| fs::symlink_metadata(&self.path)) + } +} diff --git a/third_party/rust/tokio-fs/tests/file.rs b/third_party/rust/tokio-fs/tests/file.rs new file mode 100644 index 000000000000..c8185c1f229a --- /dev/null +++ b/third_party/rust/tokio-fs/tests/file.rs @@ -0,0 +1,146 @@ +extern crate futures; +extern crate rand; +extern crate tempdir; +extern crate tokio_fs; +extern crate tokio_io; +extern crate tokio_threadpool; + +use tokio_fs::*; +use tokio_io::io; +use tokio_threadpool::*; + +use futures::Future; +use futures::future::poll_fn; +use futures::sync::oneshot; +use rand::{thread_rng, Rng}; +use tempdir::TempDir; + +use std::fs::File as StdFile; +use std::io::{Read, SeekFrom}; + +#[test] +fn read_write() { + const NUM_CHARS: usize = 16 * 1_024; + + let dir = TempDir::new("tokio-fs-tests").unwrap(); + let file_path = dir.path().join("read_write.txt"); + + let contents: Vec = thread_rng().gen_ascii_chars() + .take(NUM_CHARS) + .collect::() + .into(); + + let pool = Builder::new() + .pool_size(1) + .build(); + + let (tx, rx) = oneshot::channel(); + + pool.spawn({ + let file_path = file_path.clone(); + let contents = contents.clone(); + + File::create(file_path) + .and_then(|file| file.metadata()) + .inspect(|&(_, ref metadata)| assert!(metadata.is_file())) + .and_then(move |(file, _)| io::write_all(file, contents)) + .and_then(|(mut file, _)| { + poll_fn(move || file.poll_sync_all()) + }) + .then(|res| { + let _ = res.unwrap(); + tx.send(()).unwrap(); + Ok(()) + }) + }); + + rx.wait().unwrap(); + + let mut file = StdFile::open(&file_path).unwrap(); + + let mut dst = vec![]; + file.read_to_end(&mut dst).unwrap(); + + assert_eq!(dst, contents); + + let (tx, rx) = oneshot::channel(); + + pool.spawn({ + File::open(file_path) + .and_then(|file| io::read_to_end(file, vec![])) + .then(move |res| { + let (_, buf) = res.unwrap(); + 
assert_eq!(buf, contents); + tx.send(()).unwrap(); + Ok(()) + }) + }); + + rx.wait().unwrap(); +} + +#[test] +fn metadata() { + let dir = TempDir::new("tokio-fs-tests").unwrap(); + let file_path = dir.path().join("metadata.txt"); + + let pool = Builder::new().pool_size(1).build(); + + let (tx, rx) = oneshot::channel(); + + pool.spawn({ + let file_path = file_path.clone(); + let file_path2 = file_path.clone(); + let file_path3 = file_path.clone(); + + tokio_fs::metadata(file_path) + .then(|r| { + let _ = r.err().unwrap(); + Ok(()) + }) + .and_then(|_| File::create(file_path2)) + .and_then(|_| tokio_fs::metadata(file_path3)) + .then(|r| { + assert!(r.unwrap().is_file()); + tx.send(()) + }) + }); + + rx.wait().unwrap(); +} + +#[test] +fn seek() { + let dir = TempDir::new("tokio-fs-tests").unwrap(); + let file_path = dir.path().join("seek.txt"); + + let pool = Builder::new().pool_size(1).build(); + + let (tx, rx) = oneshot::channel(); + + pool.spawn( + OpenOptions::new() + .create(true) + .read(true) + .write(true) + .open(file_path) + .and_then(|file| io::write_all(file, "Hello, world!")) + .and_then(|(file, _)| file.seek(SeekFrom::End(-6))) + .and_then(|(file, _)| io::read_exact(file, vec![0; 5])) + .and_then(|(file, buf)| { + assert_eq!(buf, b"world"); + file.seek(SeekFrom::Start(0)) + }) + .and_then(|(file, _)| io::read_exact(file, vec![0; 5])) + .and_then(|(_, buf)| { + assert_eq!(buf, b"Hello"); + Ok(()) + }) + .then(|r| { + let _ = r.unwrap(); + tx.send(()) + }), + ); + + rx.wait().unwrap(); +} diff --git a/third_party/rust/tokio-io/.cargo-checksum.json b/third_party/rust/tokio-io/.cargo-checksum.json index 959b3cacbda8..9901a42f0390 100644 --- a/third_party/rust/tokio-io/.cargo-checksum.json +++ b/third_party/rust/tokio-io/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{".travis.yml":"471c401b386c3fe94c943a0ac3704d4d92ea02fa0b037169a0102b3f40c69f69","Cargo.toml":"ba04305fe1a3dadfbf0717a65b88e9c8ab75a276c35b2579f37213908ecc9ec4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"cce9a15791ab2ad9f67f8e441f5c9bd9a8ac51f6b37d46029a43710175ab8248","src/codec.rs":"f71e713df0055765c3187fed54463d94a33864833cb252286b88c9141bdcbcfe","src/copy.rs":"c7a8a530da6d6ecbb33fa502d1bd97c552e7d874570b3ef47ded52d97d779bc3","src/flush.rs":"b354745517e3679a380699e3b799f896bc818b6baccebb526e1e6c33f04252d6","src/framed.rs":"72d65dad3c132d79c550e76b0d0426c24b95f4d78650a90468da259089c7f1ee","src/framed_read.rs":"165d30cc7d9256fd5705e7490ea709a5c5d6fed2159adaefc37f28a2f1fb9244","src/framed_write.rs":"0d84e58184f565f6b5e8344f3e7a9a561833c68b89d2b42924c090d0a1d658c6","src/io.rs":"90b14300e9849a959d2b09de0bf467d51460822bee80d5054728b8018cf75ffb","src/length_delimited.rs":"16e282101a067350e7e4df0052fbcf58cda6b5665a83e6b7a7941eb2c0135894","src/lib.rs":"8e0dd19afd29c38efebd303e805cadce16b35c876a08f166c8527128c8f26c53","src/lines.rs":"9e3970714f4a6496fbfcfbf07ca24b8f7c7f5203a5778f037736df3bece1640c","src/read.rs":"345d8430416b55c506b213f717c86e531465159ce779bea654c605f5b6508f96","src/read_exact.rs":"79a39e865e271e31f789d94a282b76d58682dcf64b737e55a8d3952191df06bc","src/read_to_end.rs":"54e7696e3427351ec168d7d3dc820169a7acfdafe28106e14f2d18316b104671","src/read_until.rs":"ecc3d7e967fd3b92efa6dce43198d719c7628a01e70facfdfe0d2806d060eabe","src/shutdown.rs":"d708bcd6a54ecb871769c6c7e31db2c4dfdf40c7a76b855e0a45833c68114e77","src/split.rs":"00232b018571ecb8ebf0c5c927e78f3aee573632ec976f27d65c63fe0e50e89b","src/window.rs":"e6fca0e2e8d99c76b4fe3ab9956997c6fd8167005d4dec01cae0c7879203e3c5","src/write_all.rs":"269d4f744ce038a62cc6c49272af6035a98f37b041bda154a40b5144392b7891","tests/async_read.rs":"79aafcd01876c0eaa642950556482e00598e6bc8b55
60a04d2df4b6aafd5a8c4","tests/framed.rs":"705eea7cab52b00ce680da96a5f329ecce1b0ac13dea06504ebed0188d0b97d8","tests/framed_read.rs":"5c3a0f82b31807a3a7994b6c9867d3dbad1b06ead4125cef2eec414b46c1783f","tests/framed_write.rs":"e6c85612726d69f48d95c15d30906e72b5d7a4e2cea9e874c65c38df3cea8691","tests/length_delimited.rs":"fc6a9ba729459bc7962ae8b1ad7c3d15df7f8d8941694d1a4ea965bedb61c1e0"},"package":"b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af"} \ No newline at end of file +{"files":{"CHANGELOG.md":"5bcf40c6c83c52c1ab442bf9309442702cbae868b9733391aba7f5f2c988ec49","Cargo.toml":"6fec48ce934abe8df85c718a283f3e78d603795c55cd70de8d87036449d4f9b8","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"7c3965288e43746860347174468533ce0216d34c69397c57121ab42bec33d55f","src/_tokio_codec/decoder.rs":"0243699818a7ee0ba82a5ec40ed87817a3d756f3cf253de6fd8367f7dc79d1ae","src/_tokio_codec/encoder.rs":"1283daf4e9c985dc859d0e2e3f99f8f76832543c636b0f87294d02f0133c7b59","src/_tokio_codec/framed.rs":"000c9186ba7bd44e309cd8483f0784909a347a2113b57e36ed38c0ec8b5defea","src/_tokio_codec/framed_read.rs":"bd4400baac4d25c79b7dea3a7150badd5fd8f15f325817ac97e27301ed06b1d2","src/_tokio_codec/framed_write.rs":"7cf52995633dc30840b571a99a28f258dfe8cca9b0de0a9833469bc164734d03","src/_tokio_codec/mod.rs":"495f7c486650ad57f974c409d21fd128b295d8bb9d78e5297add1b12c402c88d","src/allow_std.rs":"1a2a1a43f3ee56b2d71ad48872bb40c571e55855730eb2746c35e59a05d36a00","src/async_read.rs":"cb089df6d0a5a147b6e57d758298918a4a710c62f31107f12afa7a2133abb772","src/async_write.rs":"2d560cdf62f5b05c2db8cef068bd769d3db03647e6b98476a668ba62f2cd410b","src/codec/bytes_codec.rs":"73eaada67ddda5b321eb81d7da6952978601e91647a518ae4bd9d06b8d009102","src/codec/decoder.rs":"48cf4263b63ba5efe5411b865ef47616290a32f04d60cdf3cb156aeb51d2fb0e","src/codec/encoder.rs":"97c9d30d36fbd0c41c67176c2e8037a29cec08ace124e912ebcd2ee903558d66","src/codec/lines_codec.rs":"93e80d6d0de1804af688fcd386fdd3e
d145c89c77d249986a2102e4b19a08bb7","src/codec/mod.rs":"94dbd50ff517b66722be6d1a370b31634f9171c62212cce4d7262613f938dba7","src/framed.rs":"656ad590abbf51925c2a6458a10ee9749ac3d9664befb308253ac1d8c2713720","src/framed_read.rs":"07e36ff58fe30bf7b0aa28a998b0bc4e2f78acd04cc1570a877b4eeac1cadcf7","src/framed_write.rs":"8bdd4a15d7f31c973bd466c48e127f56803e0b7f96a2e141ca5a9c3d6044f93a","src/io/copy.rs":"e0f6d14caaaaa4d6a5479db419fcc7162aa574cf1aadd21b01707fbfabbbe570","src/io/flush.rs":"b3b9cfe6eb0551407e93813379928f6d93be4854b58c2aa452f11327289725ad","src/io/mod.rs":"5fdd48ac7f178e0b48975a5b87dcdf5fbefdb50ec846036fcbfb4e423b913e49","src/io/read.rs":"7b5abc316490fdead3864d4cdb0dc02db9738a7ba7c3141ce4553f0a11cdd609","src/io/read_exact.rs":"71978b5124c52badfd28709ba2e779aed92800a77d1cc3cd1488f676f7cf868f","src/io/read_to_end.rs":"54e7696e3427351ec168d7d3dc820169a7acfdafe28106e14f2d18316b104671","src/io/read_until.rs":"ecc3d7e967fd3b92efa6dce43198d719c7628a01e70facfdfe0d2806d060eabe","src/io/shutdown.rs":"16eb91e5d480af92be4bccb01105ba98a0d1ca9bfaecf9fe0a23c9417eb19307","src/io/write_all.rs":"e112742d7f6d6869f9abe77a3515e3098ca60f567ba9bad02359b5bc754fc09e","src/length_delimited.rs":"28e2fbab258407cffa0fbec412c045b608293eb8e5430b236bb7385130381296","src/lib.rs":"a6f41a5359c3afc72d5d5e09fa5f0488c78436b2af227b2625f4e03e75967381","src/lines.rs":"9e3970714f4a6496fbfcfbf07ca24b8f7c7f5203a5778f037736df3bece1640c","src/split.rs":"b8b70987f7d4815e37c5130dec3678ccae3e981e9ad4bf84c58e2f81046dba28","src/window.rs":"e6fca0e2e8d99c76b4fe3ab9956997c6fd8167005d4dec01cae0c7879203e3c5","tests/async_read.rs":"79aafcd01876c0eaa642950556482e00598e6bc8b5560a04d2df4b6aafd5a8c4","tests/length_delimited.rs":"ea3ba492f0011f1f9cf96387b8a6e793e9183916e8dfa6aba660fafa59e96cab"},"package":"a5c9635ee806f26d302b8baa1e145689a280d8f5aa8d0552e7344808da54cc21"} \ No newline at end of file diff --git a/third_party/rust/tokio-io/.travis.yml b/third_party/rust/tokio-io/.travis.yml deleted file mode 100644 index 
5da0a5f4b397..000000000000 --- a/third_party/rust/tokio-io/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: rust - -rust: - - stable - - beta - - nightly -sudo: false -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - cargo build - - cargo test - - cargo test --no-default-features - - cargo doc --no-deps - -after_success: - - travis-cargo --only nightly doc-upload -env: - global: - - secure: "aDOBmkUbJR3kY1EDzchDhxdzV2HBt8yUHicNlWDZUh+JOmeV/2ezqPt4bre2fgludm98P7tmTG7GHbtnYtMaU4MTw8EarmEXR3tXqUQdDoWzSsMbUsJVZp3wRUWEF2UUZMv7u+xsvSDrIwF2ux6LcySEN6j2gmlobphmOod5NzhJp8d4ap7yLZ6UW4cnJ3m69HtD4yYa8wy3kGvfYOgcFBoB1HODSu2J9sFCzVrdxe9tm3aBvvl/dR0RAmRXyM7ZNE8Fv6aiISJ91M3EaulN1jzggdYEkN3bU0oxnzHvzrFDt1zmi30uR8jBYJmbBlSKYnhSoQqCKZMS7QEATqMDxGl1/M8QJPnaKg+Hu3w0i5yH5QInLW/8j+myzhMzLM8/IDrppZS4fuEb1XcJ/5m+ip3XjLSrXQzFRioA908NvcOUL5t71Yx1uey2kSccUOsGh3wETRbSWWs5SQPxt4BYP9jd8zpVZIInJRgztLFwqGcTDdDSrVHpKpzVNJSMmdgOG8lNubGjdwyrC8J2EyPlWa+QOyx7CoSoyygm4nV4a/UpGPeNgHHkbj/qrf3dhueypLnlj8nJyBk2Lzug8PVCszyCwfv2wXVJ9OCO40lp01XTvxT0cLzgWinn+TvmRn+Mhyt13u2urLjqfjKjA93v6OGZUqnvDG+2FiwGNP3GS+E=" - -notifications: - email: - on_success: never diff --git a/third_party/rust/tokio-io/CHANGELOG.md b/third_party/rust/tokio-io/CHANGELOG.md new file mode 100644 index 000000000000..012c96a9424e --- /dev/null +++ b/third_party/rust/tokio-io/CHANGELOG.md @@ -0,0 +1,40 @@ +# 0.1.7 (June 13, 2018) + +* Move `codec::{Encode, Decode, Framed*}` into `tokio-codec` (#353) + +# 0.1.6 (March 09, 2018) + +* Add native endian builder fn to length_delimited (#144) +* Add AsyncRead::poll_read, AsyncWrite::poll_write (#170) + +# 0.1.5 (February 07, 2018) + +* Fix bug in `BytesCodec` and `LinesCodec`. +* Performance improvement to `split`. + +# 0.1.4 (November 10, 2017) + +* Use `FrameTooBig` as length delimited error type (#70). +* Provide `Bytes` and `Lines` codecs (#78). +* Provide `AllowStdIo` wrapper (#76). 
+ +# 0.1.3 (August 14, 2017) + +* Fix bug involving zero sized writes in copy helper (#57). +* Add get / set accessors for length delimited max frame length setting. (#65). +* Add `Framed::into_parts_and_codec` (#59). + +# 0.1.2 (May 23, 2017) + +* Add `from_parts` and `into_parts` to the framing combinators. +* Support passing an initialized buffer to the framing combinators. +* Add `length_adjustment` support to length delimited encoding (#48). + +# 0.1.1 (March 22, 2017) + +* Add some omitted `Self: Sized` bounds. +* Add missing "inner" fns. + +# 0.1.0 (March 15, 2017) + +* Initial release diff --git a/third_party/rust/tokio-io/Cargo.toml b/third_party/rust/tokio-io/Cargo.toml index c03487649d88..82954137ace4 100644 --- a/third_party/rust/tokio-io/Cargo.toml +++ b/third_party/rust/tokio-io/Cargo.toml @@ -12,19 +12,19 @@ [package] name = "tokio-io" -version = "0.1.3" -authors = ["Alex Crichton "] +version = "0.1.7" +authors = ["Carl Lerche "] description = "Core I/O primitives for asynchronous I/O in Rust.\n" homepage = "https://tokio.rs" documentation = "https://docs.rs/tokio-io/0.1" categories = ["asynchronous"] -license = "MIT/Apache-2.0" -repository = "https://github.com/tokio-rs/tokio-io" -[dependencies.log] -version = "0.3" - +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" [dependencies.bytes] -version = "0.4" +version = "0.4.7" [dependencies.futures] -version = "0.1.11" +version = "0.1.18" + +[dependencies.log] +version = "0.4" diff --git a/third_party/rust/tokio-io/LICENSE b/third_party/rust/tokio-io/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-io/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, 
+publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-io/README.md b/third_party/rust/tokio-io/README.md index cc1451924823..324e7e6c7b7e 100644 --- a/third_party/rust/tokio-io/README.md +++ b/third_party/rust/tokio-io/README.md @@ -26,11 +26,12 @@ online at [https://tokio.rs](https://tokio.rs). The [API documentation](https://docs.rs/tokio-io) is also a great place to get started for the nitty-gritty. +## License -# License +This project is licensed under the [MIT license](LICENSE). -`tokio-io` is primarily distributed under the terms of both the MIT license -and the Apache License (Version 2.0), with portions covered by various BSD-like -licenses. +### Contribution -See LICENSE-APACHE, and LICENSE-MIT for details. +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. 
diff --git a/third_party/rust/tokio-io/src/_tokio_codec/decoder.rs b/third_party/rust/tokio-io/src/_tokio_codec/decoder.rs new file mode 100644 index 000000000000..9c9fbacb45fd --- /dev/null +++ b/third_party/rust/tokio-io/src/_tokio_codec/decoder.rs @@ -0,0 +1,3 @@ +// For now, we need to keep the implmentation of Encoder in tokio_io. + +pub use codec::Decoder; diff --git a/third_party/rust/tokio-io/src/_tokio_codec/encoder.rs b/third_party/rust/tokio-io/src/_tokio_codec/encoder.rs new file mode 100644 index 000000000000..9cbe054d74ed --- /dev/null +++ b/third_party/rust/tokio-io/src/_tokio_codec/encoder.rs @@ -0,0 +1,3 @@ +// For now, we need to keep the implmentation of Encoder in tokio_io. + +pub use codec::Encoder; diff --git a/third_party/rust/tokio-io/src/_tokio_codec/framed.rs b/third_party/rust/tokio-io/src/_tokio_codec/framed.rs new file mode 100644 index 000000000000..666bf9471ccc --- /dev/null +++ b/third_party/rust/tokio-io/src/_tokio_codec/framed.rs @@ -0,0 +1,262 @@ +#![allow(deprecated)] + +use std::io::{self, Read, Write}; +use std::fmt; + +use {AsyncRead, AsyncWrite}; +use codec::{Decoder, Encoder}; +use super::framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2}; +use super::framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2}; + +use futures::{Stream, Sink, StartSend, Poll}; +use bytes::{BytesMut}; + +/// A unified `Stream` and `Sink` interface to an underlying I/O object, using +/// the `Encoder` and `Decoder` traits to encode and decode frames. +/// +/// You can create a `Framed` instance by using the `AsyncRead::framed` adapter. +pub struct Framed { + inner: FramedRead2>>, +} + +pub struct Fuse(pub T, pub U); + +impl Framed +where T: AsyncRead + AsyncWrite, + U: Decoder + Encoder, +{ + /// Provides a `Stream` and `Sink` interface for reading and writing to this + /// `Io` object, using `Decode` and `Encode` to read and write the raw data. 
+ /// + /// Raw I/O objects work with byte sequences, but higher-level code usually + /// wants to batch these into meaningful chunks, called "frames". This + /// method layers framing on top of an I/O object, by using the `Codec` + /// traits to handle encoding and decoding of messages frames. Note that + /// the incoming and outgoing frame types may be distinct. + /// + /// This function returns a *single* object that is both `Stream` and + /// `Sink`; grouping this into a single object is often useful for layering + /// things like gzip or TLS, which require both read and write access to the + /// underlying object. + /// + /// If you want to work more directly with the streams and sink, consider + /// calling `split` on the `Framed` returned by this method, which will + /// break them into separate objects, allowing them to interact more easily. + pub fn new(inner: T, codec: U) -> Framed { + Framed { + inner: framed_read2(framed_write2(Fuse(inner, codec))), + } + } +} + +impl Framed { + /// Provides a `Stream` and `Sink` interface for reading and writing to this + /// `Io` object, using `Decode` and `Encode` to read and write the raw data. + /// + /// Raw I/O objects work with byte sequences, but higher-level code usually + /// wants to batch these into meaningful chunks, called "frames". This + /// method layers framing on top of an I/O object, by using the `Codec` + /// traits to handle encoding and decoding of messages frames. Note that + /// the incoming and outgoing frame types may be distinct. + /// + /// This function returns a *single* object that is both `Stream` and + /// `Sink`; grouping this into a single object is often useful for layering + /// things like gzip or TLS, which require both read and write access to the + /// underlying object. + /// + /// This objects takes a stream and a readbuffer and a writebuffer. These field + /// can be obtained from an existing `Framed` with the `into_parts` method. 
+ /// + /// If you want to work more directly with the streams and sink, consider + /// calling `split` on the `Framed` returned by this method, which will + /// break them into separate objects, allowing them to interact more easily. + pub fn from_parts(parts: FramedParts) -> Framed + { + Framed { + inner: framed_read2_with_buffer(framed_write2_with_buffer(Fuse(parts.io, parts.codec), parts.write_buf), parts.read_buf), + } + } + + /// Returns a reference to the underlying I/O stream wrapped by + /// `Frame`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_ref(&self) -> &T { + &self.inner.get_ref().get_ref().0 + } + + /// Returns a mutable reference to the underlying I/O stream wrapped by + /// `Frame`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner.get_mut().get_mut().0 + } + + /// Consumes the `Frame`, returning its underlying I/O stream. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn into_inner(self) -> T { + self.inner.into_inner().into_inner().0 + } + + /// Consumes the `Frame`, returning its underlying I/O stream, the buffer + /// with unprocessed data, and the codec. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. 
+ pub fn into_parts(self) -> FramedParts { + let (inner, read_buf) = self.inner.into_parts(); + let (inner, write_buf) = inner.into_parts(); + + FramedParts { + io: inner.0, + codec: inner.1, + read_buf: read_buf, + write_buf: write_buf, + _priv: (), + } + } +} + +impl Stream for Framed + where T: AsyncRead, + U: Decoder, +{ + type Item = U::Item; + type Error = U::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + self.inner.poll() + } +} + +impl Sink for Framed + where T: AsyncWrite, + U: Encoder, + U::Error: From, +{ + type SinkItem = U::Item; + type SinkError = U::Error; + + fn start_send(&mut self, + item: Self::SinkItem) + -> StartSend + { + self.inner.get_mut().start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.inner.get_mut().poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.inner.get_mut().close() + } +} + +impl fmt::Debug for Framed + where T: fmt::Debug, + U: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Framed") + .field("io", &self.inner.get_ref().get_ref().0) + .field("codec", &self.inner.get_ref().get_ref().1) + .finish() + } +} + +// ===== impl Fuse ===== + +impl Read for Fuse { + fn read(&mut self, dst: &mut [u8]) -> io::Result { + self.0.read(dst) + } +} + +impl AsyncRead for Fuse { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.0.prepare_uninitialized_buffer(buf) + } +} + +impl Write for Fuse { + fn write(&mut self, src: &[u8]) -> io::Result { + self.0.write(src) + } + + fn flush(&mut self) -> io::Result<()> { + self.0.flush() + } +} + +impl AsyncWrite for Fuse { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.0.shutdown() + } +} + +impl Decoder for Fuse { + type Item = U::Item; + type Error = U::Error; + + fn decode(&mut self, buffer: &mut BytesMut) -> Result, Self::Error> { + self.1.decode(buffer) + } + + fn decode_eof(&mut self, buffer: &mut BytesMut) -> Result, Self::Error> { + 
self.1.decode_eof(buffer) + } +} + +impl Encoder for Fuse { + type Item = U::Item; + type Error = U::Error; + + fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + self.1.encode(item, dst) + } +} + +/// `FramedParts` contains an export of the data of a Framed transport. +/// It can be used to construct a new `Framed` with a different codec. +/// It contains all current buffers and the inner transport. +#[derive(Debug)] +pub struct FramedParts { + /// The inner transport used to read bytes to and write bytes to + pub io: T, + + /// The codec + pub codec: U, + + /// The buffer with read but unprocessed data. + pub read_buf: BytesMut, + + /// A buffer with unprocessed data which are not written yet. + pub write_buf: BytesMut, + + /// This private field allows us to add additional fields in the future in a + /// backwards compatible way. + _priv: (), +} + +impl FramedParts { + /// Create a new, default, `FramedParts` + pub fn new(io: T, codec: U) -> FramedParts { + FramedParts { + io, + codec, + read_buf: BytesMut::new(), + write_buf: BytesMut::new(), + _priv: (), + } + } +} diff --git a/third_party/rust/tokio-io/src/_tokio_codec/framed_read.rs b/third_party/rust/tokio-io/src/_tokio_codec/framed_read.rs new file mode 100644 index 000000000000..279b1a3bc7d3 --- /dev/null +++ b/third_party/rust/tokio-io/src/_tokio_codec/framed_read.rs @@ -0,0 +1,214 @@ +#![allow(deprecated)] + +use std::fmt; + +use AsyncRead; +use codec::Decoder; +use super::framed::Fuse; + +use futures::{Async, Poll, Stream, Sink, StartSend}; +use bytes::BytesMut; + +/// A `Stream` of messages decoded from an `AsyncRead`. +pub struct FramedRead { + inner: FramedRead2>, +} + +pub struct FramedRead2 { + inner: T, + eof: bool, + is_readable: bool, + buffer: BytesMut, +} + +const INITIAL_CAPACITY: usize = 8 * 1024; + +// ===== impl FramedRead ===== + +impl FramedRead + where T: AsyncRead, + D: Decoder, +{ + /// Creates a new `FramedRead` with the given `decoder`. 
+ pub fn new(inner: T, decoder: D) -> FramedRead { + FramedRead { + inner: framed_read2(Fuse(inner, decoder)), + } + } +} + +impl FramedRead { + /// Returns a reference to the underlying I/O stream wrapped by + /// `FramedRead`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_ref(&self) -> &T { + &self.inner.inner.0 + } + + /// Returns a mutable reference to the underlying I/O stream wrapped by + /// `FramedRead`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner.inner.0 + } + + /// Consumes the `FramedRead`, returning its underlying I/O stream. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn into_inner(self) -> T { + self.inner.inner.0 + } + + /// Returns a reference to the underlying decoder. + pub fn decoder(&self) -> &D { + &self.inner.inner.1 + } + + /// Returns a mutable reference to the underlying decoder. 
+ pub fn decoder_mut(&mut self) -> &mut D { + &mut self.inner.inner.1 + } +} + +impl Stream for FramedRead + where T: AsyncRead, + D: Decoder, +{ + type Item = D::Item; + type Error = D::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + self.inner.poll() + } +} + +impl Sink for FramedRead + where T: Sink, +{ + type SinkItem = T::SinkItem; + type SinkError = T::SinkError; + + fn start_send(&mut self, + item: Self::SinkItem) + -> StartSend + { + self.inner.inner.0.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.inner.inner.0.poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + self.inner.inner.0.close() + } +} + +impl fmt::Debug for FramedRead + where T: fmt::Debug, + D: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FramedRead") + .field("inner", &self.inner.inner.0) + .field("decoder", &self.inner.inner.1) + .field("eof", &self.inner.eof) + .field("is_readable", &self.inner.is_readable) + .field("buffer", &self.inner.buffer) + .finish() + } +} + +// ===== impl FramedRead2 ===== + +pub fn framed_read2(inner: T) -> FramedRead2 { + FramedRead2 { + inner: inner, + eof: false, + is_readable: false, + buffer: BytesMut::with_capacity(INITIAL_CAPACITY), + } +} + +pub fn framed_read2_with_buffer(inner: T, mut buf: BytesMut) -> FramedRead2 { + if buf.capacity() < INITIAL_CAPACITY { + let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity(); + buf.reserve(bytes_to_reserve); + } + FramedRead2 { + inner: inner, + eof: false, + is_readable: buf.len() > 0, + buffer: buf, + } +} + +impl FramedRead2 { + pub fn get_ref(&self) -> &T { + &self.inner + } + + pub fn into_inner(self) -> T { + self.inner + } + + pub fn into_parts(self) -> (T, BytesMut) { + (self.inner, self.buffer) + } + + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } +} + +impl Stream for FramedRead2 + where T: AsyncRead + Decoder, +{ + type Item = T::Item; + type Error = T::Error; + + fn 
poll(&mut self) -> Poll, Self::Error> { + loop { + // Repeatedly call `decode` or `decode_eof` as long as it is + // "readable". Readable is defined as not having returned `None`. If + // the upstream has returned EOF, and the decoder is no longer + // readable, it can be assumed that the decoder will never become + // readable again, at which point the stream is terminated. + if self.is_readable { + if self.eof { + let frame = try!(self.inner.decode_eof(&mut self.buffer)); + return Ok(Async::Ready(frame)); + } + + trace!("attempting to decode a frame"); + + if let Some(frame) = try!(self.inner.decode(&mut self.buffer)) { + trace!("frame decoded from buffer"); + return Ok(Async::Ready(Some(frame))); + } + + self.is_readable = false; + } + + assert!(!self.eof); + + // Otherwise, try to read more data and try again. Make sure we've + // got room for at least one byte to read to ensure that we don't + // get a spurious 0 that looks like EOF + self.buffer.reserve(1); + if 0 == try_ready!(self.inner.read_buf(&mut self.buffer)) { + self.eof = true; + } + + self.is_readable = true; + } + } +} diff --git a/third_party/rust/tokio-io/src/_tokio_codec/framed_write.rs b/third_party/rust/tokio-io/src/_tokio_codec/framed_write.rs new file mode 100644 index 000000000000..9a01e91393e2 --- /dev/null +++ b/third_party/rust/tokio-io/src/_tokio_codec/framed_write.rs @@ -0,0 +1,237 @@ +#![allow(deprecated)] + +use std::io::{self, Read}; +use std::fmt; + +use {AsyncRead, AsyncWrite}; +use codec::{Decoder, Encoder}; +use super::framed::Fuse; + +use futures::{Async, AsyncSink, Poll, Stream, Sink, StartSend}; +use bytes::BytesMut; + +/// A `Sink` of frames encoded to an `AsyncWrite`. 
+pub struct FramedWrite { + inner: FramedWrite2>, +} + +pub struct FramedWrite2 { + inner: T, + buffer: BytesMut, +} + +const INITIAL_CAPACITY: usize = 8 * 1024; +const BACKPRESSURE_BOUNDARY: usize = INITIAL_CAPACITY; + +impl FramedWrite + where T: AsyncWrite, + E: Encoder, +{ + /// Creates a new `FramedWrite` with the given `encoder`. + pub fn new(inner: T, encoder: E) -> FramedWrite { + FramedWrite { + inner: framed_write2(Fuse(inner, encoder)), + } + } +} + +impl FramedWrite { + /// Returns a reference to the underlying I/O stream wrapped by + /// `FramedWrite`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_ref(&self) -> &T { + &self.inner.inner.0 + } + + /// Returns a mutable reference to the underlying I/O stream wrapped by + /// `FramedWrite`. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner.inner.0 + } + + /// Consumes the `FramedWrite`, returning its underlying I/O stream. + /// + /// Note that care should be taken to not tamper with the underlying stream + /// of data coming in as it may corrupt the stream of frames otherwise + /// being worked with. + pub fn into_inner(self) -> T { + self.inner.inner.0 + } + + /// Returns a reference to the underlying decoder. + pub fn encoder(&self) -> &E { + &self.inner.inner.1 + } + + /// Returns a mutable reference to the underlying decoder. 
+ pub fn encoder_mut(&mut self) -> &mut E { + &mut self.inner.inner.1 + } +} + +impl Sink for FramedWrite + where T: AsyncWrite, + E: Encoder, +{ + type SinkItem = E::Item; + type SinkError = E::Error; + + fn start_send(&mut self, item: E::Item) -> StartSend { + self.inner.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + self.inner.poll_complete() + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + Ok(try!(self.inner.close())) + } +} + +impl Stream for FramedWrite + where T: Stream, +{ + type Item = T::Item; + type Error = T::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + self.inner.inner.0.poll() + } +} + +impl fmt::Debug for FramedWrite + where T: fmt::Debug, + U: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FramedWrite") + .field("inner", &self.inner.get_ref().0) + .field("encoder", &self.inner.get_ref().1) + .field("buffer", &self.inner.buffer) + .finish() + } +} + +// ===== impl FramedWrite2 ===== + +pub fn framed_write2(inner: T) -> FramedWrite2 { + FramedWrite2 { + inner: inner, + buffer: BytesMut::with_capacity(INITIAL_CAPACITY), + } +} + +pub fn framed_write2_with_buffer(inner: T, mut buf: BytesMut) -> FramedWrite2 { + if buf.capacity() < INITIAL_CAPACITY { + let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity(); + buf.reserve(bytes_to_reserve); + } + FramedWrite2 { + inner: inner, + buffer: buf, + } +} + +impl FramedWrite2 { + pub fn get_ref(&self) -> &T { + &self.inner + } + + pub fn into_inner(self) -> T { + self.inner + } + + pub fn into_parts(self) -> (T, BytesMut) { + (self.inner, self.buffer) + } + + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } +} + +impl Sink for FramedWrite2 + where T: AsyncWrite + Encoder, +{ + type SinkItem = T::Item; + type SinkError = T::Error; + + fn start_send(&mut self, item: T::Item) -> StartSend { + // If the buffer is already over 8KiB, then attempt to flush it. 
If after flushing it's + // *still* over 8KiB, then apply backpressure (reject the send). + if self.buffer.len() >= BACKPRESSURE_BOUNDARY { + try!(self.poll_complete()); + + if self.buffer.len() >= BACKPRESSURE_BOUNDARY { + return Ok(AsyncSink::NotReady(item)); + } + } + + try!(self.inner.encode(item, &mut self.buffer)); + + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { + trace!("flushing framed transport"); + + while !self.buffer.is_empty() { + trace!("writing; remaining={}", self.buffer.len()); + + let n = try_ready!(self.inner.poll_write(&self.buffer)); + + if n == 0 { + return Err(io::Error::new(io::ErrorKind::WriteZero, "failed to + write frame to transport").into()); + } + + // TODO: Add a way to `bytes` to do this w/o returning the drained + // data. + let _ = self.buffer.split_to(n); + } + + // Try flushing the underlying IO + try_ready!(self.inner.poll_flush()); + + trace!("framed transport flushed"); + return Ok(Async::Ready(())); + } + + fn close(&mut self) -> Poll<(), Self::SinkError> { + try_ready!(self.poll_complete()); + Ok(try!(self.inner.shutdown())) + } +} + +impl Decoder for FramedWrite2 { + type Item = T::Item; + type Error = T::Error; + + fn decode(&mut self, src: &mut BytesMut) -> Result, T::Error> { + self.inner.decode(src) + } + + fn decode_eof(&mut self, src: &mut BytesMut) -> Result, T::Error> { + self.inner.decode_eof(src) + } +} + +impl Read for FramedWrite2 { + fn read(&mut self, dst: &mut [u8]) -> io::Result { + self.inner.read(dst) + } +} + +impl AsyncRead for FramedWrite2 { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } +} diff --git a/third_party/rust/tokio-io/src/_tokio_codec/mod.rs b/third_party/rust/tokio-io/src/_tokio_codec/mod.rs new file mode 100644 index 000000000000..1c703f3a40d4 --- /dev/null +++ b/third_party/rust/tokio-io/src/_tokio_codec/mod.rs @@ -0,0 +1,36 @@ +//! 
Utilities for encoding and decoding frames. +//! +//! Contains adapters to go from streams of bytes, [`AsyncRead`] and +//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. +//! Framed streams are also known as [transports]. +//! +//! [`AsyncRead`]: # +//! [`AsyncWrite`]: # +//! [`Sink`]: # +//! [`Stream`]: # +//! [transports]: # + +#![deny(missing_docs, missing_debug_implementations, warnings)] +#![doc(hidden, html_root_url = "https://docs.rs/tokio-codec/0.1.0")] + +// _tokio_codec are the items that belong in the `tokio_codec` crate. However, because we need to +// maintain backward compatibility until the next major breaking change, they are defined here. +// When the next breaking change comes, they should be moved to the `tokio_codec` crate and become +// independent. +// +// The primary reason we can't move these to `tokio-codec` now is because, again for backward +// compatibility reasons, we need to keep `Decoder` and `Encoder` in tokio_io::codec. And `Decoder` +// and `Encoder` needs to reference `Framed`. So they all still need to still be in the same +// module. + +mod decoder; +mod encoder; +mod framed; +mod framed_read; +mod framed_write; + +pub use self::decoder::Decoder; +pub use self::encoder::Encoder; +pub use self::framed::{Framed, FramedParts}; +pub use self::framed_read::FramedRead; +pub use self::framed_write::FramedWrite; diff --git a/third_party/rust/tokio-io/src/allow_std.rs b/third_party/rust/tokio-io/src/allow_std.rs new file mode 100644 index 000000000000..46b0376b5873 --- /dev/null +++ b/third_party/rust/tokio-io/src/allow_std.rs @@ -0,0 +1,81 @@ +use {AsyncRead, AsyncWrite}; +use futures::{Async, Poll}; +use std::{fmt, io}; + +/// A simple wrapper type which allows types that only implement +/// `std::io::Read` or `std::io::Write` to be used in contexts which expect +/// an `AsyncRead` or `AsyncWrite`. 
+/// +/// If these types issue an error with the kind `io::ErrorKind::WouldBlock`, +/// it is expected that they will notify the current task on readiness. +/// Synchronous `std` types should not issue errors of this kind and +/// are safe to use in this context. However, using these types with +/// `AllowStdIo` will cause the event loop to block, so they should be used +/// with care. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct AllowStdIo(T); + +impl AllowStdIo { + /// Creates a new `AllowStdIo` from an existing IO object. + pub fn new(io: T) -> Self { + AllowStdIo(io) + } + + /// Returns a reference to the contained IO object. + pub fn get_ref(&self) -> &T { + &self.0 + } + + /// Returns a mutable reference to the contained IO object. + pub fn get_mut(&mut self) -> &mut T { + &mut self.0 + } + + /// Consumes self and returns the contained IO object. + pub fn into_inner(self) -> T { + self.0 + } +} + +impl io::Write for AllowStdIo where T: io::Write { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.write(buf) + } + fn flush(&mut self) -> io::Result<()> { + self.0.flush() + } + fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { + self.0.write_all(buf) + } + fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> { + self.0.write_fmt(fmt) + } +} + +impl AsyncWrite for AllowStdIo where T: io::Write { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(Async::Ready(())) + } +} + +impl io::Read for AllowStdIo where T: io::Read { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.0.read(buf) + } + // TODO: implement the `initializer` fn when it stabilizes. 
+ // See rust-lang/rust #42788 + fn read_to_end(&mut self, buf: &mut Vec) -> io::Result { + self.0.read_to_end(buf) + } + fn read_to_string(&mut self, buf: &mut String) -> io::Result { + self.0.read_to_string(buf) + } + fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { + self.0.read_exact(buf) + } +} + +impl AsyncRead for AllowStdIo where T: io::Read { + // TODO: override prepare_uninitialized_buffer once `Read::initializer` is stable. + // See rust-lang/rust #42788 +} diff --git a/third_party/rust/tokio-io/src/async_read.rs b/third_party/rust/tokio-io/src/async_read.rs new file mode 100644 index 000000000000..3c2c3bab092d --- /dev/null +++ b/third_party/rust/tokio-io/src/async_read.rs @@ -0,0 +1,168 @@ +use std::io as std_io; +use bytes::BufMut; +use futures::{Async, Poll}; + +use {framed, split, AsyncWrite}; +#[allow(deprecated)] +use codec::{Decoder, Encoder, Framed}; +use split::{ReadHalf, WriteHalf}; + +/// Read bytes asynchronously. +/// +/// This trait inherits from `std::io::Read` and indicates that an I/O object is +/// **non-blocking**. All non-blocking I/O objects must return an error when +/// bytes are unavailable instead of blocking the current thread. +/// +/// Specifically, this means that the `read` function will return one of the +/// following: +/// +/// * `Ok(n)` means that `n` bytes of data was immediately read and placed into +/// the output buffer, where `n` == 0 implies that EOF has been reached. +/// +/// * `Err(e) if e.kind() == ErrorKind::WouldBlock` means that no data was read +/// into the buffer provided. The I/O object is not currently readable but may +/// become readable in the future. Most importantly, **the current future's +/// task is scheduled to get unparked when the object is readable**. This +/// means that like `Future::poll` you'll receive a notification when the I/O +/// object is readable again. +/// +/// * `Err(e)` for other errors are standard I/O errors coming from the +/// underlying object. 
+/// +/// This trait importantly means that the `read` method only works in the +/// context of a future's task. The object may panic if used outside of a task. +pub trait AsyncRead: std_io::Read { + /// Prepares an uninitialized buffer to be safe to pass to `read`. Returns + /// `true` if the supplied buffer was zeroed out. + /// + /// While it would be highly unusual, implementations of [`io::Read`] are + /// able to read data from the buffer passed as an argument. Because of + /// this, the buffer passed to [`io::Read`] must be initialized memory. In + /// situations where large numbers of buffers are used, constantly having to + /// zero out buffers can be expensive. + /// + /// This function does any necessary work to prepare an uninitialized buffer + /// to be safe to pass to `read`. If `read` guarantees to never attempt read + /// data out of the supplied buffer, then `prepare_uninitialized_buffer` + /// doesn't need to do any work. + /// + /// If this function returns `true`, then the memory has been zeroed out. + /// This allows implementations of `AsyncRead` which are composed of + /// multiple sub implementations to efficiently implement + /// `prepare_uninitialized_buffer`. + /// + /// This function isn't actually `unsafe` to call but `unsafe` to implement. + /// The implementor must ensure that either the whole `buf` has been zeroed + /// or `read_buf()` overwrites the buffer without reading it and returns + /// correct value. + /// + /// This function is called from [`read_buf`]. + /// + /// [`io::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html + /// [`read_buf`]: #method.read_buf + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + for i in 0..buf.len() { + buf[i] = 0; + } + + true + } + + /// Attempt to read from the `AsyncRead` into `buf`. + /// + /// On success, returns `Ok(Async::Ready(num_bytes_read))`. 
+ /// + /// If no data is available for reading, the method returns + /// `Ok(Async::Pending)` and arranges for the current task (via + /// `cx.waker()`) to receive a notification when the object becomes + /// readable or is closed. + fn poll_read(&mut self, buf: &mut [u8]) -> Poll { + match self.read(buf) { + Ok(t) => Ok(Async::Ready(t)), + Err(ref e) if e.kind() == std_io::ErrorKind::WouldBlock => { + return Ok(Async::NotReady) + } + Err(e) => return Err(e.into()), + } + } + + /// Pull some bytes from this source into the specified `Buf`, returning + /// how many bytes were read. + /// + /// The `buf` provided will have bytes read into it and the internal cursor + /// will be advanced if any bytes were read. Note that this method typically + /// will not reallocate the buffer provided. + fn read_buf(&mut self, buf: &mut B) -> Poll + where Self: Sized, + { + if !buf.has_remaining_mut() { + return Ok(Async::Ready(0)); + } + + unsafe { + let n = { + let b = buf.bytes_mut(); + + self.prepare_uninitialized_buffer(b); + + try_ready!(self.poll_read(b)) + }; + + buf.advance_mut(n); + Ok(Async::Ready(n)) + } + } + + /// Provides a `Stream` and `Sink` interface for reading and writing to this + /// `Io` object, using `Decode` and `Encode` to read and write the raw data. + /// + /// Raw I/O objects work with byte sequences, but higher-level code usually + /// wants to batch these into meaningful chunks, called "frames". This + /// method layers framing on top of an I/O object, by using the `Codec` + /// traits to handle encoding and decoding of messages frames. Note that + /// the incoming and outgoing frame types may be distinct. + /// + /// This function returns a *single* object that is both `Stream` and + /// `Sink`; grouping this into a single object is often useful for layering + /// things like gzip or TLS, which require both read and write access to the + /// underlying object. 
+ /// + /// If you want to work more directly with the streams and sink, consider + /// calling `split` on the `Framed` returned by this method, which will + /// break them into separate objects, allowing them to interact more easily. + #[deprecated(since = "0.1.7", note = "Use tokio_codec::Decoder::framed instead")] + #[allow(deprecated)] + fn framed(self, codec: T) -> Framed + where Self: AsyncWrite + Sized, + { + framed::framed(self, codec) + } + + /// Helper method for splitting this read/write object into two halves. + /// + /// The two halves returned implement the `Read` and `Write` traits, + /// respectively. + fn split(self) -> (ReadHalf, WriteHalf) + where Self: AsyncWrite + Sized, + { + split::split(self) + } +} + +impl AsyncRead for Box { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + (**self).prepare_uninitialized_buffer(buf) + } +} + +impl<'a, T: ?Sized + AsyncRead> AsyncRead for &'a mut T { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + (**self).prepare_uninitialized_buffer(buf) + } +} + +impl<'a> AsyncRead for &'a [u8] { + unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [u8]) -> bool { + false + } +} diff --git a/third_party/rust/tokio-io/src/async_write.rs b/third_party/rust/tokio-io/src/async_write.rs new file mode 100644 index 000000000000..514a8ec15520 --- /dev/null +++ b/third_party/rust/tokio-io/src/async_write.rs @@ -0,0 +1,219 @@ +use std::io as std_io; +use bytes::Buf; +use futures::{Async, Poll}; + +use AsyncRead; + +/// Writes bytes asynchronously. +/// +/// The trait inherits from `std::io::Write` and indicates that an I/O object is +/// **nonblocking**. All non-blocking I/O objects must return an error when +/// bytes cannot be written instead of blocking the current thread. +/// +/// Specifically, this means that the `write` function will return one of the +/// following: +/// +/// * `Ok(n)` means that `n` bytes of data was immediately written . 
+/// +/// * `Err(e) if e.kind() == ErrorKind::WouldBlock` means that no data was +/// written from the buffer provided. The I/O object is not currently +/// writable but may become writable in the future. Most importantly, **the +/// current future's task is scheduled to get unparked when the object is +/// readable**. This means that like `Future::poll` you'll receive a +/// notification when the I/O object is writable again. +/// +/// * `Err(e)` for other errors are standard I/O errors coming from the +/// underlying object. +/// +/// This trait importantly means that the `write` method only works in the +/// context of a future's task. The object may panic if used outside of a task. +/// +/// Note that this trait also represents that the `Write::flush` method works +/// very similarly to the `write` method, notably that `Ok(())` means that the +/// writer has successfully been flushed, a "would block" error means that the +/// current task is ready to receive a notification when flushing can make more +/// progress, and otherwise normal errors can happen as well. +pub trait AsyncWrite: std_io::Write { + /// Attempt to write bytes from `buf` into the object. + /// + /// On success, returns `Ok(Async::Ready(num_bytes_written))`. + /// + /// If the object is not ready for writing, the method returns + /// `Ok(Async::Pending)` and arranges for the current task (via + /// `cx.waker()`) to receive a notification when the object becomes + /// readable or is closed. + fn poll_write(&mut self, buf: &[u8]) -> Poll { + match self.write(buf) { + Ok(t) => Ok(Async::Ready(t)), + Err(ref e) if e.kind() == std_io::ErrorKind::WouldBlock => { + return Ok(Async::NotReady) + } + Err(e) => return Err(e.into()), + } + } + + /// Attempt to flush the object, ensuring that any buffered data reach + /// their destination. + /// + /// On success, returns `Ok(Async::Ready(()))`. 
+ /// + /// If flushing cannot immediately complete, this method returns + /// `Ok(Async::Pending)` and arranges for the current task (via + /// `cx.waker()`) to receive a notification when the object can make + /// progress towards flushing. + fn poll_flush(&mut self) -> Poll<(), std_io::Error> { + match self.flush() { + Ok(t) => Ok(Async::Ready(t)), + Err(ref e) if e.kind() == std_io::ErrorKind::WouldBlock => { + return Ok(Async::NotReady) + } + Err(e) => return Err(e.into()), + } + } + + /// Initiates or attempts to shut down this writer, returning success when + /// the I/O connection has completely shut down. + /// + /// This method is intended to be used for asynchronous shutdown of I/O + /// connections. For example this is suitable for implementing shutdown of a + /// TLS connection or calling `TcpStream::shutdown` on a proxied connection. + /// Protocols sometimes need to flush out final pieces of data or otherwise + /// perform a graceful shutdown handshake, reading/writing more data as + /// appropriate. This method is the hook for such protocols to implement the + /// graceful shutdown logic. + /// + /// This `shutdown` method is required by implementors of the + /// `AsyncWrite` trait. Wrappers typically just want to proxy this call + /// through to the wrapped type, and base types will typically implement + /// shutdown logic here or just return `Ok(().into())`. Note that if you're + /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that + /// transitively the entire stream has been shut down. After your wrapper's + /// shutdown logic has been executed you should shut down the underlying + /// stream. + /// + /// Invocation of a `shutdown` implies an invocation of `flush`. Once this + /// method returns `Ready` it implies that a flush successfully happened + /// before the shutdown happened. That is, callers don't need to call + /// `flush` before calling `shutdown`. 
They can rely that by calling + /// `shutdown` any pending buffered data will be written out. + /// + /// # Return value + /// + /// This function returns a `Poll<(), io::Error>` classified as such: + /// + /// * `Ok(Async::Ready(()))` - indicates that the connection was + /// successfully shut down and is now safe to deallocate/drop/close + /// resources associated with it. This method means that the current task + /// will no longer receive any notifications due to this method and the + /// I/O object itself is likely no longer usable. + /// + /// * `Ok(Async::NotReady)` - indicates that shutdown is initiated but could + /// not complete just yet. This may mean that more I/O needs to happen to + /// continue this shutdown operation. The current task is scheduled to + /// receive a notification when it's otherwise ready to continue the + /// shutdown operation. When woken up this method should be called again. + /// + /// * `Err(e)` - indicates a fatal error has happened with shutdown, + /// indicating that the shutdown operation did not complete successfully. + /// This typically means that the I/O object is no longer usable. + /// + /// # Errors + /// + /// This function can return normal I/O errors through `Err`, described + /// above. Additionally this method may also render the underlying + /// `Write::write` method no longer usable (e.g. will return errors in the + /// future). It's recommended that once `shutdown` is called the + /// `write` method is no longer called. + /// + /// # Panics + /// + /// This function will panic if not called within the context of a future's + /// task. + fn shutdown(&mut self) -> Poll<(), std_io::Error>; + + /// Write a `Buf` into this value, returning how many bytes were written. + /// + /// Note that this method will advance the `buf` provided automatically by + /// the number of bytes written. 
+ fn write_buf(&mut self, buf: &mut B) -> Poll + where Self: Sized, + { + if !buf.has_remaining() { + return Ok(Async::Ready(0)); + } + + let n = try_ready!(self.poll_write(buf.bytes())); + buf.advance(n); + Ok(Async::Ready(n)) + } +} + +impl AsyncWrite for Box { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + (**self).shutdown() + } +} +impl<'a, T: ?Sized + AsyncWrite> AsyncWrite for &'a mut T { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + (**self).shutdown() + } +} + +impl AsyncRead for std_io::Repeat { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { + false + } +} + +impl AsyncWrite for std_io::Sink { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + Ok(().into()) + } +} + +// TODO: Implement `prepare_uninitialized_buffer` for `io::Take`. +// This is blocked on rust-lang/rust#27269 +impl AsyncRead for std_io::Take { +} + +// TODO: Implement `prepare_uninitialized_buffer` when upstream exposes inner +// parts +impl AsyncRead for std_io::Chain + where T: AsyncRead, + U: AsyncRead, +{ +} + +impl AsyncWrite for std_io::BufWriter { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + try_ready!(self.poll_flush()); + self.get_mut().shutdown() + } +} + +impl AsyncRead for std_io::BufReader { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.get_ref().prepare_uninitialized_buffer(buf) + } +} + +impl> AsyncRead for std_io::Cursor { +} + +impl<'a> AsyncWrite for std_io::Cursor<&'a mut [u8]> { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + Ok(().into()) + } +} + +impl AsyncWrite for std_io::Cursor> { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + Ok(().into()) + } +} + +impl AsyncWrite for std_io::Cursor> { + fn shutdown(&mut self) -> Poll<(), std_io::Error> { + Ok(().into()) + } +} diff --git a/third_party/rust/tokio-io/src/codec/bytes_codec.rs b/third_party/rust/tokio-io/src/codec/bytes_codec.rs new file mode 100644 index 000000000000..c77f5ca530c2 --- /dev/null +++ 
b/third_party/rust/tokio-io/src/codec/bytes_codec.rs @@ -0,0 +1,40 @@ +#![allow(deprecated)] + +use bytes::{Bytes, BufMut, BytesMut}; +use codec::{Encoder, Decoder}; +use std::io; + +/// A simple `Codec` implementation that just ships bytes around. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +pub struct BytesCodec(()); + +impl BytesCodec { + /// Creates a new `BytesCodec` for shipping around raw bytes. + pub fn new() -> BytesCodec { BytesCodec(()) } +} + +impl Decoder for BytesCodec { + type Item = BytesMut; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + if buf.len() > 0 { + let len = buf.len(); + Ok(Some(buf.split_to(len))) + } else { + Ok(None) + } + } +} + +impl Encoder for BytesCodec { + type Item = Bytes; + type Error = io::Error; + + fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(), io::Error> { + buf.reserve(data.len()); + buf.put(data); + Ok(()) + } +} diff --git a/third_party/rust/tokio-io/src/codec/decoder.rs b/third_party/rust/tokio-io/src/codec/decoder.rs new file mode 100644 index 000000000000..9797fb0d0323 --- /dev/null +++ b/third_party/rust/tokio-io/src/codec/decoder.rs @@ -0,0 +1,117 @@ +use std::io; +use bytes::BytesMut; + +use {AsyncWrite, AsyncRead}; +use super::encoder::Encoder; + +use ::_tokio_codec::Framed; + +/// Decoding of frames via buffers. +/// +/// This trait is used when constructing an instance of `Framed` or +/// `FramedRead`. An implementation of `Decoder` takes a byte stream that has +/// already been buffered in `src` and decodes the data into a stream of +/// `Self::Item` frames. +/// +/// Implementations are able to track state on `self`, which enables +/// implementing stateful streaming parsers. In many cases, though, this type +/// will simply be a unit struct (e.g. `struct HttpDecoder`). 
+ +// Note: We can't deprecate this trait, because the deprecation carries through to tokio-codec, and +// there doesn't seem to be a way to un-deprecate the re-export. +pub trait Decoder { + /// The type of decoded frames. + type Item; + + /// The type of unrecoverable frame decoding errors. + /// + /// If an individual message is ill-formed but can be ignored without + /// interfering with the processing of future messages, it may be more + /// useful to report the failure as an `Item`. + /// + /// `From` is required in the interest of making `Error` suitable + /// for returning directly from a `FramedRead`, and to enable the default + /// implementation of `decode_eof` to yield an `io::Error` when the decoder + /// fails to consume all available data. + /// + /// Note that implementors of this trait can simply indicate `type Error = + /// io::Error` to use I/O errors as this type. + type Error: From; + + /// Attempts to decode a frame from the provided buffer of bytes. + /// + /// This method is called by `FramedRead` whenever bytes are ready to be + /// parsed. The provided buffer of bytes is what's been read so far, and + /// this instance of `Decode` can determine whether an entire frame is in + /// the buffer and is ready to be returned. + /// + /// If an entire frame is available, then this instance will remove those + /// bytes from the buffer provided and return them as a decoded + /// frame. Note that removing bytes from the provided buffer doesn't always + /// necessarily copy the bytes, so this should be an efficient operation in + /// most circumstances. + /// + /// If the bytes look valid, but a frame isn't fully available yet, then + /// `Ok(None)` is returned. This indicates to the `Framed` instance that + /// it needs to read some more bytes before calling this method again. + /// + /// Note that the bytes provided may be empty. 
If a previous call to + /// `decode` consumed all the bytes in the buffer then `decode` will be + /// called again until it returns `None`, indicating that more bytes need to + /// be read. + /// + /// Finally, if the bytes in the buffer are malformed then an error is + /// returned indicating why. This informs `Framed` that the stream is now + /// corrupt and should be terminated. + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error>; + + /// A default method available to be called when there are no more bytes + /// available to be read from the underlying I/O. + /// + /// This method defaults to calling `decode` and returns an error if + /// `Ok(None)` is returned while there is unconsumed data in `buf`. + /// Typically this doesn't need to be implemented unless the framing + /// protocol differs near the end of the stream. + /// + /// Note that the `buf` argument may be empty. If a previous call to + /// `decode_eof` consumed all the bytes in the buffer, `decode_eof` will be + /// called again until it returns `None`, indicating that there are no more + /// frames to yield. This behavior enables returning finalization frames + /// that may not be based on inbound data. + fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { + match try!(self.decode(buf)) { + Some(frame) => Ok(Some(frame)), + None => { + if buf.is_empty() { + Ok(None) + } else { + Err(io::Error::new(io::ErrorKind::Other, + "bytes remaining on stream").into()) + } + } + } + } + + /// Provides a `Stream` and `Sink` interface for reading and writing to this + /// `Io` object, using `Decode` and `Encode` to read and write the raw data. + /// + /// Raw I/O objects work with byte sequences, but higher-level code usually + /// wants to batch these into meaningful chunks, called "frames". This + /// method layers framing on top of an I/O object, by using the `Codec` + /// traits to handle encoding and decoding of messages frames. 
Note that + /// the incoming and outgoing frame types may be distinct. + /// + /// This function returns a *single* object that is both `Stream` and + /// `Sink`; grouping this into a single object is often useful for layering + /// things like gzip or TLS, which require both read and write access to the + /// underlying object. + /// + /// If you want to work more directly with the streams and sink, consider + /// calling `split` on the `Framed` returned by this method, which will + /// break them into separate objects, allowing them to interact more easily. + fn framed(self, io: T) -> Framed + where Self: Encoder + Sized, + { + Framed::new(io, self) + } +} diff --git a/third_party/rust/tokio-io/src/codec/encoder.rs b/third_party/rust/tokio-io/src/codec/encoder.rs new file mode 100644 index 000000000000..222990d2e306 --- /dev/null +++ b/third_party/rust/tokio-io/src/codec/encoder.rs @@ -0,0 +1,26 @@ +use std::io; +use bytes::BytesMut; + +/// Trait of helper objects to write out messages as bytes, for use with +/// `FramedWrite`. + +// Note: We can't deprecate this trait, because the deprecation carries through to tokio-codec, and +// there doesn't seem to be a way to un-deprecate the re-export. +pub trait Encoder { + /// The type of items consumed by the `Encoder` + type Item; + + /// The type of encoding errors. + /// + /// `FramedWrite` requires `Encoder`s errors to implement `From` + /// in the interest letting it return `Error`s directly. + type Error: From; + + /// Encodes a frame into the buffer provided. + /// + /// This method will encode `item` into the byte buffer provided by `dst`. + /// The `dst` provided is an internal buffer of the `Framed` instance and + /// will be written out when possible. 
+ fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) + -> Result<(), Self::Error>; +} diff --git a/third_party/rust/tokio-io/src/codec/lines_codec.rs b/third_party/rust/tokio-io/src/codec/lines_codec.rs new file mode 100644 index 000000000000..7056d5c8cd25 --- /dev/null +++ b/third_party/rust/tokio-io/src/codec/lines_codec.rs @@ -0,0 +1,92 @@ +#![allow(deprecated)] + +use bytes::{BufMut, BytesMut}; +use codec::{Encoder, Decoder}; +use std::{io, str}; + +/// A simple `Codec` implementation that splits up data into lines. +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +pub struct LinesCodec { + // Stored index of the next index to examine for a `\n` character. + // This is used to optimize searching. + // For example, if `decode` was called with `abc`, it would hold `3`, + // because that is the next index to examine. + // The next time `decode` is called with `abcde\n`, the method will + // only look at `de\n` before returning. + next_index: usize, +} + +impl LinesCodec { + /// Returns a `LinesCodec` for splitting up data into lines. 
+ pub fn new() -> LinesCodec { + LinesCodec { next_index: 0 } + } +} + +fn utf8(buf: &[u8]) -> Result<&str, io::Error> { + str::from_utf8(buf).map_err(|_| + io::Error::new( + io::ErrorKind::InvalidData, + "Unable to decode input as UTF8")) +} + +fn without_carriage_return(s: &[u8]) -> &[u8] { + if let Some(&b'\r') = s.last() { + &s[..s.len() - 1] + } else { + s + } +} + +impl Decoder for LinesCodec { + type Item = String; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + if let Some(newline_offset) = + buf[self.next_index..].iter().position(|b| *b == b'\n') + { + let newline_index = newline_offset + self.next_index; + let line = buf.split_to(newline_index + 1); + let line = &line[..line.len()-1]; + let line = without_carriage_return(line); + let line = utf8(line)?; + self.next_index = 0; + Ok(Some(line.to_string())) + } else { + self.next_index = buf.len(); + Ok(None) + } + } + + fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + Ok(match self.decode(buf)? { + Some(frame) => Some(frame), + None => { + // No terminating newline - return remaining data, if any + if buf.is_empty() || buf == &b"\r"[..] { + None + } else { + let line = buf.take(); + let line = without_carriage_return(&line); + let line = utf8(line)?; + self.next_index = 0; + Some(line.to_string()) + } + } + }) + } +} + +impl Encoder for LinesCodec { + type Item = String; + type Error = io::Error; + + fn encode(&mut self, line: String, buf: &mut BytesMut) -> Result<(), io::Error> { + buf.reserve(line.len() + 1); + buf.put(line); + buf.put_u8(b'\n'); + Ok(()) + } +} diff --git a/third_party/rust/tokio-io/src/codec.rs b/third_party/rust/tokio-io/src/codec/mod.rs similarity index 94% rename from third_party/rust/tokio-io/src/codec.rs rename to third_party/rust/tokio-io/src/codec/mod.rs index 2097c7a4ac9a..c4dab6564728 100644 --- a/third_party/rust/tokio-io/src/codec.rs +++ b/third_party/rust/tokio-io/src/codec/mod.rs @@ -10,9 +10,27 @@ //! 
[`Stream`]: # //! [transports]: # +// tokio_io::codec originally held all codec-related helpers. This is now intended to be in +// tokio_codec instead. However, for backward compatibility, this remains here. When the next major +// breaking change comes, `Encoder` and `Decoder` need to be moved to `tokio_codec`, and the rest +// of this module should be removed. + +#![doc(hidden)] +#![allow(deprecated)] + +mod decoder; +mod encoder; +mod bytes_codec; +mod lines_codec; + +pub use self::decoder::Decoder; +pub use self::encoder::Encoder; +pub use self::bytes_codec::BytesCodec; +pub use self::lines_codec::LinesCodec; + pub use framed::{Framed, FramedParts}; -pub use framed_read::{FramedRead, Decoder}; -pub use framed_write::{FramedWrite, Encoder}; +pub use framed_read::FramedRead; +pub use framed_write::FramedWrite; pub mod length_delimited { //! Frame a stream of bytes based on a length prefix @@ -42,7 +60,7 @@ pub mod length_delimited { //! ``` //! //! The returned transport implements `Sink + Stream` for `BytesMut`. It - //! encodes the frame with a big-endian `u32` header denotating the frame + //! encodes the frame with a big-endian `u32` header denoting the frame //! payload length: //! //! ```text @@ -85,7 +103,7 @@ pub mod length_delimited { //! //! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`], //! such that each yielded [`BytesMut`] value contains the contents of an - //! entire frame. There are many configuration paramaters enabling + //! entire frame. There are many configuration parameters enabling //! [`FrameRead`] to handle a wide range of protocols. Here are some //! examples that will cover the various options at a high level. //! 
diff --git a/third_party/rust/tokio-io/src/framed.rs b/third_party/rust/tokio-io/src/framed.rs index e325547071aa..7235b1c6d534 100644 --- a/third_party/rust/tokio-io/src/framed.rs +++ b/third_party/rust/tokio-io/src/framed.rs @@ -1,9 +1,12 @@ +#![allow(deprecated)] + use std::io::{self, Read, Write}; use std::fmt; use {AsyncRead, AsyncWrite}; -use framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2, Decoder}; -use framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2, Encoder}; +use codec::{Decoder, Encoder}; +use framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2}; +use framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2}; use futures::{Stream, Sink, StartSend, Poll}; use bytes::{BytesMut}; @@ -12,10 +15,14 @@ use bytes::{BytesMut}; /// the `Encoder` and `Decoder` traits to encode and decode frames. /// /// You can create a `Framed` instance by using the `AsyncRead::framed` adapter. +#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +#[doc(hidden)] pub struct Framed { inner: FramedRead2>>, } +#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +#[doc(hidden)] pub struct Fuse(pub T, pub U); pub fn framed(inner: T, codec: U) -> Framed @@ -225,4 +232,4 @@ pub struct FramedParts pub readbuf: BytesMut, /// A buffer with unprocessed data which are not written yet. pub writebuf: BytesMut -} \ No newline at end of file +} diff --git a/third_party/rust/tokio-io/src/framed_read.rs b/third_party/rust/tokio-io/src/framed_read.rs index 7da3323c864d..69b7f4631551 100644 --- a/third_party/rust/tokio-io/src/framed_read.rs +++ b/third_party/rust/tokio-io/src/framed_read.rs @@ -1,100 +1,23 @@ -use std::{fmt, io}; +#![allow(deprecated)] + +use std::fmt; use AsyncRead; +use codec::Decoder; use framed::Fuse; use futures::{Async, Poll, Stream, Sink, StartSend}; use bytes::BytesMut; -/// Decoding of frames via buffers. 
-/// -/// This trait is used when constructing an instance of `Framed` or -/// `FramedRead`. An implementation of `Decoder` takes a byte stream that has -/// already been buffered in `src` and decodes the data into a stream of -/// `Self::Item` frames. -/// -/// Implementations are able to track state on `self`, which enables -/// implementing stateful streaming parsers. In many cases, though, this type -/// will simply be a unit struct (e.g. `struct HttpDecoder`). -pub trait Decoder { - /// The type of decoded frames. - type Item; - - /// The type of unrecoverable frame decoding errors. - /// - /// If an individual message is ill-formed but can be ignored without - /// interfering with the processing of future messages, it may be more - /// useful to report the failure as an `Item`. - /// - /// `From` is required in the interest of making `Error` suitable - /// for returning directly from a `FramedRead`, and to enable the default - /// implementation of `decode_eof` to yield an `io::Error` when the decoder - /// fails to consume all available data. - /// - /// Note that implementors of this trait can simply indicate `type Error = - /// io::Error` to use I/O errors as this type. - type Error: From; - - /// Attempts to decode a frame from the provided buffer of bytes. - /// - /// This method is called by `FramedRead` whenever bytes are ready to be - /// parsed. The provided buffer of bytes is what's been read so far, and - /// this instance of `Decode` can determine whether an entire frame is in - /// the buffer and is ready to be returned. - /// - /// If an entire frame is available, then this instance will remove those - /// bytes from the buffer provided and return them as a decoded - /// frame. Note that removing bytes from the provided buffer doesn't always - /// necessarily copy the bytes, so this should be an efficient operation in - /// most circumstances. 
- /// - /// If the bytes look valid, but a frame isn't fully available yet, then - /// `Ok(None)` is returned. This indicates to the `Framed` instance that - /// it needs to read some more bytes before calling this method again. - /// - /// Note that the bytes provided may be empty. If a previous call to - /// `decode` consumed all the bytes in the buffer then `decode` will be - /// called again until it returns `None`, indicating that more bytes need to - /// be read. - /// - /// Finally, if the bytes in the buffer are malformed then an error is - /// returned indicating why. This informs `Framed` that the stream is now - /// corrupt and should be terminated. - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error>; - - /// A default method available to be called when there are no more bytes - /// available to be read from the underlying I/O. - /// - /// This method defaults to calling `decode` and returns an error if - /// `Ok(None)` is returned while there is unconsumed data in `buf`. - /// Typically this doesn't need to be implemented unless the framing - /// protocol differs near the end of the stream. - /// - /// Note that the `buf` argument may be empty. If a previous call to - /// `decode_eof` consumed all the bytes in the bufer, `decode_eof` will be - /// called again until it returns `None`, indicating that there are no more - /// frames to yield. This behavior enables returning finalization frames - /// that may not be based on inbound data. - fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { - match try!(self.decode(buf)) { - Some(frame) => Ok(Some(frame)), - None => { - if buf.is_empty() { - Ok(None) - } else { - Err(io::Error::new(io::ErrorKind::Other, - "bytes remaining on stream").into()) - } - } - } - } -} - /// A `Stream` of messages decoded from an `AsyncRead`. 
+#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +#[doc(hidden)] pub struct FramedRead { inner: FramedRead2>, } +#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +#[doc(hidden)] pub struct FramedRead2 { inner: T, eof: bool, diff --git a/third_party/rust/tokio-io/src/framed_write.rs b/third_party/rust/tokio-io/src/framed_write.rs index 90f835bf2d96..392300ea905b 100644 --- a/third_party/rust/tokio-io/src/framed_write.rs +++ b/third_party/rust/tokio-io/src/framed_write.rs @@ -1,39 +1,24 @@ +#![allow(deprecated)] + use std::io::{self, Read}; use std::fmt; use {AsyncRead, AsyncWrite}; -use codec::Decoder; +use codec::{Decoder, Encoder}; use framed::Fuse; use futures::{Async, AsyncSink, Poll, Stream, Sink, StartSend}; use bytes::BytesMut; -/// Trait of helper objects to write out messages as bytes, for use with -/// `FramedWrite`. -pub trait Encoder { - /// The type of items consumed by the `Encoder` - type Item; - - /// The type of encoding errors. - /// - /// `FramedWrite` requires `Encoder`s errors to implement `From` - /// in the interest letting it return `Error`s directly. - type Error: From; - - /// Encodes a frame into the buffer provided. - /// - /// This method will encode `msg` into the byte buffer provided by `buf`. - /// The `buf` provided is an internal buffer of the `Framed` instance and - /// will be written out when possible. - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) - -> Result<(), Self::Error>; -} - /// A `Sink` of frames encoded to an `AsyncWrite`. 
+#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +#[doc(hidden)] pub struct FramedWrite { inner: FramedWrite2>, } +#[deprecated(since = "0.1.7", note = "Moved to tokio-codec")] +#[doc(hidden)] pub struct FramedWrite2 { inner: T, buffer: BytesMut, @@ -205,7 +190,7 @@ impl Sink for FramedWrite2 while !self.buffer.is_empty() { trace!("writing; remaining={}", self.buffer.len()); - let n = try_nb!(self.inner.write(&self.buffer)); + let n = try_ready!(self.inner.poll_write(&self.buffer)); if n == 0 { return Err(io::Error::new(io::ErrorKind::WriteZero, "failed to @@ -218,7 +203,7 @@ impl Sink for FramedWrite2 } // Try flushing the underlying IO - try_nb!(self.inner.flush()); + try_ready!(self.inner.poll_flush()); trace!("framed transport flushed"); return Ok(Async::Ready(())); diff --git a/third_party/rust/tokio-io/src/copy.rs b/third_party/rust/tokio-io/src/io/copy.rs similarity index 92% rename from third_party/rust/tokio-io/src/copy.rs rename to third_party/rust/tokio-io/src/io/copy.rs index 8b8c0fe4e1d3..b21dc1c4ade4 100644 --- a/third_party/rust/tokio-io/src/copy.rs +++ b/third_party/rust/tokio-io/src/io/copy.rs @@ -60,7 +60,7 @@ impl Future for Copy // continue. if self.pos == self.cap && !self.read_done { let reader = self.reader.as_mut().unwrap(); - let n = try_nb!(reader.read(&mut self.buf)); + let n = try_ready!(reader.poll_read(&mut self.buf)); if n == 0 { self.read_done = true; } else { @@ -72,7 +72,7 @@ impl Future for Copy // If our buffer has some data, let's write it out! while self.pos < self.cap { let writer = self.writer.as_mut().unwrap(); - let i = try_nb!(writer.write(&self.buf[self.pos..self.cap])); + let i = try_ready!(writer.poll_write(&self.buf[self.pos..self.cap])); if i == 0 { return Err(io::Error::new(io::ErrorKind::WriteZero, "write zero byte into writer")); @@ -86,7 +86,7 @@ impl Future for Copy // data and finish the transfer. // done with the entire transfer. 
if self.pos == self.cap && self.read_done { - try_nb!(self.writer.as_mut().unwrap().flush()); + try_ready!(self.writer.as_mut().unwrap().poll_flush()); let reader = self.reader.take().unwrap(); let writer = self.writer.take().unwrap(); return Ok((self.amt, reader, writer).into()) diff --git a/third_party/rust/tokio-io/src/flush.rs b/third_party/rust/tokio-io/src/io/flush.rs similarity index 94% rename from third_party/rust/tokio-io/src/flush.rs rename to third_party/rust/tokio-io/src/io/flush.rs index 29065f9f9617..dabdc6c9c600 100644 --- a/third_party/rust/tokio-io/src/flush.rs +++ b/third_party/rust/tokio-io/src/io/flush.rs @@ -37,7 +37,7 @@ impl Future for Flush type Error = io::Error; fn poll(&mut self) -> Poll { - try_nb!(self.a.as_mut().unwrap().flush()); + try_ready!(self.a.as_mut().unwrap().poll_flush()); Ok(Async::Ready(self.a.take().unwrap())) } } diff --git a/third_party/rust/tokio-io/src/io.rs b/third_party/rust/tokio-io/src/io/mod.rs similarity index 52% rename from third_party/rust/tokio-io/src/io.rs rename to third_party/rust/tokio-io/src/io/mod.rs index e85cf3a75b2c..95eea0957e68 100644 --- a/third_party/rust/tokio-io/src/io.rs +++ b/third_party/rust/tokio-io/src/io/mod.rs @@ -9,14 +9,24 @@ //! [found online]: https://tokio.rs/docs/getting-started/core/ //! 
[low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/ -pub use copy::{copy, Copy}; -pub use flush::{flush, Flush}; +mod copy; +mod flush; +mod read; +mod read_exact; +mod read_to_end; +mod read_until; +mod shutdown; +mod write_all; + +pub use allow_std::AllowStdIo; +pub use self::copy::{copy, Copy}; +pub use self::flush::{flush, Flush}; pub use lines::{lines, Lines}; -pub use read::{read, Read}; -pub use read_exact::{read_exact, ReadExact}; -pub use read_to_end::{read_to_end, ReadToEnd}; -pub use read_until::{read_until, ReadUntil}; -pub use shutdown::{shutdown, Shutdown}; +pub use self::read::{read, Read}; +pub use self::read_exact::{read_exact, ReadExact}; +pub use self::read_to_end::{read_to_end, ReadToEnd}; +pub use self::read_until::{read_until, ReadUntil}; +pub use self::shutdown::{shutdown, Shutdown}; pub use split::{ReadHalf, WriteHalf}; pub use window::Window; -pub use write_all::{write_all, WriteAll}; +pub use self::write_all::{write_all, WriteAll}; diff --git a/third_party/rust/tokio-io/src/read.rs b/third_party/rust/tokio-io/src/io/read.rs similarity index 92% rename from third_party/rust/tokio-io/src/read.rs rename to third_party/rust/tokio-io/src/io/read.rs index abfb459c84af..4c5a96652ece 100644 --- a/third_party/rust/tokio-io/src/read.rs +++ b/third_party/rust/tokio-io/src/io/read.rs @@ -44,7 +44,7 @@ impl Future for Read fn poll(&mut self) -> Poll<(R, T, usize), io::Error> { let nread = match self.state { - State::Pending { ref mut rd, ref mut buf } => try_nb!(rd.read(&mut buf.as_mut()[..])), + State::Pending { ref mut rd, ref mut buf } => try_ready!(rd.poll_read(&mut buf.as_mut()[..])), State::Empty => panic!("poll a Read after it's done"), }; diff --git a/third_party/rust/tokio-io/src/read_exact.rs b/third_party/rust/tokio-io/src/io/read_exact.rs similarity index 96% rename from third_party/rust/tokio-io/src/read_exact.rs rename to third_party/rust/tokio-io/src/io/read_exact.rs index 142512421b0b..b1e164403af9 100644 --- 
a/third_party/rust/tokio-io/src/read_exact.rs +++ b/third_party/rust/tokio-io/src/io/read_exact.rs @@ -65,7 +65,7 @@ impl Future for ReadExact State::Reading { ref mut a, ref mut buf, ref mut pos } => { let buf = buf.as_mut(); while *pos < buf.len() { - let n = try_nb!(a.read(&mut buf[*pos..])); + let n = try_ready!(a.poll_read(&mut buf[*pos..])); *pos += n; if n == 0 { return Err(eof()) diff --git a/third_party/rust/tokio-io/src/read_to_end.rs b/third_party/rust/tokio-io/src/io/read_to_end.rs similarity index 100% rename from third_party/rust/tokio-io/src/read_to_end.rs rename to third_party/rust/tokio-io/src/io/read_to_end.rs diff --git a/third_party/rust/tokio-io/src/read_until.rs b/third_party/rust/tokio-io/src/io/read_until.rs similarity index 100% rename from third_party/rust/tokio-io/src/read_until.rs rename to third_party/rust/tokio-io/src/io/read_until.rs diff --git a/third_party/rust/tokio-io/src/shutdown.rs b/third_party/rust/tokio-io/src/io/shutdown.rs similarity index 94% rename from third_party/rust/tokio-io/src/shutdown.rs rename to third_party/rust/tokio-io/src/io/shutdown.rs index 18040eadc03c..96a8886ddc23 100644 --- a/third_party/rust/tokio-io/src/shutdown.rs +++ b/third_party/rust/tokio-io/src/io/shutdown.rs @@ -38,7 +38,7 @@ impl Future for Shutdown type Error = io::Error; fn poll(&mut self) -> Poll { - try_nb!(self.a.as_mut().unwrap().shutdown()); + try_ready!(self.a.as_mut().unwrap().shutdown()); Ok(Async::Ready(self.a.take().unwrap())) } } diff --git a/third_party/rust/tokio-io/src/write_all.rs b/third_party/rust/tokio-io/src/io/write_all.rs similarity index 97% rename from third_party/rust/tokio-io/src/write_all.rs rename to third_party/rust/tokio-io/src/io/write_all.rs index d000f1b9c12f..50b11fbc888e 100644 --- a/third_party/rust/tokio-io/src/write_all.rs +++ b/third_party/rust/tokio-io/src/io/write_all.rs @@ -68,7 +68,7 @@ impl Future for WriteAll State::Writing { ref mut a, ref buf, ref mut pos } => { let buf = buf.as_ref(); while *pos 
< buf.len() { - let n = try_nb!(a.write(&buf[*pos..])); + let n = try_ready!(a.poll_write(&buf[*pos..])); *pos += n; if n == 0 { return Err(zero_write()) diff --git a/third_party/rust/tokio-io/src/length_delimited.rs b/third_party/rust/tokio-io/src/length_delimited.rs index 4ffe655c26a3..6b552f2b58e5 100644 --- a/third_party/rust/tokio-io/src/length_delimited.rs +++ b/third_party/rust/tokio-io/src/length_delimited.rs @@ -1,11 +1,14 @@ +#![allow(deprecated)] + use {codec, AsyncRead, AsyncWrite}; -use bytes::{Buf, BufMut, BytesMut, IntoBuf, BigEndian, LittleEndian}; +use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use bytes::buf::Chain; use futures::{Async, AsyncSink, Stream, Sink, StartSend, Poll}; use std::{cmp, fmt}; +use std::error::Error as StdError; use std::io::{self, Cursor}; /// Configure length delimited `FramedRead`, `FramedWrite`, and `Framed` values. @@ -55,6 +58,11 @@ pub struct FramedRead { inner: codec::FramedRead, } +/// An error when the number of bytes read is more than max frame length. +pub struct FrameTooBig { + _priv: (), +} + #[derive(Debug)] struct Decoder { // Configuration values @@ -285,13 +293,15 @@ impl Decoder { // match endianess let n = if self.builder.length_field_is_big_endian { - src.get_uint::(field_len) + src.get_uint_be(field_len) } else { - src.get_uint::(field_len) + src.get_uint_le(field_len) }; if n > self.builder.max_frame_len as u64 { - return Err(io::Error::new(io::ErrorKind::InvalidData, "frame size too big")); + return Err(io::Error::new(io::ErrorKind::InvalidData, FrameTooBig { + _priv: (), + })); } // The check above ensures there is no overflow @@ -378,6 +388,24 @@ impl FramedWrite { } impl FramedWrite { + /// Returns the current max frame setting + /// + /// This is the largest size this codec will write to the wire. Larger + /// frames will be rejected. + pub fn max_frame_length(&self) -> usize { + self.builder.max_frame_len + } + + /// Updates the max frame setting. 
+ /// + /// The change takes effect the next time a frame is encoded. In other + /// words, if a frame is currently in process of being encoded with a frame + /// size greater than `val` but less than the max frame length in effect + /// before calling this function, then the frame will be allowed. + pub fn set_max_frame_length(&mut self, val: usize) { + self.builder.max_frame_length(val); + } + /// Returns a reference to the underlying I/O stream wrapped by /// `FramedWrite`. /// @@ -434,7 +462,9 @@ impl FramedWrite { let n = buf.remaining(); if n > self.builder.max_frame_len { - return Err(io::Error::new(io::ErrorKind::InvalidInput, "frame too big")); + return Err(io::Error::new(io::ErrorKind::InvalidInput, FrameTooBig { + _priv: (), + })); } // Adjust `n` with bounds checking @@ -451,9 +481,9 @@ impl FramedWrite { }; if self.builder.length_field_is_big_endian { - head.put_uint::(n as u64, self.builder.length_field_len); + head.put_uint_be(n as u64, self.builder.length_field_len); } else { - head.put_uint::(n as u64, self.builder.length_field_len); + head.put_uint_le(n as u64, self.builder.length_field_len); } debug_assert!(self.frame.is_none()); @@ -483,7 +513,7 @@ impl Sink for FramedWrite { try_ready!(self.do_write()); // Try flushing the underlying IO - try_nb!(self.inner.flush()); + try_ready!(self.inner.poll_flush()); return Ok(Async::Ready(())); } @@ -621,6 +651,32 @@ impl Builder { self } + /// Read the length field as a native endian integer + /// + /// The default setting is big endian. + /// + /// This configuration option applies to both encoding and decoding. 
+ /// + /// # Examples + /// + /// ``` + /// # use tokio_io::AsyncRead; + /// use tokio_io::codec::length_delimited::Builder; + /// + /// # fn bind_read(io: T) { + /// Builder::new() + /// .native_endian() + /// .new_read(io); + /// # } + /// ``` + pub fn native_endian(&mut self) -> &mut Self { + if cfg!(target_endian = "big") { + self.big_endian() + } else { + self.little_endian() + } + } + /// Sets the max frame length /// /// This configuration option applies to both encoding and decoding. The @@ -631,6 +687,9 @@ impl Builder { /// encoding, the length of the submitted payload is checked against this /// setting. /// + /// When frames exceed the max length, an `io::Error` with the custom value + /// of the `FrameTooBig` type will be returned. + /// /// # Examples /// /// ``` @@ -828,3 +887,25 @@ impl Builder { self.num_skip.unwrap_or(self.length_field_offset + self.length_field_len) } } + + +// ===== impl FrameTooBig ===== + +impl fmt::Debug for FrameTooBig { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FrameTooBig") + .finish() + } +} + +impl fmt::Display for FrameTooBig { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.description()) + } +} + +impl StdError for FrameTooBig { + fn description(&self) -> &str { + "frame size too big" + } +} diff --git a/third_party/rust/tokio-io/src/lib.rs b/third_party/rust/tokio-io/src/lib.rs index 24bba5aaec80..345efc1761ca 100644 --- a/third_party/rust/tokio-io/src/lib.rs +++ b/third_party/rust/tokio-io/src/lib.rs @@ -6,8 +6,8 @@ //! [found online]: https://tokio.rs/docs/getting-started/core/ //! 
[low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/ -#![deny(missing_docs, missing_debug_implementations)] -#![doc(html_root_url = "https://docs.rs/tokio-io/0.1")] +#![deny(missing_docs, missing_debug_implementations, warnings)] +#![doc(html_root_url = "https://docs.rs/tokio-io/0.1.7")] #[macro_use] extern crate log; @@ -17,19 +17,14 @@ extern crate futures; extern crate bytes; use std::io as std_io; -use std::io::Write; -use futures::{Async, Poll}; -use futures::future::BoxFuture; -use futures::stream::BoxStream; - -use bytes::{Buf, BufMut}; +use futures::{Future, Stream}; /// A convenience typedef around a `Future` whose error component is `io::Error` -pub type IoFuture = BoxFuture; +pub type IoFuture = Box + Send>; /// A convenience typedef around a `Stream` whose error component is `io::Error` -pub type IoStream = BoxStream; +pub type IoStream = Box + Send>; /// A convenience macro for working with `io::Result` from the `Read` and /// `Write` traits. @@ -51,331 +46,20 @@ macro_rules! try_nb { pub mod io; pub mod codec; -mod copy; -mod flush; +mod allow_std; +mod async_read; +mod async_write; mod framed; mod framed_read; mod framed_write; mod length_delimited; mod lines; -mod read; -mod read_exact; -mod read_to_end; -mod read_until; -mod shutdown; mod split; mod window; -mod write_all; +pub mod _tokio_codec; -use codec::{Decoder, Encoder, Framed}; -use split::{ReadHalf, WriteHalf}; - -/// A trait for readable objects which operated in an asynchronous and -/// futures-aware fashion. -/// -/// This trait inherits from `io::Read` and indicates as a marker that an I/O -/// object is **nonblocking**, meaning that it will return an error instead of -/// blocking when bytes are unavailable, but the stream hasn't reached EOF. 
-/// Specifically this means that the `read` function for types that implement -/// this trait can have a few return values: -/// -/// * `Ok(n)` means that `n` bytes of data was immediately read and placed into -/// the output buffer, where `n` == 0 implies that EOF has been reached. -/// * `Err(e) if e.kind() == ErrorKind::WouldBlock` means that no data was read -/// into the buffer provided. The I/O object is not currently readable but may -/// become readable in the future. Most importantly, **the current future's -/// task is scheduled to get unparked when the object is readable**. This -/// means that like `Future::poll` you'll receive a notification when the I/O -/// object is readable again. -/// * `Err(e)` for other errors are standard I/O errors coming from the -/// underlying object. -/// -/// This trait importantly means that the `read` method only works in the -/// context of a future's task. The object may panic if used outside of a task. -pub trait AsyncRead: std_io::Read { - /// Prepares an uninitialized buffer to be safe to pass to `read`. Returns - /// `true` if the supplied buffer was zeroed out. - /// - /// While it would be highly unusual, implementations of [`io::Read`] are - /// able to read data from the buffer passed as an argument. Because of - /// this, the buffer passed to [`io::Read`] must be initialized memory. In - /// situations where large numbers of buffers are used, constantly having to - /// zero out buffers can be expensive. - /// - /// This function does any necessary work to prepare an uninitialized buffer - /// to be safe to pass to `read`. If `read` guarantees to never attempt read - /// data out of the supplied buffer, then `prepare_uninitialized_buffer` - /// doesn't need to do any work. - /// - /// If this function returns `true`, then the memory has been zeroed out. 
- /// This allows implementations of `AsyncRead` which are composed of - /// multiple sub implementations to efficiently implement - /// `prepare_uninitialized_buffer`. - /// - /// This function isn't actually `unsafe` to call but `unsafe` to implement. - /// The implementor must ensure that either the whole `buf` has been zeroed - /// or `read_buf()` overwrites the buffer without reading it and returns - /// correct value. - /// - /// This function is called from [`read_buf`]. - /// - /// [`io::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html - /// [`read_buf`]: #method.read_buf - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - for i in 0..buf.len() { - buf[i] = 0; - } - - true - } - - /// Pull some bytes from this source into the specified `Buf`, returning - /// how many bytes were read. - /// - /// The `buf` provided will have bytes read into it and the internal cursor - /// will be advanced if any bytes were read. Note that this method typically - /// will not reallocate the buffer provided. - fn read_buf(&mut self, buf: &mut B) -> Poll - where Self: Sized, - { - if !buf.has_remaining_mut() { - return Ok(Async::Ready(0)); - } - - unsafe { - let n = { - let b = buf.bytes_mut(); - - self.prepare_uninitialized_buffer(b); - - try_nb!(self.read(b)) - }; - - buf.advance_mut(n); - Ok(Async::Ready(n)) - } - } - - /// Provides a `Stream` and `Sink` interface for reading and writing to this - /// `Io` object, using `Decode` and `Encode` to read and write the raw data. - /// - /// Raw I/O objects work with byte sequences, but higher-level code usually - /// wants to batch these into meaningful chunks, called "frames". This - /// method layers framing on top of an I/O object, by using the `Codec` - /// traits to handle encoding and decoding of messages frames. Note that - /// the incoming and outgoing frame types may be distinct. 
- /// - /// This function returns a *single* object that is both `Stream` and - /// `Sink`; grouping this into a single object is often useful for layering - /// things like gzip or TLS, which require both read and write access to the - /// underlying object. - /// - /// If you want to work more directly with the streams and sink, consider - /// calling `split` on the `Framed` returned by this method, which will - /// break them into separate objects, allowing them to interact more easily. - fn framed(self, codec: T) -> Framed - where Self: AsyncWrite + Sized, - { - framed::framed(self, codec) - } - - /// Helper method for splitting this read/write object into two halves. - /// - /// The two halves returned implement the `Read` and `Write` traits, - /// respectively. - fn split(self) -> (ReadHalf, WriteHalf) - where Self: AsyncWrite + Sized, - { - split::split(self) - } -} - -impl AsyncRead for Box { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - (**self).prepare_uninitialized_buffer(buf) - } -} - -impl<'a, T: ?Sized + AsyncRead> AsyncRead for &'a mut T { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - (**self).prepare_uninitialized_buffer(buf) - } -} - -impl<'a> AsyncRead for &'a [u8] { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [u8]) -> bool { - false - } -} - -/// A trait for writable objects which operated in an asynchronous and -/// futures-aware fashion. -/// -/// This trait inherits from `io::Write` and indicates that an I/O object is -/// **nonblocking**, meaning that it will return an error instead of blocking -/// when bytes cannot currently be written, but hasn't closed. Specifically -/// this means that the `write` function for types that implement this trait -/// can have a few return values: -/// -/// * `Ok(n)` means that `n` bytes of data was immediately written . -/// * `Err(e) if e.kind() == ErrorKind::WouldBlock` means that no data was -/// written from the buffer provided. 
The I/O object is not currently -/// writable but may become writable in the future. Most importantly, **the -/// current future's task is scheduled to get unparked when the object is -/// readable**. This means that like `Future::poll` you'll receive a -/// notification when the I/O object is writable again. -/// * `Err(e)` for other errors are standard I/O errors coming from the -/// underlying object. -/// -/// This trait importantly means that the `write` method only works in the -/// context of a future's task. The object may panic if used outside of a task. -pub trait AsyncWrite: std_io::Write { - /// Initiates or attempts to shut down this writer, returning success when - /// the I/O connection has completely shut down. - /// - /// This method is intended to be used for asynchronous shutdown of I/O - /// connections. For example this is suitable for implementing shutdown of a - /// TLS connection or calling `TcpStream::shutdown` on a proxied connection. - /// Protocols sometimes need to flush out final pieces of data or otherwise - /// perform a graceful shutdown handshake, reading/writing more data as - /// appropriate. This method is the hook for such protocols to implement the - /// graceful shutdown logic. - /// - /// This `shutdown` method is required by implementors of the - /// `AsyncWrite` trait. Wrappers typically just want to proxy this call - /// through to the wrapped type, and base types will typically implement - /// shutdown logic here or just return `Ok(().into())`. Note that if you're - /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that - /// transitively the entire stream has been shut down. After your wrapper's - /// shutdown logic has been executed you should shut down the underlying - /// stream. - /// - /// Invocation of a `shutdown` implies an invocation of `flush`. Once this - /// method returns `Ready` it implies that a flush successfully happened - /// before the shutdown happened. 
That is, callers don't need to call - /// `flush` before calling `shutdown`. They can rely that by calling - /// `shutdown` any pending buffered data will be written out. - /// - /// # Return value - /// - /// This function returns a `Poll<(), io::Error>` classified as such: - /// - /// * `Ok(Async::Ready(()))` - indicates that the connection was - /// successfully shut down and is now safe to deallocate/drop/close - /// resources associated with it. This method means that the current task - /// will no longer receive any notifications due to this method and the - /// I/O object itself is likely no longer usable. - /// - /// * `Ok(Async::NotReady)` - indicates that shutdown is initiated but could - /// not complete just yet. This may mean that more I/O needs to happen to - /// continue this shutdown operation. The current task is scheduled to - /// receive a notification when it's otherwise ready to continue the - /// shutdown operation. When woken up this method should be called again. - /// - /// * `Err(e)` - indicates a fatal error has happened with shutdown, - /// indicating that the shutdown operation did not complete successfully. - /// This typically means that the I/O object is no longer usable. - /// - /// # Errors - /// - /// This function can return normal I/O errors through `Err`, described - /// above. Additionally this method may also render the underlying - /// `Write::write` method no longer usable (e.g. will return errors in the - /// future). It's recommended that once `shutdown` is called the - /// `write` method is no longer called. - /// - /// # Panics - /// - /// This function will panic if not called within the context of a future's - /// task. - fn shutdown(&mut self) -> Poll<(), std_io::Error>; - - /// Write a `Buf` into this value, returning how many bytes were written. - /// - /// Note that this method will advance the `buf` provided automatically by - /// the number of bytes written. 
- fn write_buf(&mut self, buf: &mut B) -> Poll - where Self: Sized, - { - if !buf.has_remaining() { - return Ok(Async::Ready(0)); - } - - let n = try_nb!(self.write(buf.bytes())); - buf.advance(n); - Ok(Async::Ready(n)) - } -} - -impl AsyncWrite for Box { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - (**self).shutdown() - } -} -impl<'a, T: ?Sized + AsyncWrite> AsyncWrite for &'a mut T { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - (**self).shutdown() - } -} - -impl AsyncRead for std_io::Repeat { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { - false - } -} - -impl AsyncWrite for std_io::Sink { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - Ok(().into()) - } -} - -// TODO: Implement `prepare_uninitialized_buffer` for `io::Take`. -// This is blocked on rust-lang/rust#27269 -impl AsyncRead for std_io::Take { -} - -// TODO: Implement `prepare_uninitialized_buffer` when upstream exposes inner -// parts -impl AsyncRead for std_io::Chain - where T: AsyncRead, - U: AsyncRead, -{ -} - -impl AsyncWrite for std_io::BufWriter { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - try_nb!(self.flush()); - self.get_mut().shutdown() - } -} - -impl AsyncRead for std_io::BufReader { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - self.get_ref().prepare_uninitialized_buffer(buf) - } -} - -impl> AsyncRead for std_io::Cursor { -} - -impl<'a> AsyncWrite for std_io::Cursor<&'a mut [u8]> { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - Ok(().into()) - } -} - -impl AsyncWrite for std_io::Cursor> { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - Ok(().into()) - } -} - -impl AsyncWrite for std_io::Cursor> { - fn shutdown(&mut self) -> Poll<(), std_io::Error> { - Ok(().into()) - } -} +pub use self::async_read::AsyncRead; +pub use self::async_write::AsyncWrite; fn _assert_objects() { fn _assert() {} diff --git a/third_party/rust/tokio-io/src/split.rs b/third_party/rust/tokio-io/src/split.rs 
index 88a24da49606..8993c4f77e95 100644 --- a/third_party/rust/tokio-io/src/split.rs +++ b/third_party/rust/tokio-io/src/split.rs @@ -2,6 +2,7 @@ use std::io::{self, Read, Write}; use futures::{Async, Poll}; use futures::sync::BiLock; +use bytes::{Buf, BufMut}; use {AsyncRead, AsyncWrite}; @@ -36,6 +37,12 @@ impl Read for ReadHalf { } impl AsyncRead for ReadHalf { + fn read_buf(&mut self, buf: &mut B) -> Poll { + match self.handle.poll_lock() { + Async::Ready(mut l) => l.read_buf(buf), + Async::NotReady => Err(would_block()), + } + } } impl Write for WriteHalf { @@ -61,4 +68,13 @@ impl AsyncWrite for WriteHalf { Async::NotReady => Err(would_block()), } } + + fn write_buf(&mut self, buf: &mut B) -> Poll + where Self: Sized, + { + match self.handle.poll_lock() { + Async::Ready(mut l) => l.write_buf(buf), + Async::NotReady => Err(would_block()), + } + } } diff --git a/third_party/rust/tokio-io/tests/length_delimited.rs b/third_party/rust/tokio-io/tests/length_delimited.rs index a95bddbc3b78..b51cda2c1805 100644 --- a/third_party/rust/tokio-io/tests/length_delimited.rs +++ b/third_party/rust/tokio-io/tests/length_delimited.rs @@ -1,6 +1,5 @@ extern crate tokio_io; extern crate futures; -extern crate bytes; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::codec::length_delimited::*; @@ -49,6 +48,23 @@ fn read_single_frame_one_packet_little_endian() { assert_eq!(io.poll().unwrap(), Ready(None)); } +#[test] +fn read_single_frame_one_packet_native_endian() { + let data = if cfg!(target_endian = "big") { + b"\x00\x00\x00\x09abcdefghi" + } else { + b"\x09\x00\x00\x00abcdefghi" + }; + let mut io = Builder::new() + .native_endian() + .new_read(mock! 
{ + Ok(data[..].into()), + }); + + assert_eq!(io.poll().unwrap(), Ready(Some(b"abcdefghi"[..].into()))); + assert_eq!(io.poll().unwrap(), Ready(None)); +} + #[test] fn read_single_multi_frame_one_packet() { let mut data: Vec = vec![]; @@ -187,7 +203,7 @@ fn read_max_frame_len() { } #[test] -fn update_max_frame_len_at_rest() { +fn read_update_max_frame_len_at_rest() { let mut io = Builder::new() .new_read(mock! { Ok(b"\x00\x00\x00\x09abcdefghi"[..].into()), @@ -200,7 +216,7 @@ fn update_max_frame_len_at_rest() { } #[test] -fn update_max_frame_len_in_flight() { +fn read_update_max_frame_len_in_flight() { let mut io = Builder::new() .new_read(mock! { Ok(b"\x00\x00\x00\x09abcd"[..].into()), @@ -407,6 +423,51 @@ fn write_single_frame_with_short_length_field() { assert!(io.get_ref().calls.is_empty()); } +#[test] +fn write_max_frame_len() { + let mut io = Builder::new() + .max_frame_length(5) + .new_write(mock! { }); + + assert_eq!(io.start_send("abcdef").unwrap_err().kind(), io::ErrorKind::InvalidInput); + assert!(io.get_ref().calls.is_empty()); +} + +#[test] +fn write_update_max_frame_len_at_rest() { + let mut io = Builder::new() + .new_write(mock! { + Ok(b"\x00\x00\x00\x06"[..].into()), + Ok(b"abcdef"[..].into()), + Ok(Flush), + }); + + assert!(io.start_send("abcdef").unwrap().is_ready()); + assert!(io.poll_complete().unwrap().is_ready()); + io.set_max_frame_length(5); + assert_eq!(io.start_send("abcdef").unwrap_err().kind(), io::ErrorKind::InvalidInput); + assert!(io.get_ref().calls.is_empty()); +} + +#[test] +fn write_update_max_frame_len_in_flight() { + let mut io = Builder::new() + .new_write(mock! 
{ + Ok(b"\x00\x00\x00\x06"[..].into()), + Ok(b"ab"[..].into()), + Err(would_block()), + Ok(b"cdef"[..].into()), + Ok(Flush), + }); + + assert!(io.start_send("abcdef").unwrap().is_ready()); + assert!(!io.poll_complete().unwrap().is_ready()); + io.set_max_frame_length(5); + assert!(io.poll_complete().unwrap().is_ready()); + assert_eq!(io.start_send("abcdef").unwrap_err().kind(), io::ErrorKind::InvalidInput); + assert!(io.get_ref().calls.is_empty()); +} + // ===== Test utils ===== fn would_block() -> io::Error { diff --git a/third_party/rust/tokio-reactor/.cargo-checksum.json b/third_party/rust/tokio-reactor/.cargo-checksum.json new file mode 100644 index 000000000000..70277e290270 --- /dev/null +++ b/third_party/rust/tokio-reactor/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"a2d159cb0d6286e6bfddc5827b5c354cc4b3573ead9e1415215ffaacc23fd104","Cargo.toml":"d9499bc735e511b9c8335cde014572092238fb69adcbf185721e7bed45a99c50","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"0a77f2dd2bd499aa3e88809202695d99eadb813f75dcdd5b9f9df3083f2b48ed","src/atomic_task.rs":"fc2a3327bafff8155f307bbddef71b8a3e62f52f9234565673d88af83e8fa8aa","src/background.rs":"560e74f4e6ba3d191d3fcfb13a6d52ede65c777cfca4a6e8b0d1144292c1b372","src/lib.rs":"b73086b2e2a42f0a09c5bdaba7b3c74d821ba4878949c2897b7c3233027ad824","src/poll_evented.rs":"fedc483360e1018e25b6d544b2f3672926718994ea2aa6fcdd91326f1dc5456b","src/registration.rs":"1d9a2de9ebf3953348050329e641777605492c11af8992afe5d88805dfc4efab"},"package":"8703a5762ff6913510dc64272c714c4389ffd8c4b3cf602879b8bd14ff06b604"} \ No newline at end of file diff --git a/third_party/rust/tokio-reactor/CHANGELOG.md b/third_party/rust/tokio-reactor/CHANGELOG.md new file mode 100644 index 000000000000..05ed84ae831f --- /dev/null +++ b/third_party/rust/tokio-reactor/CHANGELOG.md @@ -0,0 +1,18 @@ +# 0.1.3 (August 6, 2018) + +* Misc small fixes (#508) + +# 0.1.2 (June 13, 2018) + +* Fix deadlock that can happen 
when shutting down (#409) +* Handle::default() lazily binds to reactor (#350) + +# 0.1.1 (March 22, 2018) + +* Fix threading bugs (#227) +* Fix notification bugs (#243) +* Optionally support futures 0.2 (#172) + +# 0.1.0 (March 09, 2018) + +* Initial release diff --git a/third_party/rust/tokio-reactor/Cargo.toml b/third_party/rust/tokio-reactor/Cargo.toml new file mode 100644 index 000000000000..0f169c8a22a1 --- /dev/null +++ b/third_party/rust/tokio-reactor/Cargo.toml @@ -0,0 +1,40 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-reactor" +version = "0.1.3" +authors = ["Carl Lerche "] +description = "Event loop that drives Tokio I/O resources.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio-reactor/0.1" +readme = "README.md" +categories = ["asynchronous", "network-programming"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.futures] +version = "0.1.19" + +[dependencies.log] +version = "0.4.1" + +[dependencies.mio] +version = "0.6.14" + +[dependencies.slab] +version = "0.4.0" + +[dependencies.tokio-executor] +version = "0.1.1" + +[dependencies.tokio-io] +version = "0.1.6" diff --git a/third_party/rust/tokio-reactor/LICENSE b/third_party/rust/tokio-reactor/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-reactor/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person 
obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-reactor/README.md b/third_party/rust/tokio-reactor/README.md new file mode 100644 index 000000000000..0227c14e841b --- /dev/null +++ b/third_party/rust/tokio-reactor/README.md @@ -0,0 +1,42 @@ +# tokio-reactor + +Event loop that drives Tokio I/O resources. + +[Documentation](https://tokio-rs.github.io/tokio/tokio_reactor/) + +## Overview + +The reactor is the engine that drives asynchronous I/O resources (like TCP and +UDP sockets). It is backed by [`mio`] and acts as a bridge between [`mio`] and +[`futures`]. + +The crate provides: + +* [`Reactor`] is the main type of this crate. It performs the event loop logic. + +* [`Handle`] provides a reference to a reactor instance. + +* [`Registration`] and [`PollEvented`] allow third parties to implement I/O + resources that are driven by the reactor. + +Application authors will not use this crate directly. Instead, they will use the +[`tokio`] crate. 
Library authors should only depend on `tokio-reactor` if they +are building a custom I/O resource. + +[`mio`]: http://github.com/carllerche/mio +[`futures`]: http://github.com/rust-lang-nursery/futures-rs +[`Reactor`]: https://tokio-rs.github.io/tokio/tokio_reactor/struct.Reactor.html +[`Handle`]: https://tokio-rs.github.io/tokio/tokio_reactor/struct.Handle.html +[`Registration`]: https://tokio-rs.github.io/tokio/tokio_reactor/struct.Registration.html +[`PollEvented`]: https://tokio-rs.github.io/tokio/tokio_reactor/struct.PollEvented.html +[`tokio`]: ../ + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-reactor/src/atomic_task.rs b/third_party/rust/tokio-reactor/src/atomic_task.rs new file mode 100644 index 000000000000..b48dca402cb4 --- /dev/null +++ b/third_party/rust/tokio-reactor/src/atomic_task.rs @@ -0,0 +1,297 @@ +#![allow(dead_code)] + +use super::Task; + +use std::fmt; +use std::cell::UnsafeCell; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{Acquire, Release, AcqRel}; + +/// A synchronization primitive for task notification. +/// +/// `AtomicTask` will coordinate concurrent notifications with the consumer +/// potentially "updating" the underlying task to notify. This is useful in +/// scenarios where a computation completes in another thread and wants to +/// notify the consumer, but the consumer is in the process of being migrated to +/// a new logical task. +/// +/// Consumers should call `register` before checking the result of a computation +/// and producers should call `notify` after producing the computation (this +/// differs from the usual `thread::park` pattern). It is also permitted for +/// `notify` to be called **before** `register`. 
This results in a no-op. +/// +/// A single `AtomicTask` may be reused for any number of calls to `register` or +/// `notify`. +/// +/// `AtomicTask` does not provide any memory ordering guarantees, as such the +/// user should use caution and use other synchronization primitives to guard +/// the result of the underlying computation. +pub(crate) struct AtomicTask { + state: AtomicUsize, + task: UnsafeCell>, +} + +// `AtomicTask` is a multi-consumer, single-producer transfer cell. The cell +// stores a `Task` value produced by calls to `register` and many threads can +// race to take the task (to notify it) by calling `notify. +// +// If a new `Task` instance is produced by calling `register` before an existing +// one is consumed, then the existing one is overwritten. +// +// While `AtomicTask` is single-producer, the implementation ensures memory +// safety. In the event of concurrent calls to `register`, there will be a +// single winner whose task will get stored in the cell. The losers will not +// have their tasks notified. As such, callers should ensure to add +// synchronization to calls to `register`. +// +// The implementation uses a single `AtomicUsize` value to coordinate access to +// the `Task` cell. There are two bits that are operated on independently. These +// are represented by `REGISTERING` and `NOTIFYING`. +// +// The `REGISTERING` bit is set when a producer enters the critical section. The +// `NOTIFYING` bit is set when a consumer enters the critical section. Neither +// bit being set is represented by `WAITING`. +// +// A thread obtains an exclusive lock on the task cell by transitioning the +// state from `WAITING` to `REGISTERING` or `NOTIFYING`, depending on the +// operation the thread wishes to perform. When this transition is made, it is +// guaranteed that no other thread will access the task cell. +// +// # Registering +// +// On a call to `register`, an attempt to transition the state from WAITING to +// REGISTERING is made. 
On success, the caller obtains a lock on the task cell. +// +// If the lock is obtained, then the thread sets the task cell to the task +// provided as an argument. Then it attempts to transition the state back from +// `REGISTERING` -> `WAITING`. +// +// If this transition is successful, then the registering process is complete +// and the next call to `notify` will observe the task. +// +// If the transition fails, then there was a concurrent call to `notify` that +// was unable to access the task cell (due to the registering thread holding the +// lock). To handle this, the registering thread removes the task it just set +// from the cell and calls `notify` on it. This call to notify represents the +// attempt to notify by the other thread (that set the `NOTIFYING` bit). The +// state is then transitioned from `REGISTERING | NOTIFYING` back to `WAITING`. +// This transition must succeed because, at this point, the state cannot be +// transitioned by another thread. +// +// # Notifying +// +// On a call to `notify`, an attempt to transition the state from `WAITING` to +// `NOTIFYING` is made. On success, the caller obtains a lock on the task cell. +// +// If the lock is obtained, then the thread takes ownership of the current value +// in the task cell, and calls `notify` on it. The state is then transitioned +// back to `WAITING`. This transition must succeed as, at this point, the state +// cannot be transitioned by another thread. +// +// If the thread is unable to obtain the lock, the `NOTIFYING` bit is still. +// This is because it has either been set by the current thread but the previous +// value included the `REGISTERING` bit **or** a concurrent thread is in the +// `NOTIFYING` critical section. Either way, no action must be taken. 
+// +// If the current thread is the only concurrent call to `notify` and another +// thread is in the `register` critical section, when the other thread **exits** +// the `register` critical section, it will observe the `NOTIFYING` bit and +// handle the notify itself. +// +// If another thread is in the `notify` critical section, then it will handle +// notifying the task. +// +// # A potential race (is safely handled). +// +// Imagine the following situation: +// +// * Thread A obtains the `notify` lock and notifies a task. +// +// * Before thread A releases the `notify` lock, the notified task is scheduled. +// +// * Thread B attempts to notify the task. In theory this should result in the +// task being notified, but it cannot because thread A still holds the notify +// lock. +// +// This case is handled by requiring users of `AtomicTask` to call `register` +// **before** attempting to observe the application state change that resulted +// in the task being notified. The notifiers also change the application state +// before calling notify. +// +// Because of this, the task will do one of two things. +// +// 1) Observe the application state change that Thread B is notifying on. In +// this case, it is OK for Thread B's notification to be lost. +// +// 2) Call register before attempting to observe the application state. Since +// Thread A still holds the `notify` lock, the call to `register` will result +// in the task notifying itself and get scheduled again. + +/// Idle state +const WAITING: usize = 0; + +/// A new task value is being registered with the `AtomicTask` cell. +const REGISTERING: usize = 0b01; + +/// The task currently registered with the `AtomicTask` cell is being notified. 
+const NOTIFYING: usize = 0b10; + +impl AtomicTask { + /// Create an `AtomicTask` initialized with the given `Task` + pub fn new() -> AtomicTask { + // Make sure that task is Sync + trait AssertSync: Sync {} + impl AssertSync for Task {} + + AtomicTask { + state: AtomicUsize::new(WAITING), + task: UnsafeCell::new(None), + } + } + + /// Registers the provided task to be notified on calls to `notify`. + /// + /// The new task will take place of any previous tasks that were registered + /// by previous calls to `register`. Any calls to `notify` that happen after + /// a call to `register` (as defined by the memory ordering rules), will + /// notify the `register` caller's task. + /// + /// It is safe to call `register` with multiple other threads concurrently + /// calling `notify`. This will result in the `register` caller's current + /// task being notified once. + /// + /// This function is safe to call concurrently, but this is generally a bad + /// idea. Concurrent calls to `register` will attempt to register different + /// tasks to be notified. One of the callers will win and have its task set, + /// but there is no guarantee as to which caller will succeed. + pub fn register_task(&self, task: Task) { + match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) { + WAITING => { + unsafe { + // Locked acquired, update the waker cell + *self.task.get() = Some(task.clone()); + + // Release the lock. If the state transitioned to include + // the `NOTIFYING` bit, this means that a notify has been + // called concurrently, so we have to remove the task and + // notify it.` + // + // Start by assuming that the state is `REGISTERING` as this + // is what we jut set it to. + let mut curr = REGISTERING; + + // If a task has to be notified, it will be set here. 
+ let mut notify: Option = None; + + loop { + let res = self.state.compare_exchange( + curr, WAITING, AcqRel, Acquire); + + match res { + Ok(_) => { + // The atomic exchange was successful, now + // notify the task (if set) and return. + if let Some(task) = notify { + task.notify(); + } + + return; + } + Err(actual) => { + // This branch can only be reached if a + // concurrent thread called `notify`. In this + // case, `actual` **must** be `REGISTERING | + // `NOTIFYING`. + debug_assert_eq!(actual, REGISTERING | NOTIFYING); + + // Take the task to notify once the atomic operation has + // completed. + notify = (*self.task.get()).take(); + + // Update `curr` for the next iteration of the + // loop + curr = actual; + } + } + } + } + } + NOTIFYING => { + // Currently in the process of notifying the task, i.e., + // `notify` is currently being called on the old task handle. + // So, we call notify on the new task handle + task.notify(); + } + state => { + // In this case, a concurrent thread is holding the + // "registering" lock. This probably indicates a bug in the + // caller's code as racing to call `register` doesn't make much + // sense. + // + // We just want to maintain memory safety. It is ok to drop the + // call to `register`. + debug_assert!( + state == REGISTERING || + state == REGISTERING | NOTIFYING); + } + } + } + + /// Attempts to take the `Task` value out of the `AtomicTask` with the + /// intention that the caller will notify the task. + pub fn take_to_notify(&self) -> Option { + // AcqRel ordering is used in order to acquire the value of the `task` + // cell as well as to establish a `release` ordering with whatever + // memory the `AtomicTask` is associated with. + match self.state.fetch_or(NOTIFYING, AcqRel) { + WAITING => { + // The notifying lock has been acquired. 
+ let task = unsafe { (*self.task.get()).take() }; + + // Release the lock + self.state.fetch_and(!NOTIFYING, Release); + + task + } + state => { + // There is a concurrent thread currently updating the + // associated task. + // + // Nothing more to do as the `NOTIFYING` bit has been set. It + // doesn't matter if there are concurrent registering threads or + // not. + // + debug_assert!( + state == REGISTERING || + state == REGISTERING | NOTIFYING || + state == NOTIFYING); + + None + } + } + } + + /// Notifies the task that last called `register`. + /// + /// If `register` has not been called yet, then this does nothing. + pub fn notify(&self) { + if let Some(task) = self.take_to_notify() { + task.notify(); + } + } +} + +impl Default for AtomicTask { + fn default() -> Self { + AtomicTask::new() + } +} + +impl fmt::Debug for AtomicTask { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "AtomicTask") + } +} + +unsafe impl Send for AtomicTask {} +unsafe impl Sync for AtomicTask {} diff --git a/third_party/rust/tokio-reactor/src/background.rs b/third_party/rust/tokio-reactor/src/background.rs new file mode 100644 index 000000000000..88f78a8bcbba --- /dev/null +++ b/third_party/rust/tokio-reactor/src/background.rs @@ -0,0 +1,217 @@ +use {Reactor, Handle, Task}; +use atomic_task::AtomicTask; + +use futures::{Future, Async, Poll, task}; + +use std::io; +use std::thread; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +/// Handle to the reactor running on a background thread. +/// +/// Instances are created by calling [`Reactor::background`]. +/// +/// [`Reactor::background`]: struct.Reactor.html#method.background +#[derive(Debug)] +pub struct Background { + /// When `None`, the reactor thread will run until the process terminates. + inner: Option, +} + +/// Future that resolves when the reactor thread has shutdown. 
+#[derive(Debug)] +pub struct Shutdown { + inner: Inner, +} + +/// Actual Background handle. +#[derive(Debug)] +struct Inner { + /// Handle to the reactor + handle: Handle, + + /// Shared state between the background handle and the reactor thread. + shared: Arc, +} + +#[derive(Debug)] +struct Shared { + /// Signal the reactor thread to shutdown. + shutdown: AtomicUsize, + + /// Task to notify when the reactor thread enters a shutdown state. + shutdown_task: AtomicTask, +} + +/// Notifies the reactor thread to shutdown once the reactor becomes idle. +const SHUTDOWN_IDLE: usize = 1; + +/// Notifies the reactor thread to shutdown immediately. +const SHUTDOWN_NOW: usize = 2; + +/// The reactor is currently shutdown. +const SHUTDOWN: usize = 3; + +// ===== impl Background ===== + +impl Background { + /// Launch a reactor in the background and return a handle to the thread. + pub(crate) fn new(reactor: Reactor) -> io::Result { + // Grab a handle to the reactor + let handle = reactor.handle().clone(); + + // Create the state shared between the background handle and the reactor + // thread. + let shared = Arc::new(Shared { + shutdown: AtomicUsize::new(0), + shutdown_task: AtomicTask::new(), + }); + + // For the reactor thread + let shared2 = shared.clone(); + + // Start the reactor thread + thread::Builder::new() + .spawn(move || run(reactor, shared2))?; + + Ok(Background { + inner: Some(Inner { + handle, + shared, + }), + }) + } + + /// Returns a reference to the reactor handle. + pub fn handle(&self) -> &Handle { + &self.inner.as_ref().unwrap().handle + } + + /// Shutdown the reactor on idle. + /// + /// Returns a future that completes once the reactor thread has shutdown. + pub fn shutdown_on_idle(mut self) -> Shutdown { + let inner = self.inner.take().unwrap(); + inner.shutdown_on_idle(); + + Shutdown { inner } + } + + /// Shutdown the reactor immediately + /// + /// Returns a future that completes once the reactor thread has shutdown. 
+ pub fn shutdown_now(mut self) -> Shutdown { + let inner = self.inner.take().unwrap(); + inner.shutdown_now(); + + Shutdown { inner } + } + + /// Run the reactor on its thread until the process terminates. + pub fn forget(mut self) { + drop(self.inner.take()); + } +} + +impl Drop for Background { + fn drop(&mut self) { + let inner = match self.inner.take() { + Some(i) => i, + None => return, + }; + + inner.shutdown_now(); + + let shutdown = Shutdown { inner }; + let _ = shutdown.wait(); + } +} + +// ===== impl Shutdown ===== + +impl Future for Shutdown { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + let task = Task::Futures1(task::current()); + self.inner.shared.shutdown_task.register_task(task); + + if !self.inner.is_shutdown() { + return Ok(Async::NotReady); + } + + Ok(().into()) + } +} + +// ===== impl Inner ===== + +impl Inner { + /// Returns true if the reactor thread is shutdown. + fn is_shutdown(&self) -> bool { + self.shared.shutdown.load(SeqCst) == SHUTDOWN + } + + /// Notify the reactor thread to shutdown once the reactor transitions to an + /// idle state. + fn shutdown_on_idle(&self) { + self.shared.shutdown + .compare_and_swap(0, SHUTDOWN_IDLE, SeqCst); + self.handle.wakeup(); + } + + /// Notify the reactor thread to shutdown immediately. 
+ fn shutdown_now(&self) { + let mut curr = self.shared.shutdown.load(SeqCst); + + loop { + if curr >= SHUTDOWN_NOW { + return; + } + + let act = self.shared.shutdown + .compare_and_swap(curr, SHUTDOWN_NOW, SeqCst); + + if act == curr { + self.handle.wakeup(); + return; + } + + curr = act; + } + } +} + +// ===== impl Reactor thread ===== + +fn run(mut reactor: Reactor, shared: Arc) { + debug!("starting background reactor"); + loop { + let shutdown = shared.shutdown.load(SeqCst); + + if shutdown == SHUTDOWN_NOW { + debug!("shutting background reactor down NOW"); + break; + } + + if shutdown == SHUTDOWN_IDLE && reactor.is_idle() { + debug!("shutting background reactor on idle"); + break; + } + + reactor.turn(None).unwrap(); + } + + drop(reactor); + + // Transition the state to shutdown + shared.shutdown.store(SHUTDOWN, SeqCst); + + // Notify any waiters + shared.shutdown_task.notify(); + + debug!("background reactor has shutdown"); +} diff --git a/third_party/rust/tokio-reactor/src/lib.rs b/third_party/rust/tokio-reactor/src/lib.rs new file mode 100644 index 000000000000..381d77af0373 --- /dev/null +++ b/third_party/rust/tokio-reactor/src/lib.rs @@ -0,0 +1,773 @@ +//! Event loop that drives Tokio I/O resources. +//! +//! The reactor is the engine that drives asynchronous I/O resources (like TCP and +//! UDP sockets). It is backed by [`mio`] and acts as a bridge between [`mio`] and +//! [`futures`]. +//! +//! The crate provides: +//! +//! * [`Reactor`] is the main type of this crate. It performs the event loop logic. +//! +//! * [`Handle`] provides a reference to a reactor instance. +//! +//! * [`Registration`] and [`PollEvented`] allow third parties to implement I/O +//! resources that are driven by the reactor. +//! +//! Application authors will not use this crate directly. Instead, they will use the +//! `tokio` crate. Library authors should only depend on `tokio-reactor` if they +//! are building a custom I/O resource. +//! +//! 
For more details, see [reactor module] documentation in the Tokio crate. +//! +//! [`mio`]: http://github.com/carllerche/mio +//! [`futures`]: http://github.com/rust-lang-nursery/futures-rs +//! [`Reactor`]: struct.Reactor.html +//! [`Handle`]: struct.Handle.html +//! [`Registration`]: struct.Registration.html +//! [`PollEvented`]: struct.PollEvented.html +//! [reactor module]: https://docs.rs/tokio/0.1/tokio/reactor/index.html + +#![doc(html_root_url = "https://docs.rs/tokio-reactor/0.1.3")] +#![deny(missing_docs, warnings, missing_debug_implementations)] + +#[macro_use] +extern crate futures; +#[macro_use] +extern crate log; +extern crate mio; +extern crate slab; +extern crate tokio_executor; +extern crate tokio_io; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +mod atomic_task; +pub(crate) mod background; +mod poll_evented; +mod registration; + +// ===== Public re-exports ===== + +pub use self::background::{Background, Shutdown}; +pub use self::registration::Registration; +pub use self::poll_evented::PollEvented; + +// ===== Private imports ===== + +use atomic_task::AtomicTask; + +use tokio_executor::Enter; +use tokio_executor::park::{Park, Unpark}; + +use std::{fmt, usize}; +use std::io; +use std::mem; +use std::cell::RefCell; +use std::sync::atomic::Ordering::{Relaxed, SeqCst}; +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; +use std::sync::{Arc, Weak, RwLock}; +use std::time::{Duration, Instant}; + +use log::Level; +use mio::event::Evented; +use slab::Slab; + +/// The core reactor, or event loop. +/// +/// The event loop is the main source of blocking in an application which drives +/// all other I/O events and notifications happening. Each event loop can have +/// multiple handles pointing to it, each of which can then be used to create +/// various I/O objects to interact with the event loop in interesting ways. +pub struct Reactor { + /// Reuse the `mio::Events` value across calls to poll. 
+ events: mio::Events, + + /// State shared between the reactor and the handles. + inner: Arc, + + _wakeup_registration: mio::Registration, +} + +/// A reference to a reactor. +/// +/// A `Handle` is used for associating I/O objects with an event loop +/// explicitly. Typically though you won't end up using a `Handle` that often +/// and will instead use the default reactor for the execution context. +/// +/// By default, most components bind lazily to reactors. +/// To get this behavior when manually passing a `Handle`, use `default()`. +#[derive(Clone)] +pub struct Handle { + inner: Option, +} + +/// Like `Handle`, but never `None`. +#[derive(Clone)] +struct HandlePriv { + inner: Weak, +} + +/// Return value from the `turn` method on `Reactor`. +/// +/// Currently this value doesn't actually provide any functionality, but it may +/// in the future give insight into what happened during `turn`. +#[derive(Debug)] +pub struct Turn { + _priv: (), +} + +/// Error returned from `Handle::set_fallback`. +#[derive(Clone, Debug)] +pub struct SetFallbackError(()); + +#[deprecated(since = "0.1.2", note = "use SetFallbackError instead")] +#[doc(hidden)] +pub type SetDefaultError = SetFallbackError; + +#[test] +fn test_handle_size() { + use std::mem; + assert_eq!(mem::size_of::(), mem::size_of::()); +} + +struct Inner { + /// The underlying system event queue. + io: mio::Poll, + + /// ABA guard counter + next_aba_guard: AtomicUsize, + + /// Dispatch slabs for I/O and futures events + io_dispatch: RwLock>, + + /// Used to wake up the reactor from a call to `turn` + wakeup: mio::SetReadiness +} + +struct ScheduledIo { + aba_guard: usize, + readiness: AtomicUsize, + reader: AtomicTask, + writer: AtomicTask, +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy)] +pub(crate) enum Direction { + Read, + Write, +} + +/// The global fallback reactor. +static HANDLE_FALLBACK: AtomicUsize = ATOMIC_USIZE_INIT; + +/// Tracks the reactor for the current execution context. 
+thread_local!(static CURRENT_REACTOR: RefCell> = RefCell::new(None)); + +const TOKEN_SHIFT: usize = 22; + +// Kind of arbitrary, but this reserves some token space for later usage. +const MAX_SOURCES: usize = (1 << TOKEN_SHIFT) - 1; +const TOKEN_WAKEUP: mio::Token = mio::Token(MAX_SOURCES); + +fn _assert_kinds() { + fn _assert() {} + + _assert::(); +} + +/// A wakeup handle for a task, which may be either a futures 0.1 or 0.2 task +#[derive(Debug, Clone)] +pub(crate) enum Task { + Futures1(futures::task::Task), + #[cfg(feature = "unstable-futures")] + Futures2(futures2::task::Waker), +} + +// ===== impl Reactor ===== + +/// Set the default reactor for the duration of the closure +/// +/// # Panics +/// +/// This function panics if there already is a default reactor set. +pub fn with_default(handle: &Handle, enter: &mut Enter, f: F) -> R +where F: FnOnce(&mut Enter) -> R +{ + // Ensure that the executor is removed from the thread-local context + // when leaving the scope. This handles cases that involve panicking. + struct Reset; + + impl Drop for Reset { + fn drop(&mut self) { + CURRENT_REACTOR.with(|current| { + let mut current = current.borrow_mut(); + *current = None; + }); + } + } + + // This ensures the value for the current reactor gets reset even if there + // is a panic. + let _r = Reset; + + CURRENT_REACTOR.with(|current| { + { + let mut current = current.borrow_mut(); + + assert!(current.is_none(), "default Tokio reactor already set \ + for execution context"); + + let handle = match handle.as_priv() { + Some(handle) => handle, + None => { + panic!("`handle` does not reference a reactor"); + } + }; + + *current = Some(handle.clone()); + } + + f(enter) + }) +} + +impl Reactor { + /// Creates a new event loop, returning any error that happened during the + /// creation. 
+ pub fn new() -> io::Result { + let io = mio::Poll::new()?; + let wakeup_pair = mio::Registration::new2(); + + io.register(&wakeup_pair.0, + TOKEN_WAKEUP, + mio::Ready::readable(), + mio::PollOpt::level())?; + + Ok(Reactor { + events: mio::Events::with_capacity(1024), + _wakeup_registration: wakeup_pair.0, + inner: Arc::new(Inner { + io: io, + next_aba_guard: AtomicUsize::new(0), + io_dispatch: RwLock::new(Slab::with_capacity(1)), + wakeup: wakeup_pair.1, + }), + }) + } + + /// Returns a handle to this event loop which can be sent across threads + /// and can be used as a proxy to the event loop itself. + /// + /// Handles are cloneable and clones always refer to the same event loop. + /// This handle is typically passed into functions that create I/O objects + /// to bind them to this event loop. + pub fn handle(&self) -> Handle { + Handle { + inner: Some(HandlePriv { + inner: Arc::downgrade(&self.inner), + }), + } + } + + /// Configures the fallback handle to be returned from `Handle::default`. + /// + /// The `Handle::default()` function will by default lazily spin up a global + /// thread and run a reactor on this global thread. This behavior is not + /// always desirable in all applications, however, and sometimes a different + /// fallback reactor is desired. + /// + /// This function will attempt to globally alter the return value of + /// `Handle::default()` to return the `handle` specified rather than a + /// lazily initialized global thread. If successful then all future calls to + /// `Handle::default()` which would otherwise fall back to the global thread + /// will instead return a clone of the handle specified. + /// + /// # Errors + /// + /// This function may not always succeed in configuring the fallback handle. + /// If this function was previously called (or perhaps concurrently called + /// on many threads) only the *first* invocation of this function will + /// succeed. All other invocations will return an error. 
+ /// + /// Additionally if the global reactor thread has already been initialized + /// then this function will also return an error. (aka if `Handle::default` + /// has been called previously in this program). + pub fn set_fallback(&self) -> Result<(), SetFallbackError> { + set_fallback(self.handle().into_priv().unwrap()) + } + + /// Performs one iteration of the event loop, blocking on waiting for events + /// for at most `max_wait` (forever if `None`). + /// + /// This method is the primary method of running this reactor and processing + /// I/O events that occur. This method executes one iteration of an event + /// loop, blocking at most once waiting for events to happen. + /// + /// If a `max_wait` is specified then the method should block no longer than + /// the duration specified, but this shouldn't be used as a super-precise + /// timer but rather a "ballpark approximation" + /// + /// # Return value + /// + /// This function returns an instance of `Turn` + /// + /// `Turn` as of today has no extra information with it and can be safely + /// discarded. In the future `Turn` may contain information about what + /// happened while this reactor blocked. + /// + /// # Errors + /// + /// This function may also return any I/O error which occurs when polling + /// for readiness of I/O objects with the OS. This is quite unlikely to + /// arise and typically mean that things have gone horribly wrong at that + /// point. Currently this is primarily only known to happen for internal + /// bugs to `tokio` itself. + pub fn turn(&mut self, max_wait: Option) -> io::Result { + self.poll(max_wait)?; + Ok(Turn { _priv: () }) + } + + /// Returns true if the reactor is currently idle. + /// + /// Idle is defined as all tasks that have been spawned have completed, + /// either successfully or with an error. + pub fn is_idle(&self) -> bool { + self.inner.io_dispatch + .read().unwrap() + .is_empty() + } + + /// Run this reactor on a background thread. 
+ /// + /// This function takes ownership, spawns a new thread, and moves the + /// reactor to this new thread. It then runs the reactor, driving all + /// associated I/O resources, until the `Background` handle is dropped or + /// explicitly shutdown. + pub fn background(self) -> io::Result { + Background::new(self) + } + + fn poll(&mut self, max_wait: Option) -> io::Result<()> { + // Block waiting for an event to happen, peeling out how many events + // happened. + match self.inner.io.poll(&mut self.events, max_wait) { + Ok(_) => {} + Err(e) => return Err(e), + } + + let start = if log_enabled!(Level::Debug) { + Some(Instant::now()) + } else { + None + }; + + // Process all the events that came in, dispatching appropriately + let mut events = 0; + for event in self.events.iter() { + events += 1; + let token = event.token(); + trace!("event {:?} {:?}", event.readiness(), event.token()); + + if token == TOKEN_WAKEUP { + self.inner.wakeup.set_readiness(mio::Ready::empty()).unwrap(); + } else { + self.dispatch(token, event.readiness()); + } + } + + if let Some(start) = start { + let dur = start.elapsed(); + debug!("loop process - {} events, {}.{:03}s", + events, + dur.as_secs(), + dur.subsec_nanos() / 1_000_000); + } + + Ok(()) + } + + fn dispatch(&self, token: mio::Token, ready: mio::Ready) { + let aba_guard = token.0 & !MAX_SOURCES; + let token = token.0 & MAX_SOURCES; + + let mut rd = None; + let mut wr = None; + + // Create a scope to ensure that notifying the tasks stays out of the + // lock's critical section. 
+ { + let io_dispatch = self.inner.io_dispatch.read().unwrap(); + + let io = match io_dispatch.get(token) { + Some(io) => io, + None => return, + }; + + if aba_guard != io.aba_guard { + return; + } + + io.readiness.fetch_or(ready.as_usize(), Relaxed); + + if ready.is_writable() || platform::is_hup(&ready) { + wr = io.writer.take_to_notify(); + } + + if !(ready & (!mio::Ready::writable())).is_empty() { + rd = io.reader.take_to_notify(); + } + } + + if let Some(task) = rd { + task.notify(); + } + + if let Some(task) = wr { + task.notify(); + } + } +} + +impl Park for Reactor { + type Unpark = Handle; + type Error = io::Error; + + fn unpark(&self) -> Self::Unpark { + self.handle() + } + + fn park(&mut self) -> io::Result<()> { + self.turn(None)?; + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> io::Result<()> { + self.turn(Some(duration))?; + Ok(()) + } +} + +impl fmt::Debug for Reactor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Reactor") + } +} + +// ===== impl Handle ===== + +impl Handle { + /// Returns a handle to the current reactor. + pub fn current() -> Handle { + // TODO: Should this panic on error? + HandlePriv::try_current() + .map(|handle| Handle { + inner: Some(handle), + }) + .unwrap_or(Handle { + inner: Some(HandlePriv { + inner: Weak::new(), + }) + }) + } + + fn as_priv(&self) -> Option<&HandlePriv> { + self.inner.as_ref() + } + + fn into_priv(self) -> Option { + self.inner + } + + fn wakeup(&self) { + if let Some(handle) = self.as_priv() { + handle.wakeup(); + } + } +} + +impl Unpark for Handle { + fn unpark(&self) { + if let Some(ref h) = self.inner { + h.wakeup(); + } + } +} + +impl Default for Handle { + /// Returns a "default" handle, i.e., a handle that lazily binds to a reactor. 
+ fn default() -> Handle { + Handle { inner: None } + } +} + +impl fmt::Debug for Handle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Handle") + } +} + +fn set_fallback(handle: HandlePriv) -> Result<(), SetFallbackError> { + unsafe { + let val = handle.into_usize(); + match HANDLE_FALLBACK.compare_exchange(0, val, SeqCst, SeqCst) { + Ok(_) => Ok(()), + Err(_) => { + drop(HandlePriv::from_usize(val)); + Err(SetFallbackError(())) + } + } + } +} + +// ===== impl HandlePriv ===== + +impl HandlePriv { + /// Try to get a handle to the current reactor. + /// + /// Returns `Err` if no handle is found. + pub(crate) fn try_current() -> io::Result { + CURRENT_REACTOR.with(|current| { + match *current.borrow() { + Some(ref handle) => Ok(handle.clone()), + None => HandlePriv::fallback(), + } + }) + } + + /// Returns a handle to the fallback reactor. + fn fallback() -> io::Result { + let mut fallback = HANDLE_FALLBACK.load(SeqCst); + + // If the fallback hasn't been previously initialized then let's spin + // up a helper thread and try to initialize with that. If we can't + // actually create a helper thread then we'll just return a "defunct" + // handle which will return errors when I/O objects are attempted to be + // associated. + if fallback == 0 { + let reactor = match Reactor::new() { + Ok(reactor) => reactor, + Err(_) => return Err(io::Error::new(io::ErrorKind::Other, + "failed to create reactor")), + }; + + // If we successfully set ourselves as the actual fallback then we + // want to `forget` the helper thread to ensure that it persists + // globally. If we fail to set ourselves as the fallback that means + // that someone was racing with this call to `Handle::default`. + // They ended up winning so we'll destroy our helper thread (which + // shuts down the thread) and reload the fallback. 
+ if set_fallback(reactor.handle().into_priv().unwrap()).is_ok() { + let ret = reactor.handle().into_priv().unwrap(); + + match reactor.background() { + Ok(bg) => bg.forget(), + // The global handle is fubar, but y'all probably got bigger + // problems if a thread can't spawn. + Err(_) => {} + } + + return Ok(ret); + } + + fallback = HANDLE_FALLBACK.load(SeqCst); + } + + // At this point our fallback handle global was configured so we use + // its value to reify a handle, clone it, and then forget our reified + // handle as we don't actually have an owning reference to it. + assert!(fallback != 0); + + let ret = unsafe { + let handle = HandlePriv::from_usize(fallback); + let ret = handle.clone(); + + // This prevents `handle` from being dropped and having the ref + // count decremented. + drop(handle.into_usize()); + + ret + }; + + Ok(ret) + } + + /// Forces a reactor blocked in a call to `turn` to wakeup, or otherwise + /// makes the next call to `turn` return immediately. + /// + /// This method is intended to be used in situations where a notification + /// needs to otherwise be sent to the main reactor. If the reactor is + /// currently blocked inside of `turn` then it will wake up and soon return + /// after this method has been called. If the reactor is not currently + /// blocked in `turn`, then the next call to `turn` will not block and + /// return immediately. 
+ fn wakeup(&self) { + if let Some(inner) = self.inner() { + inner.wakeup.set_readiness(mio::Ready::readable()).unwrap(); + } + } + + fn into_usize(self) -> usize { + unsafe { + mem::transmute::, usize>(self.inner) + } + } + + unsafe fn from_usize(val: usize) -> HandlePriv { + let inner = mem::transmute::>(val);; + HandlePriv { inner } + } + + fn inner(&self) -> Option> { + self.inner.upgrade() + } +} + +impl fmt::Debug for HandlePriv { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "HandlePriv") + } +} + +// ===== impl Inner ===== + +impl Inner { + /// Register an I/O resource with the reactor. + /// + /// The registration token is returned. + fn add_source(&self, source: &Evented) + -> io::Result + { + // Get an ABA guard value + let aba_guard = self.next_aba_guard.fetch_add(1 << TOKEN_SHIFT, Relaxed); + + let mut io_dispatch = self.io_dispatch.write().unwrap(); + + if io_dispatch.len() == MAX_SOURCES { + return Err(io::Error::new(io::ErrorKind::Other, "reactor at max \ + registered I/O resources")); + } + + // Acquire a write lock + let key = io_dispatch.insert(ScheduledIo { + aba_guard, + readiness: AtomicUsize::new(0), + reader: AtomicTask::new(), + writer: AtomicTask::new(), + }); + + try!(self.io.register(source, + mio::Token(aba_guard | key), + mio::Ready::all(), + mio::PollOpt::edge())); + + Ok(key) + } + + /// Deregisters an I/O resource from the reactor. + fn deregister_source(&self, source: &Evented) -> io::Result<()> { + self.io.deregister(source) + } + + fn drop_source(&self, token: usize) { + debug!("dropping I/O source: {}", token); + self.io_dispatch.write().unwrap().remove(token); + } + + /// Registers interest in the I/O resource associated with `token`. 
+ fn register(&self, token: usize, dir: Direction, t: Task) { + debug!("scheduling direction for: {}", token); + let io_dispatch = self.io_dispatch.read().unwrap(); + let sched = io_dispatch.get(token).unwrap(); + + let (task, ready) = match dir { + Direction::Read => (&sched.reader, !mio::Ready::writable()), + Direction::Write => (&sched.writer, mio::Ready::writable()), + }; + + task.register_task(t); + + if sched.readiness.load(SeqCst) & ready.as_usize() != 0 { + task.notify(); + } + } +} + +impl Drop for Inner { + fn drop(&mut self) { + // When a reactor is dropped it needs to wake up all blocked tasks as + // they'll never receive a notification, and all connected I/O objects + // will start returning errors pretty quickly. + let io = self.io_dispatch.read().unwrap(); + for (_, io) in io.iter() { + io.writer.notify(); + io.reader.notify(); + } + } +} + +impl Direction { + fn mask(&self) -> mio::Ready { + match *self { + Direction::Read => { + // Everything except writable is signaled through read. 
+ mio::Ready::all() - mio::Ready::writable() + } + Direction::Write => mio::Ready::writable() | platform::hup(), + } + } +} + +impl Task { + fn notify(&self) { + match *self { + Task::Futures1(ref task) => task.notify(), + + #[cfg(feature = "unstable-futures")] + Task::Futures2(ref waker) => waker.wake(), + } + } +} + +#[cfg(unix)] +mod platform { + use mio::Ready; + use mio::unix::UnixReady; + + pub fn hup() -> Ready { + UnixReady::hup().into() + } + + pub fn is_hup(ready: &Ready) -> bool { + UnixReady::from(*ready).is_hup() + } +} + +#[cfg(windows)] +mod platform { + use mio::Ready; + + pub fn hup() -> Ready { + Ready::empty() + } + + pub fn is_hup(_: &Ready) -> bool { + false + } +} + +#[cfg(feature = "unstable-futures")] +fn lift_async(old: futures::Async) -> futures2::Async { + match old { + futures::Async::Ready(x) => futures2::Async::Ready(x), + futures::Async::NotReady => futures2::Async::Pending, + } +} + +#[cfg(feature = "unstable-futures")] +fn lower_async(new: futures2::Async) -> futures::Async { + match new { + futures2::Async::Ready(x) => futures::Async::Ready(x), + futures2::Async::Pending => futures::Async::NotReady, + } +} diff --git a/third_party/rust/tokio-reactor/src/poll_evented.rs b/third_party/rust/tokio-reactor/src/poll_evented.rs new file mode 100644 index 000000000000..99dd8aa46709 --- /dev/null +++ b/third_party/rust/tokio-reactor/src/poll_evented.rs @@ -0,0 +1,664 @@ +use {Handle, Registration}; + +use futures::{task, Async, Poll}; +use mio; +use mio::event::Evented; +use tokio_io::{AsyncRead, AsyncWrite}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +use std::fmt; +use std::io::{self, Read, Write}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; + +/// Associates an I/O resource that implements the [`std::Read`] and / or +/// [`std::Write`] traits with the reactor that drives it. 
+/// +/// `PollEvented` uses [`Registration`] internally to take a type that +/// implements [`mio::Evented`] as well as [`std::Read`] and or [`std::Write`] +/// and associate it with a reactor that will drive it. +/// +/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be +/// used from within the future's execution model. As such, the `PollEvented` +/// type provides [`AsyncRead`] and [`AsyncWrite`] implementations using the +/// underlying I/O resource as well as readiness events provided by the reactor. +/// +/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is +/// `Sync`), the caller must ensure that there are at most two tasks that use a +/// `PollEvented` instance concurrently. One for reading and one for writing. +/// While violating this requirement is "safe" from a Rust memory model point of +/// view, it will result in unexpected behavior in the form of lost +/// notifications and tasks hanging. +/// +/// ## Readiness events +/// +/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, +/// this type also supports access to the underlying readiness event stream. +/// While similar in function to what [`Registration`] provides, the semantics +/// are a bit different. +/// +/// Two functions are provided to access the readiness events: +/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the +/// current readiness state of the `PollEvented` instance. If +/// [`poll_read_ready`] indicates read readiness, immediately calling +/// [`poll_read_ready`] again will also indicate read readiness. +/// +/// When the operation is attempted and is unable to succeed due to the I/O +/// resource not being ready, the caller must call [`clear_read_ready`] or +/// [`clear_write_ready`]. This clears the readiness state until a new readiness +/// event is received. +/// +/// This allows the caller to implement additional functions. 
For example, +/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and +/// [`clear_read_ready`]. +/// +/// ```rust,ignore +/// pub fn poll_accept(&mut self) -> Poll<(net::TcpStream, SocketAddr), io::Error> { +/// let ready = Ready::readable(); +/// +/// try_ready!(self.poll_evented.poll_read_ready(ready)); +/// +/// match self.poll_evented.get_ref().accept_std() { +/// Ok(pair) => Ok(Async::Ready(pair)), +/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { +/// self.poll_evented.clear_read_ready(ready); +/// Ok(Async::NotReady) +/// } +/// Err(e) => Err(e), +/// } +/// } +/// ``` +/// +/// ## Platform-specific events +/// +/// `PollEvented` also allows receiving platform-specific `mio::Ready` events. +/// These events are included as part of the read readiness event stream. The +/// write readiness event stream is only for `Ready::writable()` events. +/// +/// [`std::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +/// [`std::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +/// [`AsyncRead`]: ../io/trait.AsyncRead.html +/// [`AsyncWrite`]: ../io/trait.AsyncWrite.html +/// [`mio::Evented`]: https://docs.rs/mio/0.6/mio/trait.Evented.html +/// [`Registration`]: struct.Registration.html +/// [`TcpListener`]: ../net/struct.TcpListener.html +/// [`clear_read_ready`]: #method.clear_read_ready +/// [`clear_read_ready`]: #method.clear_read_ready +/// [`poll_read_ready`]: #method.poll_read_ready +/// [`poll_write_ready`]: #method.poll_write_ready +pub struct PollEvented { + io: Option, + inner: Inner, +} + +struct Inner { + registration: Registration, + + /// Currently visible read readiness + read_readiness: AtomicUsize, + + /// Currently visible write readiness + write_readiness: AtomicUsize, +} + +// ===== impl PollEvented ===== + +macro_rules! poll_ready { + ($me:expr, $mask:expr, $cache:ident, $take:ident, $poll:expr) => {{ + $me.register()?; + + // Load cached & encoded readiness. 
+ let mut cached = $me.inner.$cache.load(Relaxed); + let mask = $mask | ::platform::hup(); + + // See if the current readiness matches any bits. + let mut ret = mio::Ready::from_usize(cached) & $mask; + + if ret.is_empty() { + // Readiness does not match, consume the registration's readiness + // stream. This happens in a loop to ensure that the stream gets + // drained. + loop { + let ready = try_ready!($poll); + cached |= ready.as_usize(); + + // Update the cache store + $me.inner.$cache.store(cached, Relaxed); + + ret |= ready & mask; + + if !ret.is_empty() { + return Ok(ret.into()); + } + } + } else { + // Check what's new with the registration stream. This will not + // request to be notified + if let Some(ready) = $me.inner.registration.$take()? { + cached |= ready.as_usize(); + $me.inner.$cache.store(cached, Relaxed); + } + + Ok(mio::Ready::from_usize(cached).into()) + } + }} +} + +impl PollEvented +where E: Evented +{ + /// Creates a new `PollEvented` associated with the default reactor. + pub fn new(io: E) -> PollEvented { + PollEvented { + io: Some(io), + inner: Inner { + registration: Registration::new(), + read_readiness: AtomicUsize::new(0), + write_readiness: AtomicUsize::new(0), + } + } + } + + /// Creates a new `PollEvented` associated with the specified reactor. + pub fn new_with_handle(io: E, handle: &Handle) -> io::Result { + let ret = PollEvented::new(io); + + if let Some(handle) = handle.as_priv() { + ret.inner.registration + .register_with_priv(ret.io.as_ref().unwrap(), handle)?; + } + + Ok(ret) + } + + /// Returns a shared reference to the underlying I/O object this readiness + /// stream is wrapping. + pub fn get_ref(&self) -> &E { + self.io.as_ref().unwrap() + } + + /// Returns a mutable reference to the underlying I/O object this readiness + /// stream is wrapping. 
+ pub fn get_mut(&mut self) -> &mut E { + self.io.as_mut().unwrap() + } + + /// Consumes self, returning the inner I/O object + /// + /// This function will deregister the I/O resource from the reactor before + /// returning. If the deregistration operation fails, an error is returned. + /// + /// Note that deregistering does not guarantee that the I/O resource can be + /// registered with a different reactor. Some I/O resource types can only be + /// associated with a single reactor instance for their lifetime. + pub fn into_inner(mut self) -> io::Result { + let io = self.io.take().unwrap(); + self.inner.registration.deregister(&io)?; + Ok(io) + } + + /// Check the I/O resource's read readiness state. + /// + /// The mask argument allows specifying what readiness to notify on. This + /// can be any value, including platform specific readiness, **except** + /// `writable`. HUP is always implicitly included on platforms that support + /// it. + /// + /// If the resource is not ready for a read then `Async::NotReady` is + /// returned and the current task is notified once a new event is received. + /// + /// The I/O resource will remain in a read-ready state until readiness is + /// cleared by calling [`clear_read_ready`]. + /// + /// [`clear_read_ready`]: #method.clear_read_ready + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` includes writable. + /// * called from outside of a task context. + pub fn poll_read_ready(&self, mask: mio::Ready) -> Poll { + assert!(!mask.is_writable(), "cannot poll for write readiness"); + poll_ready!( + self, mask, read_readiness, take_read_ready, + self.inner.registration.poll_read_ready() + ) + } + + /// Like `poll_read_ready` but compatible with futures 0.2. 
+ #[cfg(feature = "unstable-futures")] + pub fn poll_read_ready2(&self, cx: &mut futures2::task::Context, mask: mio::Ready) + -> futures2::Poll + { + assert!(!mask.is_writable(), "cannot poll for write readiness"); + let mut res = || poll_ready!( + self, mask, read_readiness, take_read_ready, + self.inner.registration.poll_read_ready2(cx).map(::lower_async) + ); + res().map(::lift_async) + } + + /// Clears the I/O resource's read readiness state and registers the current + /// task to be notified once a read readiness event is received. + /// + /// After calling this function, `poll_read_ready` will return `NotReady` + /// until a new read readiness event has been received. + /// + /// The `mask` argument specifies the readiness bits to clear. This may not + /// include `writable` or `hup`. + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` includes writable or HUP + /// * called from outside of a task context. + pub fn clear_read_ready(&self, ready: mio::Ready) -> io::Result<()> { + // Cannot clear write readiness + assert!(!ready.is_writable(), "cannot clear write readiness"); + assert!(!::platform::is_hup(&ready), "cannot clear HUP readiness"); + + self.inner.read_readiness.fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_read_ready(ready)?.is_ready() { + // Notify the current task + task::current().notify(); + } + + Ok(()) + } + + /// Like `clear_read_ready` but compatible with futures 0.2. 
+ #[cfg(feature = "unstable-futures")] + pub fn clear_read_ready2(&self, cx: &mut futures2::task::Context, ready: mio::Ready) + -> io::Result<()> + { + // Cannot clear write readiness + assert!(!ready.is_writable(), "cannot clear write readiness"); + assert!(!::platform::is_hup(&ready), "cannot clear HUP readiness"); + + self.inner.read_readiness.fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_read_ready2(cx, ready)?.is_ready() { + // Notify the current task + cx.waker().wake() + } + + Ok(()) + } + + /// Check the I/O resource's write readiness state. + /// + /// This always checks for writable readiness and also checks for HUP + /// readiness on platforms that support it. + /// + /// If the resource is not ready for a write then `Async::NotReady` is + /// returned and the current task is notified once a new event is received. + /// + /// The I/O resource will remain in a write-ready state until readiness is + /// cleared by calling [`clear_write_ready`]. + /// + /// [`clear_write_ready`]: #method.clear_write_ready + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` contains bits besides `writable` and `hup`. + /// * called from outside of a task context. + pub fn poll_write_ready(&self) -> Poll { + poll_ready!( + self, + mio::Ready::writable(), + write_readiness, + take_write_ready, + self.inner.registration.poll_write_ready() + ) + } + + /// Like `poll_write_ready` but compatible with futures 0.2. + #[cfg(feature = "unstable-futures")] + pub fn poll_write_ready2(&self, cx: &mut futures2::task::Context) + -> futures2::Poll + { + let mut res = || poll_ready!( + self, + mio::Ready::writable(), + write_readiness, + take_write_ready, + self.inner.registration.poll_write_ready2(cx).map(::lower_async) + ); + res().map(::lift_async) + } + + + /// Resets the I/O resource's write readiness state and registers the current + /// task to be notified once a write readiness event is received. + /// + /// This only clears writable readiness. 
HUP (on platforms that support HUP) + /// cannot be cleared as it is a final state. + /// + /// After calling this function, `poll_write_ready(Ready::writable())` will + /// return `NotReady` until a new write readiness event has been received. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn clear_write_ready(&self) -> io::Result<()> { + let ready = mio::Ready::writable(); + + self.inner.write_readiness.fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_write_ready()?.is_ready() { + // Notify the current task + task::current().notify(); + } + + Ok(()) + } + + /// Like `clear_write_ready`, but compatible with futures 0.2. + #[cfg(feature = "unstable-futures")] + pub fn clear_write_ready2(&self, cx: &mut futures2::task::Context) -> io::Result<()> { + let ready = mio::Ready::writable(); + + self.inner.write_readiness.fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_write_ready2(cx)?.is_ready() { + // Notify the current task + cx.waker().wake() + } + + Ok(()) + } + + /// Ensure that the I/O resource is registered with the reactor. + fn register(&self) -> io::Result<()> { + self.inner.registration.register(self.io.as_ref().unwrap())?; + Ok(()) + } +} + +// ===== Read / Write impls ===== + +impl Read for PollEvented +where E: Evented + Read, +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().read(buf); + + if is_wouldblock(&r) { + self.clear_read_ready(mio::Ready::readable())?; + } + + return r + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::io::AsyncRead for PollEvented + where E: Evented, E: Read, +{ + fn poll_read(&mut self, cx: &mut futures2::task::Context, buf: &mut [u8]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.poll_read_ready2(cx, mio::Ready::readable())? 
{ + return Ok(futures2::Async::Pending); + } + + match self.get_mut().read(buf) { + Ok(n) => Ok(futures2::Async::Ready(n)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_read_ready2(cx, mio::Ready::readable())?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } +} + +impl Write for PollEvented +where E: Evented + Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().write(buf); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } + + fn flush(&mut self) -> io::Result<()> { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().flush(); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::io::AsyncWrite for PollEvented + where E: Evented, E: Write, +{ + fn poll_write(&mut self, cx: &mut futures2::task::Context, buf: &[u8]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.poll_write_ready2(cx)? { + return Ok(futures2::Async::Pending); + } + + match self.get_mut().write(buf) { + Ok(n) => Ok(futures2::Async::Ready(n)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_write_ready2(cx)?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + fn poll_flush(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + if let futures2::Async::Pending = self.poll_write_ready2(cx)? 
{ + return Ok(futures2::Async::Pending); + } + + match self.get_mut().flush() { + Ok(_) => Ok(futures2::Async::Ready(())), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_write_ready2(cx)?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + fn poll_close(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + futures2::io::AsyncWrite::poll_flush(self, cx) + } +} + + +impl AsyncRead for PollEvented +where E: Evented + Read, +{ +} + +impl AsyncWrite for PollEvented +where E: Evented + Write, +{ + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +// ===== &'a Read / &'a Write impls ===== + +impl<'a, E> Read for &'a PollEvented +where E: Evented, &'a E: Read, +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_ref().read(buf); + + if is_wouldblock(&r) { + self.clear_read_ready(mio::Ready::readable())?; + } + + return r + } +} + +#[cfg(feature = "unstable-futures")] +impl<'a, E> futures2::io::AsyncRead for &'a PollEvented + where E: Evented, &'a E: Read, +{ + fn poll_read(&mut self, cx: &mut futures2::task::Context, buf: &mut [u8]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.poll_read_ready2(cx, mio::Ready::readable())? { + return Ok(futures2::Async::Pending); + } + + match self.get_ref().read(buf) { + Ok(n) => Ok(futures2::Async::Ready(n)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_read_ready2(cx, mio::Ready::readable())?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } +} + +impl<'a, E> Write for &'a PollEvented +where E: Evented, &'a E: Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + if let Async::NotReady = self.poll_write_ready()? 
{ + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_ref().write(buf); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } + + fn flush(&mut self) -> io::Result<()> { + if let Async::NotReady = self.poll_write_ready()? { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_ref().flush(); + + if is_wouldblock(&r) { + self.clear_write_ready()?; + } + + return r + } +} + +#[cfg(feature = "unstable-futures")] +impl<'a, E> futures2::io::AsyncWrite for &'a PollEvented + where E: Evented, &'a E: Write, +{ + fn poll_write(&mut self, cx: &mut futures2::task::Context, buf: &[u8]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.poll_write_ready2(cx)? { + return Ok(futures2::Async::Pending); + } + + match self.get_ref().write(buf) { + Ok(n) => Ok(futures2::Async::Ready(n)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_write_ready2(cx)?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + fn poll_flush(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + if let futures2::Async::Pending = self.poll_write_ready2(cx)? 
{ + return Ok(futures2::Async::Pending); + } + + match self.get_ref().flush() { + Ok(_) => Ok(futures2::Async::Ready(())), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_write_ready2(cx)?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + fn poll_close(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + futures2::io::AsyncWrite::poll_flush(self, cx) + } +} + +impl<'a, E> AsyncRead for &'a PollEvented +where E: Evented, &'a E: Read, +{ +} + +impl<'a, E> AsyncWrite for &'a PollEvented +where E: Evented, &'a E: Write, +{ + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +fn is_wouldblock(r: &io::Result) -> bool { + match *r { + Ok(_) => false, + Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, + } +} + +impl fmt::Debug for PollEvented { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PollEvented") + .field("io", &self.io) + .finish() + } +} + +impl Drop for PollEvented { + fn drop(&mut self) { + if let Some(io) = self.io.take() { + // Ignore errors + let _ = self.inner.registration.deregister(&io); + } + } +} diff --git a/third_party/rust/tokio-reactor/src/registration.rs b/third_party/rust/tokio-reactor/src/registration.rs new file mode 100644 index 000000000000..278b57680c18 --- /dev/null +++ b/third_party/rust/tokio-reactor/src/registration.rs @@ -0,0 +1,569 @@ +use {Handle, HandlePriv, Direction, Task}; + +use futures::{Async, Poll, task}; +use mio::{self, Evented}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +use std::{io, ptr, usize}; +use std::cell::UnsafeCell; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +/// Associates an I/O resource with the reactor instance that drives it. +/// +/// A registration represents an I/O resource registered with a Reactor such +/// that it will receive task notifications on readiness. This is the lowest +/// level API for integrating with a reactor. 
+/// +/// The association between an I/O resource is made by calling [`register`]. +/// Once the association is established, it remains established until the +/// registration instance is dropped. Subsequent calls to [`register`] are +/// no-ops. +/// +/// A registration instance represents two separate readiness streams. One for +/// the read readiness and one for write readiness. These streams are +/// independent and can be consumed from separate tasks. +/// +/// **Note**: while `Registration` is `Sync`, the caller must ensure that there +/// are at most two tasks that use a registration instance concurrently. One +/// task for [`poll_read_ready`] and one task for [`poll_write_ready`]. While +/// violating this requirement is "safe" from a Rust memory safety point of +/// view, it will result in unexpected behavior in the form of lost +/// notifications and tasks hanging. +/// +/// ## Platform-specific events +/// +/// `Registration` also allows receiving platform-specific `mio::Ready` events. +/// These events are included as part of the read readiness event stream. The +/// write readiness event stream is only for `Ready::writable()` events. +/// +/// [`register`]: #method.register +/// [`poll_read_ready`]: #method.poll_read_ready`] +/// [`poll_write_ready`]: #method.poll_write_ready`] +#[derive(Debug)] +pub struct Registration { + /// Stores the handle. Once set, the value is not changed. + /// + /// Setting this requires acquiring the lock from state. + inner: UnsafeCell>, + + /// Tracks the state of the registration. + /// + /// The least significant 2 bits are used to track the lifecycle of the + /// registration. The rest of the `state` variable is a pointer to tasks + /// that must be notified once the lock is released. + state: AtomicUsize, +} + +#[derive(Debug)] +struct Inner { + handle: HandlePriv, + token: usize, +} + +/// Tasks waiting on readiness notifications. 
+#[derive(Debug)] +struct Node { + direction: Direction, + task: Task, + next: *mut Node, +} + +/// Initial state. The handle is not set and the registration is idle. +const INIT: usize = 0; + +/// A thread locked the state and will associate a handle. +const LOCKED: usize = 1; + +/// A handle has been associated with the registration. +const READY: usize = 2; + +/// Masks the lifecycle state +const LIFECYCLE_MASK: usize = 0b11; + +/// A fake token used to identify error situations +const ERROR: usize = usize::MAX; + +// ===== impl Registration ===== + +impl Registration { + /// Create a new `Registration`. + /// + /// This registration is not associated with a Reactor instance. Call + /// `register` to establish the association. + pub fn new() -> Registration { + Registration { + inner: UnsafeCell::new(None), + state: AtomicUsize::new(INIT), + } + } + + /// Register the I/O resource with the default reactor. + /// + /// This function is safe to call concurrently and repeatedly. However, only + /// the first call will establish the registration. Subsequent calls will be + /// no-ops. + /// + /// # Return + /// + /// If the registration happened successfully, `Ok(true)` is returned. + /// + /// If an I/O resource has previously been successfully registered, + /// `Ok(false)` is returned. + /// + /// If an error is encountered during registration, `Err` is returned. + pub fn register(&self, io: &T) -> io::Result + where T: Evented, + { + self.register2(io, || HandlePriv::try_current()) + } + + /// Deregister the I/O resource from the reactor it is associated with. + /// + /// This function must be called before the I/O resource associated with the + /// registration is dropped. + /// + /// Note that deregistering does not guarantee that the I/O resource can be + /// registered with a different reactor. Some I/O resource types can only be + /// associated with a single reactor instance for their lifetime. 
+ /// + /// # Return + /// + /// If the deregistration was successful, `Ok` is returned. Any calls to + /// `Reactor::turn` that happen after a successful call to `deregister` will + /// no longer result in notifications getting sent for this registration. + /// + /// `Err` is returned if an error is encountered. + pub fn deregister(&mut self, io: &T) -> io::Result<()> + where T: Evented, + { + // The state does not need to be checked and coordination is not + // necessary as this function takes `&mut self`. This guarantees a + // single thread is accessing the instance. + if let Some(inner) = unsafe { (*self.inner.get()).as_ref() } { + inner.deregister(io)?; + } + + Ok(()) + } + + /// Register the I/O resource with the specified reactor. + /// + /// This function is safe to call concurrently and repeatedly. However, only + /// the first call will establish the registration. Subsequent calls will be + /// no-ops. + /// + /// If the registration happened successfully, `Ok(true)` is returned. + /// + /// If an I/O resource has previously been successfully registered, + /// `Ok(false)` is returned. + /// + /// If an error is encountered during registration, `Err` is returned. + pub fn register_with(&self, io: &T, handle: &Handle) -> io::Result + where T: Evented, + { + self.register2(io, || { + match handle.as_priv() { + Some(handle) => Ok(handle.clone()), + None => HandlePriv::try_current(), + } + }) + } + + pub(crate) fn register_with_priv(&self, io: &T, handle: &HandlePriv) -> io::Result + where T: Evented, + { + self.register2(io, || Ok(handle.clone())) + } + + fn register2(&self, io: &T, f: F) -> io::Result + where T: Evented, + F: Fn() -> io::Result, + { + let mut state = self.state.load(SeqCst); + + loop { + match state { + INIT => { + // Registration is currently not associated with a handle. + // Get a handle then attempt to lock the state. 
+ let handle = f()?; + + let actual = self.state.compare_and_swap(INIT, LOCKED, SeqCst); + + if actual != state { + state = actual; + continue; + } + + // Create the actual registration + let (inner, res) = Inner::new(io, handle); + + unsafe { *self.inner.get() = Some(inner); } + + // Transition out of the locked state. This acquires the + // current value, potentially having a list of tasks that + // are pending readiness notifications. + let actual = self.state.swap(READY, SeqCst); + + // Consume the stack of nodes + + let mut read = false; + let mut write = false; + let mut ptr = (actual & !LIFECYCLE_MASK) as *mut Node; + + let inner = unsafe { (*self.inner.get()).as_ref().unwrap() }; + + while !ptr.is_null() { + let node = unsafe { Box::from_raw(ptr) }; + let node = *node; + let Node { + direction, + task, + next, + } = node; + + let flag = match direction { + Direction::Read => &mut read, + Direction::Write => &mut write, + }; + + if !*flag { + *flag = true; + + inner.register(direction, task); + } + + ptr = next; + } + + return res.map(|_| true); + } + _ => return Ok(false), + } + } + } + + /// Poll for events on the I/O resource's read readiness stream. + /// + /// If the I/O resource receives a new read readiness event since the last + /// call to `poll_read_ready`, it is returned. If it has not, the current + /// task is notified once a new event is received. + /// + /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned, + /// the function will always return `Ready(HUP)`. This should be treated as + /// the end of the readiness stream. + /// + /// Ensure that [`register`] has been called first. + /// + /// # Return value + /// + /// There are several possible return values: + /// + /// * `Ok(Async::Ready(readiness))` means that the I/O resource has received + /// a new readiness event. The readiness value is included. 
+ /// + /// * `Ok(NotReady)` means that no new readiness events have been received + /// since the last call to `poll_read_ready`. + /// + /// * `Err(err)` means that the registration has encountered an error. This + /// error either represents a permanent internal error **or** the fact + /// that [`register`] was not called first. + /// + /// [`register`]: #method.register + /// [edge-triggered]: https://docs.rs/mio/0.6/mio/struct.Poll.html#edge-triggered-and-level-triggered + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_read_ready(&self) -> Poll { + self.poll_ready(Direction::Read, true, || Task::Futures1(task::current())) + .map(|v| match v { + Some(v) => Async::Ready(v), + _ => Async::NotReady, + }) + } + + /// Like `poll_ready_ready`, but compatible with futures 0.2 + #[cfg(feature = "unstable-futures")] + pub fn poll_read_ready2(&self, cx: &mut futures2::task::Context) + -> futures2::Poll + { + use futures2::Async as Async2; + self.poll_ready(Direction::Read, true, || Task::Futures2(cx.waker().clone())) + .map(|v| match v { + Some(v) => Async2::Ready(v), + _ => Async2::Pending, + }) + } + + /// Consume any pending read readiness event. + /// + /// This function is identical to [`poll_read_ready`] **except** that it + /// will not notify the current task when a new event is received. As such, + /// it is safe to call this function from outside of a task context. + /// + /// [`poll_read_ready`]: #method.poll_read_ready + pub fn take_read_ready(&self) -> io::Result> { + self.poll_ready(Direction::Read, false, || panic!()) + + } + + /// Poll for events on the I/O resource's write readiness stream. + /// + /// If the I/O resource receives a new write readiness event since the last + /// call to `poll_write_ready`, it is returned. If it has not, the current + /// task is notified once a new event is received. + /// + /// All events except `HUP` are [edge-triggered]. 
Once `HUP` is returned, + /// the function will always return `Ready(HUP)`. This should be treated as + /// the end of the readiness stream. + /// + /// Ensure that [`register`] has been called first. + /// + /// # Return value + /// + /// There are several possible return values: + /// + /// * `Ok(Async::Ready(readiness))` means that the I/O resource has received + /// a new readiness event. The readiness value is included. + /// + /// * `Ok(NotReady)` means that no new readiness events have been received + /// since the last call to `poll_write_ready`. + /// + /// * `Err(err)` means that the registration has encountered an error. This + /// error either represents a permanent internal error **or** the fact + /// that [`register`] was not called first. + /// + /// [`register`]: #method.register + /// [edge-triggered]: https://docs.rs/mio/0.6/mio/struct.Poll.html#edge-triggered-and-level-triggered + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_write_ready(&self) -> Poll { + self.poll_ready(Direction::Write, true, || Task::Futures1(task::current())) + .map(|v| match v { + Some(v) => Async::Ready(v), + _ => Async::NotReady, + }) + } + + /// Like `poll_write_ready`, but compatible with futures 0.2 + #[cfg(feature = "unstable-futures")] + pub fn poll_write_ready2(&self, cx: &mut futures2::task::Context) + -> futures2::Poll + { + use futures2::Async as Async2; + self.poll_ready(Direction::Write, true, || Task::Futures2(cx.waker().clone())) + .map(|v| match v { + Some(v) => Async2::Ready(v), + _ => Async2::Pending, + }) + } + + /// Consume any pending write readiness event. + /// + /// This function is identical to [`poll_write_ready`] **except** that it + /// will not notify the current task when a new event is received. As such, + /// it is safe to call this function from outside of a task context. 
+ /// + /// [`poll_write_ready`]: #method.poll_write_ready + pub fn take_write_ready(&self) -> io::Result> { + self.poll_ready(Direction::Write, false, || unreachable!()) + } + + fn poll_ready(&self, direction: Direction, notify: bool, task: F) + -> io::Result> + where F: Fn() -> Task + { + let mut state = self.state.load(SeqCst); + + // Cache the node pointer + let mut node = None; + + loop { + match state { + INIT => { + return Err(io::Error::new(io::ErrorKind::Other, "must call `register` + before poll_read_ready")); + } + READY => { + let inner = unsafe { (*self.inner.get()).as_ref().unwrap() }; + return inner.poll_ready(direction, notify, task); + } + LOCKED => { + if !notify { + // Skip the notification tracking junk. + return Ok(None); + } + + let next_ptr = (state & !LIFECYCLE_MASK) as *mut Node; + + let task = task(); + + // Get the node + let mut n = node.take().unwrap_or_else(|| { + Box::new(Node { + direction, + task: task, + next: ptr::null_mut(), + }) + }); + + n.next = next_ptr; + + let node_ptr = Box::into_raw(n); + let next = node_ptr as usize | (state & LIFECYCLE_MASK); + + let actual = self.state.compare_and_swap(state, next, SeqCst); + + if actual != state { + // Back out of the node boxing + let n = unsafe { Box::from_raw(node_ptr) }; + + // Save this for next loop + node = Some(n); + + state = actual; + continue; + } + + return Ok(None); + } + _ => unreachable!(), + } + } + } +} + +unsafe impl Send for Registration {} +unsafe impl Sync for Registration {} + +// ===== impl Inner ===== + +impl Inner { + fn new(io: &T, handle: HandlePriv) -> (Self, io::Result<()>) + where T: Evented, + { + let mut res = Ok(()); + + let token = match handle.inner() { + Some(inner) => match inner.add_source(io) { + Ok(token) => token, + Err(e) => { + res = Err(e); + ERROR + } + }, + None => { + res = Err(io::Error::new(io::ErrorKind::Other, "event loop gone")); + ERROR + } + }; + + let inner = Inner { + handle, + token, + }; + + (inner, res) + } + + fn 
register(&self, direction: Direction, task: Task) { + if self.token == ERROR { + task.notify(); + return; + } + + let inner = match self.handle.inner() { + Some(inner) => inner, + None => { + task.notify(); + return; + } + }; + + inner.register(self.token, direction, task); + } + + fn deregister(&self, io: &E) -> io::Result<()> { + if self.token == ERROR { + return Err(io::Error::new(io::ErrorKind::Other, "failed to associate with reactor")); + } + + let inner = match self.handle.inner() { + Some(inner) => inner, + None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")), + }; + + inner.deregister_source(io) + } + + fn poll_ready(&self, direction: Direction, notify: bool, task: F) + -> io::Result> + where F: FnOnce() -> Task + { + if self.token == ERROR { + return Err(io::Error::new(io::ErrorKind::Other, "failed to associate with reactor")); + } + + let inner = match self.handle.inner() { + Some(inner) => inner, + None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")), + }; + + let mask = direction.mask(); + let mask_no_hup = (mask - ::platform::hup()).as_usize(); + + let io_dispatch = inner.io_dispatch.read().unwrap(); + let sched = &io_dispatch[self.token]; + + // This consumes the current readiness state **except** for HUP. HUP is + // excluded because a) it is a final state and never transitions out of + // HUP and b) both the read AND the write directions need to be able to + // observe this state. + // + // If HUP were to be cleared when `direction` is `Read`, then when + // `poll_ready` is called again with a _`direction` of `Write`, the HUP + // state would not be visible. 
+ let mut ready = mask & mio::Ready::from_usize( + sched.readiness.fetch_and(!mask_no_hup, SeqCst)); + + if ready.is_empty() && notify { + let task = task(); + // Update the task info + match direction { + Direction::Read => sched.reader.register_task(task), + Direction::Write => sched.writer.register_task(task), + } + + // Try again + ready = mask & mio::Ready::from_usize( + sched.readiness.fetch_and(!mask_no_hup, SeqCst)); + } + + if ready.is_empty() { + Ok(None) + } else { + Ok(Some(ready)) + } + } +} + +impl Drop for Inner { + fn drop(&mut self) { + if self.token == ERROR { + return; + } + + let inner = match self.handle.inner() { + Some(inner) => inner, + None => return, + }; + + inner.drop_source(self.token); + } +} diff --git a/third_party/rust/tokio-tcp/.cargo-checksum.json b/third_party/rust/tokio-tcp/.cargo-checksum.json new file mode 100644 index 000000000000..55146fc94689 --- /dev/null +++ b/third_party/rust/tokio-tcp/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"3c998bcf9a586c04572939b0b2a13de8c9b209ea1e1f39985a68f93c15eb8ca6","Cargo.toml":"873a8b587ce1997708b10e2a04d3f4ad1547643bbaa03daa7b5dc9a44bcf6693","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"2a2758b6ffb7c22a360e759c577ebb057191247dc13aa24acf2bb9c8094c78f4","src/incoming.rs":"aab273caab1ec1db0228e52cb6c69525912b2db8f43ff545ee91658c27fec1a2","src/lib.rs":"874211f0a17e7efc285e0a1108a377a3f9761abb9fccf473f80823dabf6474b3","src/listener.rs":"98ca8d5ef5836273fe29af73e90b884c7755d830c32258086950fcca96bdb28b","src/stream.rs":"af393e6fb88cb94567d59999f563b76d6665883c41d1808c52a21795b00cd3c1","tests/chain.rs":"fd1a8dc4e8d838bb201dcbae99bddf6093d5cedb0c21bf05e574b19924c051b1","tests/echo.rs":"dbe85496609f488257bf21f36253eaff3aaea08432fdfdc4f246edc8ca1586df","tests/limit.rs":"df5f6fbcceae7df613e83f541b8debed5f06ee42900c5462f4322bd0564ae118","tests/stream-buffered.rs":"1996ce7bc2664da310ea1417d0baef7bff5a20dc51dc2efead662b82e71f9d6b","tests/tcp.rs"
:"32217ca59e86c315af29ddb9e9746de4ba1ac32d32e0bc93ca2f877561bf1331"},"package":"5b4c329b47f071eb8a746040465fa751bd95e4716e98daef6a9b4e434c17d565"} \ No newline at end of file diff --git a/third_party/rust/tokio-tcp/CHANGELOG.md b/third_party/rust/tokio-tcp/CHANGELOG.md new file mode 100644 index 000000000000..93605645e9fc --- /dev/null +++ b/third_party/rust/tokio-tcp/CHANGELOG.md @@ -0,0 +1,7 @@ +# 0.1.1 (August 6, 2018) + +* Add `TcpStream::try_clone` (#448) + +# 0.1.0 (March 23, 2018) + +* Initial release diff --git a/third_party/rust/tokio-tcp/Cargo.toml b/third_party/rust/tokio-tcp/Cargo.toml new file mode 100644 index 000000000000..ee398dee34a4 --- /dev/null +++ b/third_party/rust/tokio-tcp/Cargo.toml @@ -0,0 +1,42 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-tcp" +version = "0.1.1" +authors = ["Carl Lerche "] +description = "TCP bindings for tokio.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio-tcp/0.1" +categories = ["asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.bytes] +version = "0.4" + +[dependencies.futures] +version = "0.1.19" + +[dependencies.iovec] +version = "0.1" + +[dependencies.mio] +version = "0.6.14" + +[dependencies.tokio-io] +version = "0.1.6" + +[dependencies.tokio-reactor] +version = "0.1.1" +[dev-dependencies.env_logger] +version = "0.4" +default-features = false diff --git a/third_party/rust/tokio-tcp/LICENSE b/third_party/rust/tokio-tcp/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-tcp/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-tcp/README.md b/third_party/rust/tokio-tcp/README.md new file mode 100644 index 000000000000..9cfc177b801a --- /dev/null +++ b/third_party/rust/tokio-tcp/README.md @@ -0,0 +1,15 @@ +# tokio-tcp + +TCP bindings for `tokio`. + +[Documentation](https://tokio-rs.github.io/tokio/tokio_tcp/) + +## License + +This project is licensed under the [MIT license](./LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-tcp/src/incoming.rs b/third_party/rust/tokio-tcp/src/incoming.rs new file mode 100644 index 000000000000..6726224b8201 --- /dev/null +++ b/third_party/rust/tokio-tcp/src/incoming.rs @@ -0,0 +1,45 @@ +use super::TcpListener; +use super::TcpStream; + +use std::io; +use futures::stream::Stream; +use futures::{Poll, Async}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Stream returned by the `TcpListener::incoming` function representing the +/// stream of sockets received from a listener. 
+#[must_use = "streams do nothing unless polled"] +#[derive(Debug)] +pub struct Incoming { + inner: TcpListener, +} + +impl Incoming { + pub(crate) fn new(listener: TcpListener) -> Incoming { + Incoming { inner: listener } + } +} + +impl Stream for Incoming { + type Item = TcpStream; + type Error = io::Error; + + fn poll(&mut self) -> Poll, io::Error> { + let (socket, _) = try_ready!(self.inner.poll_accept()); + Ok(Async::Ready(Some(socket))) + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::Stream for Incoming { + type Item = TcpStream; + type Error = io::Error; + + fn poll_next(&mut self, cx: &mut futures2::task::Context) + -> futures2::Poll, io::Error> + { + Ok(self.inner.poll_accept2(cx)?.map(|(sock, _)| Some(sock))) + } +} diff --git a/third_party/rust/tokio-tcp/src/lib.rs b/third_party/rust/tokio-tcp/src/lib.rs new file mode 100644 index 000000000000..c7713ee21377 --- /dev/null +++ b/third_party/rust/tokio-tcp/src/lib.rs @@ -0,0 +1,59 @@ +//! TCP bindings for `tokio`. +//! +//! This module contains the TCP networking types, similar to the standard +//! library, which can be used to implement networking protocols. +//! +//! Connecting to an address, via TCP, can be done using [`TcpStream`]'s +//! [`connect`] method, which returns [`ConnectFuture`]. `ConnectFuture` +//! implements a future which returns a `TcpStream`. +//! +//! To listen on an address [`TcpListener`] can be used. `TcpListener`'s +//! [`incoming`][incoming_method] method can be used to accept new connections. +//! It return the [`Incoming`] struct, which implements a stream which returns +//! `TcpStream`s. +//! +//! [`TcpStream`]: struct.TcpStream.html +//! [`connect`]: struct.TcpStream.html#method.connect +//! [`ConnectFuture`]: struct.ConnectFuture.html +//! [`TcpListener`]: struct.TcpListener.html +//! [incoming_method]: struct.TcpListener.html#method.incoming +//! 
[`Incoming`]: struct.Incoming.html + +#![doc(html_root_url = "https://docs.rs/tokio-tcp/0.1.1")] +#![deny(missing_docs, warnings, missing_debug_implementations)] + +extern crate bytes; +#[macro_use] +extern crate futures; +extern crate iovec; +extern crate mio; +extern crate tokio_io; +extern crate tokio_reactor; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +mod incoming; +mod listener; +mod stream; + +pub use self::incoming::Incoming; +pub use self::listener::TcpListener; +pub use self::stream::TcpStream; +pub use self::stream::ConnectFuture; + +#[cfg(feature = "unstable-futures")] +fn lift_async(old: futures::Async) -> futures2::Async { + match old { + futures::Async::Ready(x) => futures2::Async::Ready(x), + futures::Async::NotReady => futures2::Async::Pending, + } +} + +#[cfg(feature = "unstable-futures")] +fn lower_async(new: futures2::Async) -> futures::Async { + match new { + futures2::Async::Ready(x) => futures::Async::Ready(x), + futures2::Async::Pending => futures::Async::NotReady, + } +} diff --git a/third_party/rust/tokio-tcp/src/listener.rs b/third_party/rust/tokio-tcp/src/listener.rs new file mode 100644 index 000000000000..1eff355676cb --- /dev/null +++ b/third_party/rust/tokio-tcp/src/listener.rs @@ -0,0 +1,261 @@ +use super::Incoming; +use super::TcpStream; + +use std::fmt; +use std::io; +use std::net::{self, SocketAddr}; + +use futures::{Poll, Async}; +use mio; +use tokio_reactor::{Handle, PollEvented}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// An I/O object representing a TCP socket listening for incoming connections. +/// +/// This object can be converted into a stream of incoming connections for +/// various forms of processing. +pub struct TcpListener { + io: PollEvented, +} + +impl TcpListener { + /// Create a new TCP listener associated with this event loop. + /// + /// The TCP listener will bind to the provided `addr` address, if available. 
+ /// If the result is `Ok`, the socket has successfully bound. + pub fn bind(addr: &SocketAddr) -> io::Result { + let l = mio::net::TcpListener::bind(addr)?; + Ok(TcpListener::new(l)) + } + + #[deprecated(since = "0.1.2", note = "use poll_accept instead")] + #[doc(hidden)] + pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { + match self.poll_accept()? { + Async::Ready(ret) => Ok(ret), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Attempt to accept a connection and create a new connected `TcpStream` if + /// successful. + /// + /// Note that typically for simple usage it's easier to treat incoming + /// connections as a `Stream` of `TcpStream`s with the `incoming` method + /// below. + /// + /// # Return + /// + /// On success, returns `Ok(Async::Ready((socket, addr)))`. + /// + /// If the listener is not ready to accept, the method returns + /// `Ok(Async::NotReady)` and arranges for the current task to receive a + /// notification when the listener becomes ready to accept. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_accept(&mut self) -> Poll<(TcpStream, SocketAddr), io::Error> { + let (io, addr) = try_ready!(self.poll_accept_std()); + + let io = mio::net::TcpStream::from_stream(io)?; + let io = TcpStream::new(io); + + Ok((io, addr).into()) + } + + /// Like `poll_accept`, but for futures 0.2 + #[cfg(feature = "unstable-futures")] + pub fn poll_accept2(&mut self, cx: &mut futures2::task::Context) + -> futures2::Poll<(TcpStream, SocketAddr), io::Error> + { + let (io, addr) = match self.poll_accept_std2(cx)? 
{ + futures2::Async::Ready(x) => x, + futures2::Async::Pending => return Ok(futures2::Async::Pending), + }; + + let io = mio::net::TcpStream::from_stream(io)?; + let io = TcpStream::new(io); + + Ok((io, addr).into()) + } + + #[deprecated(since = "0.1.2", note = "use poll_accept_std instead")] + #[doc(hidden)] + pub fn accept_std(&mut self) -> io::Result<(net::TcpStream, SocketAddr)> { + match self.poll_accept_std()? { + Async::Ready(ret) => Ok(ret), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Attempt to accept a connection and create a new connected `TcpStream` if + /// successful. + /// + /// This function is the same as `accept` above except that it returns a + /// `std::net::TcpStream` instead of a `tokio::net::TcpStream`. This in turn + /// can then allow for the TCP stream to be associated with a different + /// reactor than the one this `TcpListener` is associated with. + /// + /// # Return + /// + /// On success, returns `Ok(Async::Ready((socket, addr)))`. + /// + /// If the listener is not ready to accept, the method returns + /// `Ok(Async::NotReady)` and arranges for the current task to receive a + /// notification when the listener becomes ready to accept. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_accept_std(&mut self) -> Poll<(net::TcpStream, SocketAddr), io::Error> { + try_ready!(self.io.poll_read_ready(mio::Ready::readable())); + + match self.io.get_ref().accept_std() { + Ok(pair) => Ok(pair.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(mio::Ready::readable())?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } + + /// Like `poll_accept_std`, but for futures 0.2. 
+ #[cfg(feature = "unstable-futures")] + pub fn poll_accept_std2(&mut self, cx: &mut futures2::task::Context) + -> futures2::Poll<(net::TcpStream, SocketAddr), io::Error> + { + if let futures2::Async::Pending = self.io.poll_read_ready2(cx, mio::Ready::readable())? { + return Ok(futures2::Async::Pending); + } + + match self.io.get_ref().accept_std() { + Ok(pair) => Ok(pair.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready2(cx, mio::Ready::readable())?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + /// Create a new TCP listener from the standard library's TCP listener. + /// + /// This method can be used when the `Handle::tcp_listen` method isn't + /// sufficient because perhaps some more configuration is needed in terms of + /// before the calls to `bind` and `listen`. + /// + /// This API is typically paired with the `net2` crate and the `TcpBuilder` + /// type to build up and customize a listener before it's shipped off to the + /// backing event loop. This allows configuration of options like + /// `SO_REUSEPORT`, binding to multiple addresses, etc. + /// + /// The `addr` argument here is one of the addresses that `listener` is + /// bound to and the listener will only be guaranteed to accept connections + /// of the same address type currently. + /// + /// Finally, the `handle` argument is the event loop that this listener will + /// be bound to. + /// Use `Handle::default()` to lazily bind to an event loop, just like `bind` does. + /// + /// The platform specific behavior of this function looks like: + /// + /// * On Unix, the socket is placed into nonblocking mode and connections + /// can be accepted as normal + /// + /// * On Windows, the address is stored internally and all future accepts + /// will only be for the same IP version as `addr` specified. That is, if + /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as + /// well (same for IPv6). 
+ pub fn from_std(listener: net::TcpListener, handle: &Handle) + -> io::Result + { + let io = mio::net::TcpListener::from_std(listener)?; + let io = PollEvented::new_with_handle(io, handle)?; + Ok(TcpListener { io }) + } + + fn new(listener: mio::net::TcpListener) -> TcpListener { + let io = PollEvented::new(listener); + TcpListener { io } + } + + /// Returns the local address that this listener is bound to. + /// + /// This can be useful, for example, when binding to port 0 to figure out + /// which port was actually bound. + pub fn local_addr(&self) -> io::Result { + self.io.get_ref().local_addr() + } + + /// Consumes this listener, returning a stream of the sockets this listener + /// accepts. + /// + /// This method returns an implementation of the `Stream` trait which + /// resolves to the sockets the are accepted on this listener. + /// + /// # Errors + /// + /// Note that accepting a connection can lead to various errors and not all of them are + /// necessarily fatal ‒ for example having too many open file descriptors or the other side + /// closing the connection while it waits in an accept queue. These would terminate the stream + /// if not handled in any way. + /// + /// If aiming for production, decision what to do about them must be made. The + /// [`tk-listen`](https://crates.io/crates/tk-listen) crate might be of some help. + pub fn incoming(self) -> Incoming { + Incoming::new(self) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. + /// + /// [`set_ttl`]: #method.set_ttl + pub fn ttl(&self) -> io::Result { + self.io.get_ref().ttl() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. 
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_ttl(ttl) + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +#[cfg(unix)] +mod sys { + use std::os::unix::prelude::*; + use super::TcpListener; + + impl AsRawFd for TcpListener { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + // TODO: let's land these upstream with mio and then we can add them here. + // + // use std::os::windows::prelude::*; + // use super::{TcpListener; + // + // impl AsRawHandle for TcpListener { + // fn as_raw_handle(&self) -> RawHandle { + // self.listener.io().as_raw_handle() + // } + // } +} diff --git a/third_party/rust/tokio-tcp/src/stream.rs b/third_party/rust/tokio-tcp/src/stream.rs new file mode 100644 index 000000000000..8f4eeaeb0138 --- /dev/null +++ b/third_party/rust/tokio-tcp/src/stream.rs @@ -0,0 +1,755 @@ +use std::fmt; +use std::io::{self, Read, Write}; +use std::mem; +use std::net::{self, SocketAddr, Shutdown}; +use std::time::Duration; + +use bytes::{Buf, BufMut}; +use futures::{Future, Poll, Async}; +use iovec::IoVec; +use mio; +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_reactor::{Handle, PollEvented}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// An I/O object representing a TCP stream connected to a remote endpoint. +/// +/// A TCP stream can either be created by connecting to an endpoint, via the +/// [`connect`] method, or by [accepting] a connection from a [listener]. +/// +/// [`connect`]: struct.TcpStream.html#method.connect +/// [accepting]: struct.TcpListener.html#method.accept +/// [listener]: struct.TcpListener.html +pub struct TcpStream { + io: PollEvented, +} + +/// Future returned by `TcpStream::connect` which will resolve to a `TcpStream` +/// when the stream is connected. 
+#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct ConnectFuture { + inner: ConnectFutureState, +} + +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +enum ConnectFutureState { + Waiting(TcpStream), + Error(io::Error), + Empty, +} + +impl TcpStream { + /// Create a new TCP stream connected to the specified address. + /// + /// This function will create a new TCP socket and attempt to connect it to + /// the `addr` provided. The returned future will be resolved once the + /// stream has successfully connected, or it wil return an error if one + /// occurs. + pub fn connect(addr: &SocketAddr) -> ConnectFuture { + use self::ConnectFutureState::*; + + let inner = match mio::net::TcpStream::connect(addr) { + Ok(tcp) => Waiting(TcpStream::new(tcp)), + Err(e) => Error(e), + }; + + ConnectFuture { inner } + } + + pub(crate) fn new(connected: mio::net::TcpStream) -> TcpStream { + let io = PollEvented::new(connected); + TcpStream { io } + } + + /// Create a new `TcpStream` from a `net::TcpStream`. + /// + /// This function will convert a TCP stream created by the standard library + /// to a TCP stream ready to be used with the provided event loop handle. + /// Use `Handle::default()` to lazily bind to an event loop, just like `connect` does. + pub fn from_std(stream: net::TcpStream, handle: &Handle) + -> io::Result + { + let io = mio::net::TcpStream::from_stream(stream)?; + let io = PollEvented::new_with_handle(io, handle)?; + + Ok(TcpStream { io }) + } + + /// Creates a new `TcpStream` from the pending socket inside the given + /// `std::net::TcpStream`, connecting it to the address specified. + /// + /// This constructor allows configuring the socket before it's actually + /// connected, and this function will transfer ownership to the returned + /// `TcpStream` if successful. An unconnected `TcpStream` can be created + /// with the `net2::TcpBuilder` type (and also configured via that route). 
+ /// + /// The platform specific behavior of this function looks like: + /// + /// * On Unix, the socket is placed into nonblocking mode and then a + /// `connect` call is issued. + /// + /// * On Windows, the address is stored internally and the connect operation + /// is issued when the returned `TcpStream` is registered with an event + /// loop. Note that on Windows you must `bind` a socket before it can be + /// connected, so if a custom `TcpBuilder` is used it should be bound + /// (perhaps to `INADDR_ANY`) before this method is called. + pub fn connect_std(stream: net::TcpStream, + addr: &SocketAddr, + handle: &Handle) + -> ConnectFuture + { + use self::ConnectFutureState::*; + + let io = mio::net::TcpStream::connect_stream(stream, addr) + .and_then(|io| PollEvented::new_with_handle(io, handle)); + + let inner = match io { + Ok(io) => Waiting(TcpStream { io }), + Err(e) => Error(e), + }; + + ConnectFuture { inner: inner } + } + + /// Check the TCP stream's read readiness state. + /// + /// The mask argument allows specifying what readiness to notify on. This + /// can be any value, including platform specific readiness, **except** + /// `writable`. HUP is always implicitly included on platforms that support + /// it. + /// + /// If the resource is not ready for a read then `Async::NotReady` is + /// returned and the current task is notified once a new event is received. + /// + /// The stream will remain in a read-ready state until calls to `poll_read` + /// return `NotReady`. + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` includes writable. + /// * called from outside of a task context. 
+ pub fn poll_read_ready(&self, mask: mio::Ready) -> Poll { + self.io.poll_read_ready(mask) + } + + /// Like `poll_read_ready`, but compatible with futures 0.2 + #[cfg(feature = "unstable-futures")] + pub fn poll_read_ready2(&self, cx: &mut futures2::task::Context, mask: mio::Ready) + -> futures2::Poll + { + self.io.poll_read_ready2(cx, mask) + } + + /// Check the TCP stream's write readiness state. + /// + /// This always checks for writable readiness and also checks for HUP + /// readiness on platforms that support it. + /// + /// If the resource is not ready for a write then `Async::NotReady` is + /// returned and the current task is notified once a new event is received. + /// + /// The I/O resource will remain in a write-ready state until calls to + /// `poll_write` return `NotReady`. + /// + /// # Panics + /// + /// This function panics if: + /// + /// * `ready` contains bits besides `writable` and `hup`. + /// * called from outside of a task context. + pub fn poll_write_ready(&self) -> Poll { + self.io.poll_write_ready() + } + + /// Like `poll_write_ready`, but compatible with futures 0.2. + #[cfg(feature = "unstable-futures")] + pub fn poll_write_ready2(&self, cx: &mut futures2::task::Context) + -> futures2::Poll + { + self.io.poll_write_ready2(cx) + } + + /// Returns the local address that this stream is bound to. + pub fn local_addr(&self) -> io::Result { + self.io.get_ref().local_addr() + } + + /// Returns the remote address that this stream is connected to. + pub fn peer_addr(&self) -> io::Result { + self.io.get_ref().peer_addr() + } + + #[deprecated(since = "0.1.2", note = "use poll_peek instead")] + #[doc(hidden)] + pub fn peek(&mut self, buf: &mut [u8]) -> io::Result { + match self.poll_peek(buf)? { + Async::Ready(n) => Ok(n), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Receives data on the socket from the remote address to which it is + /// connected, without removing that data from the queue. 
On success, + /// returns the number of bytes peeked. + /// + /// Successive calls return the same data. This is accomplished by passing + /// `MSG_PEEK` as a flag to the underlying recv system call. + /// + /// # Return + /// + /// On success, returns `Ok(Async::Ready(num_bytes_read))`. + /// + /// If no data is available for reading, the method returns + /// `Ok(Async::NotReady)` and arranges for the current task to receive a + /// notification when the socket becomes readable or is closed. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_peek(&mut self, buf: &mut [u8]) -> Poll { + try_ready!(self.io.poll_read_ready(mio::Ready::readable())); + + match self.io.get_ref().peek(buf) { + Ok(ret) => Ok(ret.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(mio::Ready::readable())?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } + + /// Like `poll_peek` but compatible with futures 0.2 + #[cfg(feature = "unstable-futures")] + pub fn poll_peek2(&mut self, cx: &mut futures2::task::Context, buf: &mut [u8]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.io.poll_read_ready2(cx, mio::Ready::readable())? { + return Ok(futures2::Async::Pending); + } + + match self.io.get_ref().peek(buf) { + Ok(ret) => Ok(ret.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready2(cx, mio::Ready::readable())?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O on the specified + /// portions to return immediately with an appropriate value (see the + /// documentation of `Shutdown`). + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.io.get_ref().shutdown(how) + } + + /// Gets the value of the `TCP_NODELAY` option on this socket. 
+ /// + /// For more information about this option, see [`set_nodelay`]. + /// + /// [`set_nodelay`]: #method.set_nodelay + pub fn nodelay(&self) -> io::Result { + self.io.get_ref().nodelay() + } + + /// Sets the value of the `TCP_NODELAY` option on this socket. + /// + /// If set, this option disables the Nagle algorithm. This means that + /// segments are always sent as soon as possible, even if there is only a + /// small amount of data. When not set, data is buffered until there is a + /// sufficient amount to send out, thereby avoiding the frequent sending of + /// small packets. + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + self.io.get_ref().set_nodelay(nodelay) + } + + /// Gets the value of the `SO_RCVBUF` option on this socket. + /// + /// For more information about this option, see [`set_recv_buffer_size`]. + /// + /// [`set_recv_buffer_size`]: #tymethod.set_recv_buffer_size + pub fn recv_buffer_size(&self) -> io::Result { + self.io.get_ref().recv_buffer_size() + } + + /// Sets the value of the `SO_RCVBUF` option on this socket. + /// + /// Changes the size of the operating system's receive buffer associated + /// with the socket. + pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.get_ref().set_recv_buffer_size(size) + } + + /// Gets the value of the `SO_SNDBUF` option on this socket. + /// + /// For more information about this option, see [`set_send_buffer`]. + /// + /// [`set_send_buffer`]: #tymethod.set_send_buffer + pub fn send_buffer_size(&self) -> io::Result { + self.io.get_ref().send_buffer_size() + } + + /// Sets the value of the `SO_SNDBUF` option on this socket. + /// + /// Changes the size of the operating system's send buffer associated with + /// the socket. + pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.get_ref().set_send_buffer_size(size) + } + + /// Returns whether keepalive messages are enabled on this socket, and if so + /// the duration of time between them. 
+ /// + /// For more information about this option, see [`set_keepalive`]. + /// + /// [`set_keepalive`]: #tymethod.set_keepalive + pub fn keepalive(&self) -> io::Result> { + self.io.get_ref().keepalive() + } + + /// Sets whether keepalive messages are enabled to be sent on this socket. + /// + /// On Unix, this option will set the `SO_KEEPALIVE` as well as the + /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform). + /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option. + /// + /// If `None` is specified then keepalive messages are disabled, otherwise + /// the duration specified will be the time to remain idle before sending a + /// TCP keepalive probe. + /// + /// Some platforms specify this value in seconds, so sub-second + /// specifications may be omitted. + pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { + self.io.get_ref().set_keepalive(keepalive) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. + /// + /// [`set_ttl`]: #tymethod.set_ttl + pub fn ttl(&self) -> io::Result { + self.io.get_ref().ttl() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_ttl(ttl) + } + + /// Reads the linger duration for this socket by getting the `SO_LINGER` + /// option. + /// + /// For more information about this option, see [`set_linger`]. + /// + /// [`set_linger`]: #tymethod.set_linger + pub fn linger(&self) -> io::Result> { + self.io.get_ref().linger() + } + + /// Sets the linger duration of this socket by setting the `SO_LINGER` + /// option. + /// + /// This option controls the action taken when a stream has unsent messages + /// and the stream is closed. 
If `SO_LINGER` is set, the system + /// shall block the process until it can transmit the data or until the + /// time expires. + /// + /// If `SO_LINGER` is not specified, and the stream is closed, the system + /// handles the call in a way that allows the process to continue as quickly + /// as possible. + pub fn set_linger(&self, dur: Option) -> io::Result<()> { + self.io.get_ref().set_linger(dur) + } + + /// Creates a new independently owned handle to the underlying socket. + /// + /// The returned `TcpStream` is a reference to the same stream that this + /// object references. Both handles will read and write the same stream of + /// data, and options set on one stream will be propagated to the other + /// stream. + pub fn try_clone(&self) -> io::Result { + let io = self.io.get_ref().try_clone()?; + Ok(TcpStream::new(io)) + } +} + +// ===== impl Read / Write ===== + +impl Read for TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.io.read(buf) + } +} + +impl Write for TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.io.write(buf) + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl AsyncRead for TcpStream { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { + false + } + + fn read_buf(&mut self, buf: &mut B) -> Poll { + <&TcpStream>::read_buf(&mut &*self, buf) + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::io::AsyncRead for TcpStream { + fn poll_read(&mut self, cx: &mut futures2::task::Context, buf: &mut [u8]) + -> futures2::Poll + { + futures2::io::AsyncRead::poll_read(&mut self.io, cx, buf) + } + + fn poll_vectored_read(&mut self, cx: &mut futures2::task::Context, vec: &mut [&mut IoVec]) + -> futures2::Poll + { + futures2::io::AsyncRead::poll_vectored_read(&mut &*self, cx, vec) + } + + unsafe fn initializer(&self) -> futures2::io::Initializer { + futures2::io::Initializer::nop() + } +} + +impl AsyncWrite for TcpStream { + fn shutdown(&mut self) -> Poll<(), 
io::Error> { + <&TcpStream>::shutdown(&mut &*self) + } + + fn write_buf(&mut self, buf: &mut B) -> Poll { + <&TcpStream>::write_buf(&mut &*self, buf) + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::io::AsyncWrite for TcpStream { + fn poll_write(&mut self, cx: &mut futures2::task::Context, buf: &[u8]) + -> futures2::Poll + { + futures2::io::AsyncWrite::poll_write(&mut self.io, cx, buf) + } + + fn poll_vectored_write(&mut self, cx: &mut futures2::task::Context, vec: &[&IoVec]) + -> futures2::Poll + { + futures2::io::AsyncWrite::poll_vectored_write(&mut &*self, cx, vec) + } + + fn poll_flush(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + futures2::io::AsyncWrite::poll_flush(&mut self.io, cx) + } + + fn poll_close(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + futures2::io::AsyncWrite::poll_close(&mut self.io, cx) + } +} + +// ===== impl Read / Write for &'a ===== + +impl<'a> Read for &'a TcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + (&self.io).read(buf) + } +} + +impl<'a> Write for &'a TcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result { + (&self.io).write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + (&self.io).flush() + } +} + +impl<'a> AsyncRead for &'a TcpStream { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { + false + } + + fn read_buf(&mut self, buf: &mut B) -> Poll { + if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? { + return Ok(Async::NotReady) + } + + let r = unsafe { + // The `IoVec` type can't have a 0-length size, so we create a bunch + // of dummy versions on the stack with 1 length which we'll quickly + // overwrite. 
+ let b1: &mut [u8] = &mut [0]; + let b2: &mut [u8] = &mut [0]; + let b3: &mut [u8] = &mut [0]; + let b4: &mut [u8] = &mut [0]; + let b5: &mut [u8] = &mut [0]; + let b6: &mut [u8] = &mut [0]; + let b7: &mut [u8] = &mut [0]; + let b8: &mut [u8] = &mut [0]; + let b9: &mut [u8] = &mut [0]; + let b10: &mut [u8] = &mut [0]; + let b11: &mut [u8] = &mut [0]; + let b12: &mut [u8] = &mut [0]; + let b13: &mut [u8] = &mut [0]; + let b14: &mut [u8] = &mut [0]; + let b15: &mut [u8] = &mut [0]; + let b16: &mut [u8] = &mut [0]; + let mut bufs: [&mut IoVec; 16] = [ + b1.into(), b2.into(), b3.into(), b4.into(), + b5.into(), b6.into(), b7.into(), b8.into(), + b9.into(), b10.into(), b11.into(), b12.into(), + b13.into(), b14.into(), b15.into(), b16.into(), + ]; + let n = buf.bytes_vec_mut(&mut bufs); + self.io.get_ref().read_bufs(&mut bufs[..n]) + }; + + match r { + Ok(n) => { + unsafe { buf.advance_mut(n); } + Ok(Async::Ready(n)) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(mio::Ready::readable())?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } +} + +#[cfg(feature = "unstable-futures")] +impl<'a> futures2::io::AsyncRead for &'a TcpStream { + fn poll_read(&mut self, cx: &mut futures2::task::Context, buf: &mut [u8]) + -> futures2::Poll + { + futures2::io::AsyncRead::poll_read(&mut &self.io, cx, buf) + } + + fn poll_vectored_read(&mut self, cx: &mut futures2::task::Context, vec: &mut [&mut IoVec]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.io.poll_read_ready2(cx, mio::Ready::readable())? 
{ + return Ok(futures2::Async::Pending) + } + + let r = self.io.get_ref().read_bufs(vec); + + match r { + Ok(n) => { + Ok(futures2::Async::Ready(n)) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready2(cx, mio::Ready::readable())?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + unsafe fn initializer(&self) -> futures2::io::Initializer { + futures2::io::Initializer::nop() + } +} + +impl<'a> AsyncWrite for &'a TcpStream { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } + + fn write_buf(&mut self, buf: &mut B) -> Poll { + if let Async::NotReady = self.io.poll_write_ready()? { + return Ok(Async::NotReady) + } + + let r = { + // The `IoVec` type can't have a zero-length size, so create a dummy + // version from a 1-length slice which we'll overwrite with the + // `bytes_vec` method. + static DUMMY: &[u8] = &[0]; + let iovec = <&IoVec>::from(DUMMY); + let mut bufs = [iovec; 64]; + let n = buf.bytes_vec(&mut bufs); + self.io.get_ref().write_bufs(&bufs[..n]) + }; + match r { + Ok(n) => { + buf.advance(n); + Ok(Async::Ready(n)) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready()?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } +} + +#[cfg(feature = "unstable-futures")] +impl<'a> futures2::io::AsyncWrite for &'a TcpStream { + fn poll_write(&mut self, cx: &mut futures2::task::Context, buf: &[u8]) + -> futures2::Poll + { + futures2::io::AsyncWrite::poll_write(&mut &self.io, cx, buf) + } + + fn poll_vectored_write(&mut self, cx: &mut futures2::task::Context, vec: &[&IoVec]) + -> futures2::Poll + { + if let futures2::Async::Pending = self.io.poll_write_ready2(cx)? 
{ + return Ok(futures2::Async::Pending) + } + + let r = self.io.get_ref().write_bufs(vec); + + match r { + Ok(n) => { + Ok(futures2::Async::Ready(n)) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready2(cx)?; + Ok(futures2::Async::Pending) + } + Err(e) => Err(e), + } + } + + fn poll_flush(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + futures2::io::AsyncWrite::poll_flush(&mut &self.io, cx) + } + + fn poll_close(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), io::Error> { + futures2::io::AsyncWrite::poll_close(&mut &self.io, cx) + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +impl Future for ConnectFuture { + type Item = TcpStream; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + self.inner.poll() + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::Future for ConnectFuture { + type Item = TcpStream; + type Error = io::Error; + + fn poll(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll { + futures2::Future::poll(&mut self.inner, cx) + } +} + +impl ConnectFutureState { + fn poll_inner(&mut self, f: F) -> Poll + where F: FnOnce(&mut PollEvented) -> Poll + { + { + let stream = match *self { + ConnectFutureState::Waiting(ref mut s) => s, + ConnectFutureState::Error(_) => { + let e = match mem::replace(self, ConnectFutureState::Empty) { + ConnectFutureState::Error(e) => e, + _ => panic!(), + }; + return Err(e) + } + ConnectFutureState::Empty => panic!("can't poll TCP stream twice"), + }; + + // Once we've connected, wait for the stream to be writable as + // that's when the actual connection has been initiated. Once we're + // writable we check for `take_socket_error` to see if the connect + // actually hit an error or not. + // + // If all that succeeded then we ship everything on up. + if let Async::NotReady = f(&mut stream.io)? 
{ + return Ok(Async::NotReady) + } + + if let Some(e) = try!(stream.io.get_ref().take_error()) { + return Err(e) + } + } + + match mem::replace(self, ConnectFutureState::Empty) { + ConnectFutureState::Waiting(stream) => Ok(Async::Ready(stream)), + _ => panic!(), + } + } +} + +impl Future for ConnectFutureState { + type Item = TcpStream; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + self.poll_inner(|io| io.poll_write_ready()) + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::Future for ConnectFutureState { + type Item = TcpStream; + type Error = io::Error; + + fn poll(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll { + self.poll_inner(|io| io.poll_write_ready2(cx).map(::lower_async)) + .map(::lift_async) + } +} + +#[cfg(unix)] +mod sys { + use std::os::unix::prelude::*; + use super::TcpStream; + + impl AsRawFd for TcpStream { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + // TODO: let's land these upstream with mio and then we can add them here. + // + // use std::os::windows::prelude::*; + // use super::TcpStream; + // + // impl AsRawHandle for TcpStream { + // fn as_raw_handle(&self) -> RawHandle { + // self.io.get_ref().as_raw_handle() + // } + // } +} diff --git a/third_party/rust/tokio-tcp/tests/chain.rs b/third_party/rust/tokio-tcp/tests/chain.rs new file mode 100644 index 000000000000..c4e37f10309b --- /dev/null +++ b/third_party/rust/tokio-tcp/tests/chain.rs @@ -0,0 +1,49 @@ +extern crate futures; +extern crate tokio_tcp; +extern crate tokio_io; + +use std::net::TcpStream; +use std::thread; +use std::io::{Write, Read}; + +use futures::Future; +use futures::stream::Stream; +use tokio_io::io::read_to_end; +use tokio_tcp::TcpListener; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn chain_clients() { + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let t = thread::spawn(move || { + let mut s1 = TcpStream::connect(&addr).unwrap(); + s1.write_all(b"foo ").unwrap(); + let mut s2 = TcpStream::connect(&addr).unwrap(); + s2.write_all(b"bar ").unwrap(); + let mut s3 = TcpStream::connect(&addr).unwrap(); + s3.write_all(b"baz").unwrap(); + }); + + let clients = srv.incoming().take(3); + let copied = clients.collect().and_then(|clients| { + let mut clients = clients.into_iter(); + let a = clients.next().unwrap(); + let b = clients.next().unwrap(); + let c = clients.next().unwrap(); + + read_to_end(a.chain(b).chain(c), Vec::new()) + }); + + let (_, data) = t!(copied.wait()); + t.join().unwrap(); + + assert_eq!(data, b"foo bar baz"); +} diff --git a/third_party/rust/tokio-tcp/tests/echo.rs b/third_party/rust/tokio-tcp/tests/echo.rs new file mode 100644 index 000000000000..3c020b193eba --- /dev/null +++ b/third_party/rust/tokio-tcp/tests/echo.rs @@ -0,0 +1,51 @@ +extern crate env_logger; +extern crate futures; +extern crate tokio_tcp; +extern crate tokio_io; + +use std::io::{Read, Write}; +use std::net::TcpStream; +use std::thread; + +use futures::Future; +use futures::stream::Stream; +use tokio_tcp::TcpListener; +use tokio_io::AsyncRead; +use tokio_io::io::copy; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn echo_server() { + drop(env_logger::init()); + + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let msg = "foo bar baz"; + let t = thread::spawn(move || { + let mut s = TcpStream::connect(&addr).unwrap(); + + for _i in 0..1024 { + assert_eq!(t!(s.write(msg.as_bytes())), msg.len()); + let mut buf = [0; 1024]; + assert_eq!(t!(s.read(&mut buf)), msg.len()); + assert_eq!(&buf[..msg.len()], msg.as_bytes()); + } + }); + + let clients = srv.incoming(); + let client = clients.into_future().map(|e| e.0.unwrap()).map_err(|e| e.0); + let halves = client.map(|s| s.split()); + let copied = halves.and_then(|(a, b)| copy(a, b)); + + let (amt, _, _) = t!(copied.wait()); + t.join().unwrap(); + + assert_eq!(amt, msg.len() as u64 * 1024); +} diff --git a/third_party/rust/tokio-tcp/tests/limit.rs b/third_party/rust/tokio-tcp/tests/limit.rs new file mode 100644 index 000000000000..8714da9a51f4 --- /dev/null +++ b/third_party/rust/tokio-tcp/tests/limit.rs @@ -0,0 +1,43 @@ +extern crate futures; +extern crate tokio_tcp; +extern crate tokio_io; + +use std::net::TcpStream; +use std::thread; +use std::io::{Write, Read}; + +use futures::Future; +use futures::stream::Stream; +use tokio_io::io::read_to_end; +use tokio_tcp::TcpListener; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn limit() { + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let t = thread::spawn(move || { + let mut s1 = TcpStream::connect(&addr).unwrap(); + s1.write_all(b"foo bar baz").unwrap(); + }); + + let clients = srv.incoming().take(1); + let copied = clients.collect().and_then(|clients| { + let mut clients = clients.into_iter(); + let a = clients.next().unwrap(); + + read_to_end(a.take(4), Vec::new()) + }); + + let (_, data) = t!(copied.wait()); + t.join().unwrap(); + + assert_eq!(data, b"foo "); +} diff --git a/third_party/rust/tokio-tcp/tests/stream-buffered.rs b/third_party/rust/tokio-tcp/tests/stream-buffered.rs new file mode 100644 index 000000000000..a6d71298dc6f --- /dev/null +++ b/third_party/rust/tokio-tcp/tests/stream-buffered.rs @@ -0,0 +1,54 @@ +extern crate env_logger; +extern crate futures; +extern crate tokio_tcp; +extern crate tokio_io; + +use std::io::{Read, Write}; +use std::net::TcpStream; +use std::thread; + +use futures::Future; +use futures::stream::Stream; +use tokio_io::io::copy; +use tokio_io::AsyncRead; +use tokio_tcp::TcpListener; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn echo_server() { + drop(env_logger::init()); + + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let t = thread::spawn(move || { + let mut s1 = t!(TcpStream::connect(&addr)); + let mut s2 = t!(TcpStream::connect(&addr)); + + let msg = b"foo"; + assert_eq!(t!(s1.write(msg)), msg.len()); + assert_eq!(t!(s2.write(msg)), msg.len()); + let mut buf = [0; 1024]; + assert_eq!(t!(s1.read(&mut buf)), msg.len()); + assert_eq!(&buf[..msg.len()], msg); + assert_eq!(t!(s2.read(&mut buf)), msg.len()); + assert_eq!(&buf[..msg.len()], msg); + }); + + let future = srv.incoming() + .map(|s| s.split()) + .map(|(a, b)| copy(a, b).map(|_| ())) + .buffered(10) + .take(2) + .collect(); + + t!(future.wait()); + + t.join().unwrap(); +} diff --git a/third_party/rust/tokio-tcp/tests/tcp.rs b/third_party/rust/tokio-tcp/tests/tcp.rs new file mode 100644 index 000000000000..c905711b2052 --- /dev/null +++ b/third_party/rust/tokio-tcp/tests/tcp.rs @@ -0,0 +1,132 @@ +extern crate env_logger; +extern crate tokio_io; +extern crate tokio_tcp; +extern crate mio; +extern crate futures; + +use std::{net, thread}; +use std::sync::mpsc::channel; + +use futures::{Future, Stream}; +use tokio_tcp::{TcpListener, TcpStream}; + + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn connect() { + drop(env_logger::init()); + let srv = t!(net::TcpListener::bind("127.0.0.1:0")); + let addr = t!(srv.local_addr()); + let t = thread::spawn(move || { + t!(srv.accept()).0 + }); + + let stream = TcpStream::connect(&addr); + let mine = t!(stream.wait()); + let theirs = t.join().unwrap(); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); +} + +#[test] +fn accept() { + drop(env_logger::init()); + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let (tx, rx) = channel(); + let client = srv.incoming().map(move |t| { + tx.send(()).unwrap(); + t + }).into_future().map_err(|e| e.0); + assert!(rx.try_recv().is_err()); + let t = thread::spawn(move || { + net::TcpStream::connect(&addr).unwrap() + }); + + let (mine, _remaining) = t!(client.wait()); + let mine = mine.unwrap(); + let theirs = t.join().unwrap(); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); +} + +#[test] +fn accept2() { + drop(env_logger::init()); + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let t = thread::spawn(move || { + net::TcpStream::connect(&addr).unwrap() + }); + + let (tx, rx) = channel(); + let client = srv.incoming().map(move |t| { + tx.send(()).unwrap(); + t + }).into_future().map_err(|e| e.0); + assert!(rx.try_recv().is_err()); + + let (mine, _remaining) = t!(client.wait()); + mine.unwrap(); + t.join().unwrap(); +} + +#[cfg(unix)] +mod unix { + use tokio_tcp::TcpStream; + + use env_logger; + use futures::{Future, future}; + use mio::unix::UnixReady; + use tokio_io::AsyncRead; + + use std::io::Write; + use std::{net, thread}; + use std::time::Duration; + + #[test] + fn poll_hup() { + 
drop(env_logger::init()); + + let srv = t!(net::TcpListener::bind("127.0.0.1:0")); + let addr = t!(srv.local_addr()); + let t = thread::spawn(move || { + let mut client = t!(srv.accept()).0; + client.write(b"hello world").unwrap(); + thread::sleep(Duration::from_millis(200)); + }); + + let mut stream = t!(TcpStream::connect(&addr).wait()); + + // Poll for HUP before reading. + future::poll_fn(|| { + stream.poll_read_ready(UnixReady::hup().into()) + }).wait().unwrap(); + + // Same for write half + future::poll_fn(|| { + stream.poll_write_ready() + }).wait().unwrap(); + + let mut buf = vec![0; 11]; + + // Read the data + future::poll_fn(|| { + stream.poll_read(&mut buf) + }).wait().unwrap(); + + assert_eq!(b"hello world", &buf[..]); + + t.join().unwrap(); + } +} diff --git a/third_party/rust/tokio-threadpool/.cargo-checksum.json b/third_party/rust/tokio-threadpool/.cargo-checksum.json new file mode 100644 index 000000000000..d9447866cb7f --- /dev/null +++ b/third_party/rust/tokio-threadpool/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"c570376201990c5ca64a54055f2e2749f1a649cb1683a3fd62f362f06d9273b6","Cargo.toml":"7f4a229fbf18797142a6784f602c0ce6112be9014f580c2130e99d36a215ece3","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"ae825ffe27ec4a58a1012c1d9c740ce1d0840cef3c89d408be05575d80378ee3","benches/basic.rs":"19f096cce2a59c0c427a4ef73987c77c9ab895cb1418e70653a95204c949f30d","benches/blocking.rs":"7c0247da9d3d07055ef2e45dd49b263d8cf7c0d612de2ba14de3bad5d00262c3","benches/depth.rs":"442a4af9c27a5fccf0e15b47c2525cb956c0b4e842fa91962c45dafd7c5b0aca","examples/depth.rs":"332973eb3eed6e02476d0048b280b430a38e5a3736dd1a53e6f1b75668d08a73","examples/hello.rs":"585e1dd671616d7a9722f40aca7a795052e50671511d9323f3dc2c4404af567d","examples/smoke.rs":"4ec9a19f955edba790e424462f428e61a688f3bca325061e4c773e6c6b9436d4","src/blocking.rs":"5bb414b4d153418b49d6e464b417719a6b24ee86e2188dbc7003cf2336a175ae","src/builder.rs":"8157f2538cde83978d55c3712730d7b4231bd4c0ca51acc7020897a629d1ac3c","src/callback.rs":"a050b5642d16768cdb9fc8d04b19a2d903ee214da01c7c6d0c1d6d57129b5b4a","src/config.rs":"b81f10ee65325a047fcae71bc588648bdf51bb49cc7ee0be9b46e2a986978f41","src/futures2_wake.rs":"f9028fe2da5ccfef7b2a58e8295a2557bc3a1ffbf1558b93e3617c814fb2914a","src/lib.rs":"4e4a85749bb598e3e82d24733de30068922d61e2192ba936f0697b25f069da65","src/notifier.rs":"4454fca243bde792e179ccf7a26a2889cc38ce57f85a57e15cb8edb8ad76770a","src/park/boxed.rs":"44669ceb698bf82e9088c336350fd63a8a52b1fb9a8a638d87d2da14eceb0bcc","src/park/default_park.rs":"5ff827b802fd1f7199bec9da4b92f0b2cbd8b548ad8b3b95bba4f2874fbc8877","src/park/mod.rs":"63b391ff690d98088bf586278e0b9c735cb551472c0fc85bc0238433bd0aad76","src/pool/backup.rs":"e390436dcc123981b1f04bba0263369e393ce8481eae6c0b06acfbf3e561127d","src/pool/backup_stack.rs":"cbe092312a17e53aeb37ea6f0119d18d2b6d19a808e6da6588ecf4b85f15818b","src/pool/mod.rs":"528ee1433e66cc5c9b66ab2633f675cc73476885f801d5a5d84b3733f1bf154a","src/pool/state.rs":"43c
b32c33ce0da6bb18e536fd7f475e00b2924d22b5a710e6a889f3b0e9ca65f","src/sender.rs":"1852331c4721a275c7a972a34c3da97895fe493bf3b3504d4cbb81efd146e8b7","src/shutdown.rs":"c7e2e702d58f720ce1b8cbdf921e34449e984a3cf3140827f82f6d0beb3ffdcb","src/shutdown_task.rs":"99f3599f8e7b7f6cf4f0216007876dc639a0b5dbf642498a8fec3c312cf5e810","src/task/blocking.rs":"75a4a2a001476144f1e0564c3893d4aa7a88acd7d541038073dba1bcef776b76","src/task/blocking_state.rs":"4116e314417b53608947374ce243e116596383b2f0c8728a5814f7d73fcc574d","src/task/mod.rs":"b8b5a8872f78f9e0b25aefa8edd57b9828b2991f0edf73869310281a07c461c1","src/task/queue.rs":"2f28e2d4a055ccb159777c55610249bb102cb257521d8649bb776e8891b21631","src/task/state.rs":"1c63f5892a5ecdfed7dc17e58f32cfd2cbd8cb3757bedb8d73ba94ed826cd61a","src/thread_pool.rs":"3a3c90e39ff0fc8817185a41a177c3802dfebbba658a651da06296afc8a7b28a","src/worker/entry.rs":"84faab8c59dec38ea3850d0b0cfc52183dee8b01b49a1b59301bc17ec1036a7d","src/worker/mod.rs":"2c7fdefdc70d2e2686fb456d3bea56b0dd67c7ba690a00fb6ada6c1ca39d5ac4","src/worker/stack.rs":"3915d4bca624c0760f7888f87a2210b130e71c851a9553d9b95ade7eea2df40a","src/worker/state.rs":"009e93325ad7823c3f41d7c79ba73cc27844bdf28df70d39a8eb24502c704f8e","tests/blocking.rs":"d80c278b5d9168651ac77c88ea3781d2ea3c9178b5e864bb461e704e6b7df50e","tests/hammer.rs":"aa239a52668f4f77c17bb87125efb054d5604c9c0df09db9f4e564ed8deda693","tests/threadpool.rs":"ad8012b5433c0a1cdff40a7e9dbc216d87c0b4bea763ea57a106e542fe5694ce"},"package":"24ab84f574027b0e875378f31575cf175360891919e93a3490f07e76e00e4efb"} \ No newline at end of file diff --git a/third_party/rust/tokio-threadpool/CHANGELOG.md b/third_party/rust/tokio-threadpool/CHANGELOG.md new file mode 100644 index 000000000000..57716e6b4d09 --- /dev/null +++ b/third_party/rust/tokio-threadpool/CHANGELOG.md @@ -0,0 +1,25 @@ +# 0.1.5 (July 3, 2018) + +* Fix race condition bug when threads are woken up (#459). +* Improve `BlockingError` message (#451). 
+ +# 0.1.4 (June 6, 2018) + +* Fix bug that can occur with multiple pools in a process (#375). + +# 0.1.3 (May 2, 2018) + +* Add `blocking` annotation (#317). + +# 0.1.2 (March 30, 2018) + +* Add the ability to specify a custom thread parker. + +# 0.1.1 (March 22, 2018) + +* Handle futures that panic on the threadpool. +* Optionally support futures 0.2. + +# 0.1.0 (March 09, 2018) + +* Initial release diff --git a/third_party/rust/tokio-threadpool/Cargo.toml b/third_party/rust/tokio-threadpool/Cargo.toml new file mode 100644 index 000000000000..79f5b635e502 --- /dev/null +++ b/third_party/rust/tokio-threadpool/Cargo.toml @@ -0,0 +1,51 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-threadpool" +version = "0.1.5" +authors = ["Carl Lerche "] +description = "A task scheduler backed by a work-stealing thread pool.\n" +homepage = "https://github.com/tokio-rs/tokio" +documentation = "https://docs.rs/tokio-threadpool" +keywords = ["futures", "tokio"] +categories = ["concurrency", "asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.crossbeam-deque] +version = "0.3" + +[dependencies.futures] +version = "0.1.19" + +[dependencies.log] +version = "0.4" + +[dependencies.num_cpus] +version = "1.2" + +[dependencies.rand] +version = "0.4" + +[dependencies.tokio-executor] +version = "0.1.2" +[dev-dependencies.env_logger] +version = "0.4" + +[dev-dependencies.futures-cpupool] +version = "0.1.7" + +[dev-dependencies.threadpool] +version = "1.7.1" + +[dev-dependencies.tokio-timer] +version = "0.1" diff --git a/third_party/rust/tokio-threadpool/LICENSE b/third_party/rust/tokio-threadpool/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-threadpool/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-threadpool/README.md b/third_party/rust/tokio-threadpool/README.md new file mode 100644 index 000000000000..6ed817c816e0 --- /dev/null +++ b/third_party/rust/tokio-threadpool/README.md @@ -0,0 +1,52 @@ +# Tokio Thread Pool + +A library for scheduling execution of futures concurrently across a pool of +threads. + +### Why not Rayon? + +Rayon is designed to handle parallelizing single computations by breaking them +into smaller chunks. The scheduling for each individual chunk doesn't matter as +long as the root computation completes in a timely fashion. In other words, +Rayon does not provide any guarantees of fairness with regards to how each task +gets scheduled. + +On the other hand, `tokio-threadpool` is a general purpose scheduler and +attempts to schedule each task fairly. This is the ideal behavior when +scheduling a set of unrelated tasks. + +### Why not futures-cpupool? + +It's 10x slower. + +## Examples + +```rust +extern crate tokio_threadpool; +extern crate futures; + +use tokio_threadpool::*; +use futures::*; +use futures::sync::oneshot; + +pub fn main() { + let (tx, _pool) = ThreadPool::new(); + + let res = oneshot::spawn(future::lazy(|| { + println!("Running on the pool"); + Ok::<_, ()>("complete") + }), &tx); + + println!("Result: {:?}", res.wait()); +} +``` + +## License + +This project is licensed under the [MIT license](LICENSE). 
+ +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-threadpool/benches/basic.rs b/third_party/rust/tokio-threadpool/benches/basic.rs new file mode 100644 index 000000000000..e2d43bbd8db1 --- /dev/null +++ b/third_party/rust/tokio-threadpool/benches/basic.rs @@ -0,0 +1,162 @@ +#![feature(test)] +#![deny(warnings)] + +extern crate tokio_threadpool; +extern crate futures; +extern crate futures_cpupool; +extern crate num_cpus; +extern crate test; + +const NUM_SPAWN: usize = 10_000; +const NUM_YIELD: usize = 1_000; +const TASKS_PER_CPU: usize = 50; + +mod threadpool { + use futures::{future, task, Async}; + use tokio_threadpool::*; + use num_cpus; + use test; + use std::sync::{mpsc, Arc}; + use std::sync::atomic::AtomicUsize; + use std::sync::atomic::Ordering::SeqCst; + + #[bench] + fn spawn_many(b: &mut test::Bencher) { + let threadpool = ThreadPool::new(); + + let (tx, rx) = mpsc::sync_channel(10); + let rem = Arc::new(AtomicUsize::new(0)); + + b.iter(move || { + rem.store(super::NUM_SPAWN, SeqCst); + + for _ in 0..super::NUM_SPAWN { + let tx = tx.clone(); + let rem = rem.clone(); + + threadpool.spawn(future::lazy(move || { + if 1 == rem.fetch_sub(1, SeqCst) { + tx.send(()).unwrap(); + } + + Ok(()) + })); + } + + let _ = rx.recv().unwrap(); + }); + } + + #[bench] + fn yield_many(b: &mut test::Bencher) { + let threadpool = ThreadPool::new(); + let tasks = super::TASKS_PER_CPU * num_cpus::get(); + + let (tx, rx) = mpsc::sync_channel(tasks); + + b.iter(move || { + for _ in 0..tasks { + let mut rem = super::NUM_YIELD; + let tx = tx.clone(); + + threadpool.spawn(future::poll_fn(move || { + rem -= 1; + + if rem == 0 { + tx.send(()).unwrap(); + Ok(Async::Ready(())) + } else { + // Notify the current task + task::current().notify(); + + // Not ready + Ok(Async::NotReady) + } + })); + } + + 
for _ in 0..tasks { + let _ = rx.recv().unwrap(); + } + }); + } +} + +// In this case, CPU pool completes the benchmark faster, but this is due to how +// CpuPool currently behaves, starving other futures. This completes the +// benchmark quickly but results in poor runtime characteristics for a thread +// pool. +// +// See rust-lang-nursery/futures-rs#617 +// +mod cpupool { + use futures::{task, Async}; + use futures::future::{self, Executor}; + use futures_cpupool::*; + use num_cpus; + use test; + use std::sync::{mpsc, Arc}; + use std::sync::atomic::AtomicUsize; + use std::sync::atomic::Ordering::SeqCst; + + #[bench] + fn spawn_many(b: &mut test::Bencher) { + let pool = CpuPool::new(num_cpus::get()); + + let (tx, rx) = mpsc::sync_channel(10); + let rem = Arc::new(AtomicUsize::new(0)); + + b.iter(move || { + rem.store(super::NUM_SPAWN, SeqCst); + + for _ in 0..super::NUM_SPAWN { + let tx = tx.clone(); + let rem = rem.clone(); + + pool.execute(future::lazy(move || { + if 1 == rem.fetch_sub(1, SeqCst) { + tx.send(()).unwrap(); + } + + Ok(()) + })).ok().unwrap(); + } + + let _ = rx.recv().unwrap(); + }); + } + + #[bench] + fn yield_many(b: &mut test::Bencher) { + let pool = CpuPool::new(num_cpus::get()); + let tasks = super::TASKS_PER_CPU * num_cpus::get(); + + let (tx, rx) = mpsc::sync_channel(tasks); + + b.iter(move || { + for _ in 0..tasks { + let mut rem = super::NUM_YIELD; + let tx = tx.clone(); + + pool.execute(future::poll_fn(move || { + rem -= 1; + + if rem == 0 { + tx.send(()).unwrap(); + Ok(Async::Ready(())) + } else { + // Notify the current task + task::current().notify(); + + // Not ready + Ok(Async::NotReady) + } + })).ok().unwrap(); + } + + for _ in 0..tasks { + let _ = rx.recv().unwrap(); + } + }); + } +} diff --git a/third_party/rust/tokio-threadpool/benches/blocking.rs b/third_party/rust/tokio-threadpool/benches/blocking.rs new file mode 100644 index 000000000000..ea432c885f96 --- /dev/null +++ b/third_party/rust/tokio-threadpool/benches/blocking.rs 
@@ -0,0 +1,148 @@ +#![feature(test)] +#![deny(warnings)] + +extern crate futures; +extern crate rand; +extern crate tokio_threadpool; +extern crate threadpool; +extern crate test; + +const ITER: usize = 1_000; + +mod blocking { + use super::*; + + use futures::future::*; + use tokio_threadpool::{Builder, blocking}; + + #[bench] + fn cpu_bound(b: &mut test::Bencher) { + let pool = Builder::new() + .pool_size(2) + .max_blocking(20) + .build(); + + b.iter(|| { + let count_down = Arc::new(CountDown::new(::ITER)); + + for _ in 0..::ITER { + let count_down = count_down.clone(); + + pool.spawn(lazy(move || { + poll_fn(|| { + blocking(|| { + perform_complex_computation() + }) + .map_err(|_| panic!()) + }) + .and_then(move |_| { + // Do something with the value + count_down.dec(); + Ok(()) + }) + })); + } + + count_down.wait(); + }) + } +} + +mod message_passing { + use super::*; + + use futures::future::*; + use futures::sync::oneshot; + use tokio_threadpool::Builder; + + #[bench] + fn cpu_bound(b: &mut test::Bencher) { + let pool = Builder::new() + .pool_size(2) + .max_blocking(20) + .build(); + + let blocking = threadpool::ThreadPool::new(20); + + b.iter(|| { + let count_down = Arc::new(CountDown::new(::ITER)); + + for _ in 0..::ITER { + let count_down = count_down.clone(); + let blocking = blocking.clone(); + + pool.spawn(lazy(move || { + // Create a channel to receive the return value. + let (tx, rx) = oneshot::channel(); + + // Spawn a task on the blocking thread pool to process the + // computation. 
+ blocking.execute(move || { + let res = perform_complex_computation(); + tx.send(res).unwrap(); + }); + + rx.and_then(move |_| { + count_down.dec(); + Ok(()) + }).map_err(|_| panic!()) + })); + } + + count_down.wait(); + }) + } +} + +fn perform_complex_computation() -> usize { + use rand::*; + + // Simulate a CPU heavy computation + let mut rng = rand::thread_rng(); + rng.gen() +} + +// Util for waiting until the tasks complete + +use std::sync::*; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::*; + +struct CountDown { + rem: AtomicUsize, + mutex: Mutex<()>, + condvar: Condvar, +} + +impl CountDown { + fn new(rem: usize) -> Self { + CountDown { + rem: AtomicUsize::new(rem), + mutex: Mutex::new(()), + condvar: Condvar::new(), + } + } + + fn dec(&self) { + let prev = self.rem.fetch_sub(1, AcqRel); + + if prev != 1 { + return; + } + + let _lock = self.mutex.lock().unwrap(); + self.condvar.notify_all(); + } + + fn wait(&self) { + let mut lock = self.mutex.lock().unwrap(); + + loop { + if self.rem.load(Acquire) == 0 { + return; + } + + lock = self.condvar.wait(lock).unwrap(); + } + } +} diff --git a/third_party/rust/tokio-threadpool/benches/depth.rs b/third_party/rust/tokio-threadpool/benches/depth.rs new file mode 100644 index 000000000000..d500ad4aee43 --- /dev/null +++ b/third_party/rust/tokio-threadpool/benches/depth.rs @@ -0,0 +1,73 @@ +#![feature(test)] +#![deny(warnings)] + +extern crate tokio_threadpool; +extern crate futures; +extern crate futures_cpupool; +extern crate num_cpus; +extern crate test; + +const ITER: usize = 20_000; + +mod us { + use tokio_threadpool::*; + use futures::future; + use test; + use std::sync::mpsc; + + #[bench] + fn chained_spawn(b: &mut test::Bencher) { + let threadpool = ThreadPool::new(); + + fn spawn(pool_tx: Sender, res_tx: mpsc::Sender<()>, n: usize) { + if n == 0 { + res_tx.send(()).unwrap(); + } else { + let pool_tx2 = pool_tx.clone(); + pool_tx.spawn(future::lazy(move || { + spawn(pool_tx2, res_tx, n 
- 1); + Ok(()) + })).unwrap(); + } + } + + b.iter(move || { + let (res_tx, res_rx) = mpsc::channel(); + + spawn(threadpool.sender().clone(), res_tx, super::ITER); + res_rx.recv().unwrap(); + }); + } +} + +mod cpupool { + use futures::future::{self, Executor}; + use futures_cpupool::*; + use num_cpus; + use test; + use std::sync::mpsc; + + #[bench] + fn chained_spawn(b: &mut test::Bencher) { + let pool = CpuPool::new(num_cpus::get()); + + fn spawn(pool: CpuPool, res_tx: mpsc::Sender<()>, n: usize) { + if n == 0 { + res_tx.send(()).unwrap(); + } else { + let pool2 = pool.clone(); + pool.execute(future::lazy(move || { + spawn(pool2, res_tx, n - 1); + Ok(()) + })).ok().unwrap(); + } + } + + b.iter(move || { + let (res_tx, res_rx) = mpsc::channel(); + + spawn(pool.clone(), res_tx, super::ITER); + res_rx.recv().unwrap(); + }); + } +} diff --git a/third_party/rust/tokio-threadpool/examples/depth.rs b/third_party/rust/tokio-threadpool/examples/depth.rs new file mode 100644 index 000000000000..7957f09edd55 --- /dev/null +++ b/third_party/rust/tokio-threadpool/examples/depth.rs @@ -0,0 +1,46 @@ +extern crate futures; +extern crate tokio_threadpool; +extern crate env_logger; + +use tokio_threadpool::*; +use futures::future::{self, Executor}; + +use std::sync::mpsc; + +const ITER: usize = 2_000_000; +// const ITER: usize = 30; + +fn chained_spawn() { + let pool = ThreadPool::new(); + let tx = pool.sender().clone(); + + fn spawn(tx: Sender, res_tx: mpsc::Sender<()>, n: usize) { + if n == 0 { + res_tx.send(()).unwrap(); + } else { + let tx2 = tx.clone(); + tx.execute(future::lazy(move || { + spawn(tx2, res_tx, n - 1); + Ok(()) + })).ok().unwrap(); + } + } + + loop { + println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + let (res_tx, res_rx) = mpsc::channel(); + + for _ in 0..10 { + spawn(tx.clone(), res_tx.clone(), ITER); + } + + for _ in 0..10 { + res_rx.recv().unwrap(); + } + } +} + +pub fn main() { + let _ = ::env_logger::init(); + chained_spawn(); +} diff --git 
a/third_party/rust/tokio-threadpool/examples/hello.rs b/third_party/rust/tokio-threadpool/examples/hello.rs new file mode 100644 index 000000000000..3324f862a990 --- /dev/null +++ b/third_party/rust/tokio-threadpool/examples/hello.rs @@ -0,0 +1,21 @@ +extern crate futures; +extern crate tokio_threadpool; +extern crate env_logger; + +use tokio_threadpool::*; +use futures::*; +use futures::sync::oneshot; + +pub fn main() { + let _ = ::env_logger::init(); + + let pool = ThreadPool::new(); + let tx = pool.sender().clone(); + + let res = oneshot::spawn(future::lazy(|| { + println!("Running on the pool"); + Ok::<_, ()>("complete") + }), &tx); + + println!("Result: {:?}", res.wait()); +} diff --git a/third_party/rust/tokio-threadpool/examples/smoke.rs b/third_party/rust/tokio-threadpool/examples/smoke.rs new file mode 100644 index 000000000000..8ab144917fe9 --- /dev/null +++ b/third_party/rust/tokio-threadpool/examples/smoke.rs @@ -0,0 +1,34 @@ +extern crate futures; +extern crate tokio_threadpool; +extern crate tokio_timer; +extern crate env_logger; + +use tokio_threadpool::*; +use tokio_timer::Timer; + +use futures::*; +use futures::sync::oneshot::spawn; + +use std::thread; +use std::time::Duration; + +pub fn main() { + let _ = ::env_logger::init(); + + let timer = Timer::default(); + { + let pool = ThreadPool::new(); + let tx = pool.sender().clone(); + + let fut = timer.interval(Duration::from_millis(300)) + .for_each(|_| { + println!("~~~~~ Hello ~~~"); + Ok(()) + }) + .map_err(|_| unimplemented!()); + + spawn(fut, &tx).wait().unwrap(); + } + + thread::sleep(Duration::from_millis(100)); +} diff --git a/third_party/rust/tokio-threadpool/src/blocking.rs b/third_party/rust/tokio-threadpool/src/blocking.rs new file mode 100644 index 000000000000..d75ed13ea986 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/blocking.rs @@ -0,0 +1,170 @@ +use worker::Worker; + +use futures::Poll; + +use std::error::Error; +use std::fmt; + +/// Error raised by `blocking`. 
+pub struct BlockingError { + _p: (), +} + +/// Enter a blocking section of code. +/// +/// The `blocking` function annotates a section of code that performs a blocking +/// operation, either by issuing a blocking syscall or by performing a long +/// running CPU-bound computation. +/// +/// When the `blocking` function enters, it hands off the responsibility of +/// processing the current work queue to another thread. Then, it calls the +/// supplied closure. The closure is permitted to block indefinitely. +/// +/// If the maximum number of concurrent `blocking` calls has been reached, then +/// `NotReady` is returned and the task is notified once existing `blocking` +/// calls complete. The maximum value is specified when creating a thread pool +/// using [`Builder::max_blocking`][build] +/// +/// [build]: struct.Builder.html#method.max_blocking +/// +/// # Return +/// +/// When the blocking closure is executed, `Ok(T)` is returned, where `T` is the +/// closure's return value. +/// +/// If the thread pool has shutdown, `Err` is returned. +/// +/// If the number of concurrent `blocking` calls has reached the maximum, +/// `Ok(NotReady)` is returned and the current task is notified when a call to +/// `blocking` will succeed. +/// +/// If `blocking` is called from outside the context of a Tokio thread pool, +/// `Err` is returned. +/// +/// # Background +/// +/// By default, the Tokio thread pool expects that tasks will only run for short +/// periods at a time before yielding back to the thread pool. This is the basic +/// premise of cooperative multitasking. +/// +/// However, it is common to want to perform a blocking operation while +/// processing an asynchronous computation. Examples of blocking operation +/// include: +/// +/// * Performing synchronous file operations (reading and writing). +/// * Blocking on acquiring a mutex. +/// * Performing a CPU bound computation, like cryptographic encryption or +/// decryption. 
+/// +/// One option for dealing with blocking operations in an asynchronous context +/// is to use a thread pool dedicated to performing these operations. This not +/// ideal as it requires bidirectional message passing as well as a channel to +/// communicate which adds a level of buffering. +/// +/// Instead, `blocking` hands off the responsibility of processing the work queue +/// to another thread. This hand off is light compared to a channel and does not +/// require buffering. +/// +/// # Examples +/// +/// Block on receiving a message from a `std` channel. This example is a little +/// silly as using the non-blocking channel from the `futures` crate would make +/// more sense. The blocking receive can be replaced with any blocking operation +/// that needs to be performed. +/// +/// ```rust +/// # extern crate futures; +/// # extern crate tokio_threadpool; +/// +/// use tokio_threadpool::{ThreadPool, blocking}; +/// +/// use futures::Future; +/// use futures::future::{lazy, poll_fn}; +/// +/// use std::sync::mpsc; +/// use std::thread; +/// use std::time::Duration; +/// +/// pub fn main() { +/// // This is a *blocking* channel +/// let (tx, rx) = mpsc::channel(); +/// +/// // Spawn a thread to send a message +/// thread::spawn(move || { +/// thread::sleep(Duration::from_millis(500)); +/// tx.send("hello").unwrap(); +/// }); +/// +/// let pool = ThreadPool::new(); +/// +/// pool.spawn(lazy(move || { +/// // Because `blocking` returns `Poll`, it is intended to be used +/// // from the context of a `Future` implementation. Since we don't +/// // have a complicated requirement, we can use `poll_fn` in this +/// // case. +/// poll_fn(move || { +/// blocking(|| { +/// let msg = rx.recv().unwrap(); +/// println!("message = {}", msg); +/// }).map_err(|_| panic!("the threadpool shut down")) +/// }) +/// })); +/// +/// // Wait for the task we just spawned to complete. 
+/// pool.shutdown_on_idle().wait().unwrap(); +/// } +/// ``` +pub fn blocking(f: F) -> Poll +where F: FnOnce() -> T, +{ + let res = Worker::with_current(|worker| { + let worker = match worker { + Some(worker) => worker, + None => { + return Err(BlockingError { _p: () }); + } + }; + + // Transition the worker state to blocking. This will exit the fn early + // with `NotReady` if the pool does not have enough capacity to enter + // blocking mode. + worker.transition_to_blocking() + }); + + // If the transition cannot happen, exit early + try_ready!(res); + + // Currently in blocking mode, so call the inner closure + let ret = f(); + + // Try to transition out of blocking mode. This is a fast path that takes + // back ownership of the worker if the worker handoff didn't complete yet. + Worker::with_current(|worker| { + // Worker must be set since it was above. + worker.unwrap() + .transition_from_blocking(); + }); + + // Return the result + Ok(ret.into()) +} + +impl fmt::Display for BlockingError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{}", self.description()) + } +} + +impl fmt::Debug for BlockingError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("BlockingError") + .field("reason", &self.description()) + .finish() + } +} + +impl Error for BlockingError { + fn description(&self) -> &str { + "`blocking` annotation used from outside the context of a thread pool" + } +} diff --git a/third_party/rust/tokio-threadpool/src/builder.rs b/third_party/rust/tokio-threadpool/src/builder.rs new file mode 100644 index 000000000000..0e5a36e5ea02 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/builder.rs @@ -0,0 +1,437 @@ +use callback::Callback; +use config::{Config, MAX_WORKERS}; +use park::{BoxPark, BoxedPark, DefaultPark}; +use sender::Sender; +use pool::{Pool, MAX_BACKUP}; +use thread_pool::ThreadPool; +use worker::{self, Worker, WorkerId}; + +use std::error::Error; +use std::fmt; +use std::sync::Arc; 
+use std::time::Duration; + +use num_cpus; +use tokio_executor::Enter; +use tokio_executor::park::Park; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Builds a thread pool with custom configuration values. +/// +/// Methods can be chained in order to set the configuration values. The thread +/// pool is constructed by calling [`build`]. +/// +/// New instances of `Builder` are obtained via [`Builder::new`]. +/// +/// See function level documentation for details on the various configuration +/// settings. +/// +/// [`build`]: #method.build +/// [`Builder::new`]: #method.new +/// +/// # Examples +/// +/// ``` +/// # extern crate tokio_threadpool; +/// # extern crate futures; +/// # use tokio_threadpool::Builder; +/// use futures::future::{Future, lazy}; +/// use std::time::Duration; +/// +/// # pub fn main() { +/// let thread_pool = Builder::new() +/// .pool_size(4) +/// .keep_alive(Some(Duration::from_secs(30))) +/// .build(); +/// +/// thread_pool.spawn(lazy(|| { +/// println!("called from a worker thread"); +/// Ok(()) +/// })); +/// +/// // Gracefully shutdown the threadpool +/// thread_pool.shutdown().wait().unwrap(); +/// # } +/// ``` +pub struct Builder { + /// Thread pool specific configuration values + config: Config, + + /// Number of workers to spawn + pool_size: usize, + + /// Maximum number of futures that can be in a blocking section + /// concurrently. + max_blocking: usize, + + /// Generates the `Park` instances + new_park: Box BoxPark>, +} + +impl Builder { + /// Returns a new thread pool builder initialized with default configuration + /// values. + /// + /// Configuration methods can be chained on the return value. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// use std::time::Duration; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .pool_size(4) + /// .keep_alive(Some(Duration::from_secs(30))) + /// .build(); + /// # } + /// ``` + pub fn new() -> Builder { + let num_cpus = num_cpus::get(); + + let new_park = Box::new(|_: &WorkerId| { + Box::new(BoxedPark::new(DefaultPark::new())) + as BoxPark + }); + + Builder { + pool_size: num_cpus, + max_blocking: 100, + config: Config { + keep_alive: None, + name_prefix: None, + stack_size: None, + around_worker: None, + after_start: None, + before_stop: None, + }, + new_park, + } + } + + /// Set the maximum number of worker threads for the thread pool instance. + /// + /// This must be a number between 1 and 32,768 though it is advised to keep + /// this value on the smaller side. + /// + /// The default value is the number of cores available to the system. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .pool_size(4) + /// .build(); + /// # } + /// ``` + pub fn pool_size(&mut self, val: usize) -> &mut Self { + assert!(val >= 1, "at least one thread required"); + assert!(val <= MAX_WORKERS, "max value is {}", 32768); + + self.pool_size = val; + self + } + + /// Set the maximum number of concurrent blocking sections. + /// + /// When the maximum concurrent `blocking` calls is reached, any further + /// calls to `blocking` will return `NotReady` and the task is notified once + /// previously in-flight calls to `blocking` return. + /// + /// This must be a number between 1 and 32,768 though it is advised to keep + /// this value on the smaller side. + /// + /// The default value is 100. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .max_blocking(200) + /// .build(); + /// # } + /// ``` + pub fn max_blocking(&mut self, val: usize) -> &mut Self { + assert!(val <= MAX_BACKUP, "max value is {}", MAX_BACKUP); + self.max_blocking = val; + self + } + + /// Set the worker thread keep alive duration + /// + /// If set, a worker thread will wait for up to the specified duration for + /// work, at which point the thread will shutdown. When work becomes + /// available, a new thread will eventually be spawned to replace the one + /// that shut down. + /// + /// When the value is `None`, the thread will wait for work forever. + /// + /// The default value is `None`. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// use std::time::Duration; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .keep_alive(Some(Duration::from_secs(30))) + /// .build(); + /// # } + /// ``` + pub fn keep_alive(&mut self, val: Option) -> &mut Self { + self.config.keep_alive = val; + self + } + + /// Set name prefix of threads spawned by the scheduler + /// + /// Thread name prefix is used for generating thread names. For example, if + /// prefix is `my-pool-`, then threads in the pool will get names like + /// `my-pool-1` etc. + /// + /// If this configuration is not set, then the thread will use the system + /// default naming scheme. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .name_prefix("my-pool-") + /// .build(); + /// # } + /// ``` + pub fn name_prefix>(&mut self, val: S) -> &mut Self { + self.config.name_prefix = Some(val.into()); + self + } + + /// Set the stack size (in bytes) for worker threads. + /// + /// The actual stack size may be greater than this value if the platform + /// specifies minimal stack size. + /// + /// The default stack size for spawned threads is 2 MiB, though this + /// particular stack size is subject to change in the future. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .stack_size(32 * 1024) + /// .build(); + /// # } + /// ``` + pub fn stack_size(&mut self, val: usize) -> &mut Self { + self.config.stack_size = Some(val); + self + } + + /// Execute function `f` on each worker thread. + /// + /// This function is provided a handle to the worker and is expected to call + /// [`Worker::run`], otherwise the worker thread will shutdown without doing + /// any work. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .around_worker(|worker, _| { + /// println!("worker is starting up"); + /// worker.run(); + /// println!("worker is shutting down"); + /// }) + /// .build(); + /// # } + /// ``` + /// + /// [`Worker::run`]: struct.Worker.html#method.run + pub fn around_worker(&mut self, f: F) -> &mut Self + where F: Fn(&Worker, &mut Enter) + Send + Sync + 'static + { + self.config.around_worker = Some(Callback::new(f)); + self + } + + /// Execute function `f` after each thread is started but before it starts + /// doing work. + /// + /// This is intended for bookkeeping and monitoring use cases. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .after_start(|| { + /// println!("thread started"); + /// }) + /// .build(); + /// # } + /// ``` + pub fn after_start(&mut self, f: F) -> &mut Self + where F: Fn() + Send + Sync + 'static + { + self.config.after_start = Some(Arc::new(f)); + self + } + + /// Execute function `f` before each thread stops. + /// + /// This is intended for bookkeeping and monitoring use cases. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .before_stop(|| { + /// println!("thread stopping"); + /// }) + /// .build(); + /// # } + /// ``` + pub fn before_stop(&mut self, f: F) -> &mut Self + where F: Fn() + Send + Sync + 'static + { + self.config.before_stop = Some(Arc::new(f)); + self + } + + /// Customize the `park` instance used by each worker thread. 
+ /// + /// The provided closure `f` is called once per worker and returns a `Park` + /// instance that is used by the worker to put itself to sleep. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// # fn decorate(f: F) -> F { f } + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .custom_park(|_| { + /// use tokio_threadpool::park::DefaultPark; + /// + /// // This is the default park type that the worker would use if we + /// // did not customize it. + /// let park = DefaultPark::new(); + /// + /// // Decorate the `park` instance, allowing us to customize work + /// // that happens when a worker thread goes to sleep. + /// decorate(park) + /// }) + /// .build(); + /// # } + /// ``` + pub fn custom_park(&mut self, f: F) -> &mut Self + where F: Fn(&WorkerId) -> P + 'static, + P: Park + Send + 'static, + P::Error: Error, + { + self.new_park = Box::new(move |id| { + Box::new(BoxedPark::new(f(id))) + }); + + self + } + + /// Create the configured `ThreadPool`. + /// + /// The returned `ThreadPool` instance is ready to spawn tasks. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::Builder; + /// + /// # pub fn main() { + /// let thread_pool = Builder::new() + /// .build(); + /// # } + /// ``` + pub fn build(&self) -> ThreadPool { + let mut workers = vec![]; + + trace!("build; num-workers={}", self.pool_size); + + for i in 0..self.pool_size { + let id = WorkerId::new(i); + let park = (self.new_park)(&id); + let unpark = park.unpark(); + + workers.push(worker::Entry::new(park, unpark)); + } + + // Create the pool + let inner = Arc::new( + Pool::new( + workers.into_boxed_slice(), + self.max_blocking, + self.config.clone())); + + // Wrap with `Sender` + let inner = Some(Sender { + inner + }); + + ThreadPool { inner } + } +} + +impl fmt::Debug for Builder { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Builder") + .field("config", &self.config) + .field("pool_size", &self.pool_size) + .field("new_park", &"Box BoxPark>") + .finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/callback.rs b/third_party/rust/tokio-threadpool/src/callback.rs new file mode 100644 index 000000000000..e269872a91e9 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/callback.rs @@ -0,0 +1,29 @@ +use worker::Worker; + +use std::fmt; +use std::sync::Arc; + +use tokio_executor::Enter; + +#[derive(Clone)] +pub(crate) struct Callback { + f: Arc, +} + +impl Callback { + pub fn new(f: F) -> Self + where F: Fn(&Worker, &mut Enter) + Send + Sync + 'static + { + Callback { f: Arc::new(f) } + } + + pub fn call(&self, worker: &Worker, enter: &mut Enter) { + (self.f)(worker, enter) + } +} + +impl fmt::Debug for Callback { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "Fn") + } +} diff --git a/third_party/rust/tokio-threadpool/src/config.rs b/third_party/rust/tokio-threadpool/src/config.rs new file mode 100644 index 000000000000..e51efc2ba406 --- /dev/null +++ 
b/third_party/rust/tokio-threadpool/src/config.rs @@ -0,0 +1,32 @@ +use callback::Callback; + +use std::fmt; +use std::sync::Arc; +use std::time::Duration; + +/// Thread pool specific configuration values +#[derive(Clone)] +pub(crate) struct Config { + pub keep_alive: Option, + // Used to configure a worker thread + pub name_prefix: Option, + pub stack_size: Option, + pub around_worker: Option, + pub after_start: Option>, + pub before_stop: Option>, +} + +/// Max number of workers that can be part of a pool. This is the most that can +/// fit in the scheduler state. Note, that this is the max number of **active** +/// threads. There can be more standby threads. +pub(crate) const MAX_WORKERS: usize = 1 << 15; + +impl fmt::Debug for Config { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Config") + .field("keep_alive", &self.keep_alive) + .field("name_prefix", &self.name_prefix) + .field("stack_size", &self.stack_size) + .finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/futures2_wake.rs b/third_party/rust/tokio-threadpool/src/futures2_wake.rs new file mode 100644 index 000000000000..ed9d4552c55e --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/futures2_wake.rs @@ -0,0 +1,60 @@ +use inner::Pool; +use notifier::Notifier; + +use std::marker::PhantomData; +use std::mem; +use std::sync::Arc; + +use futures::executor::Notify; +use futures2; + +pub(crate) struct Futures2Wake { + notifier: Arc, + id: usize, +} + +impl Futures2Wake { + pub(crate) fn new(id: usize, inner: &Arc) -> Futures2Wake { + let notifier = Arc::new(Notifier { + inner: Arc::downgrade(inner), + }); + Futures2Wake { id, notifier } + } +} + +impl Drop for Futures2Wake { + fn drop(&mut self) { + self.notifier.drop_id(self.id) + } +} + +struct ArcWrapped(PhantomData); + +unsafe impl futures2::task::UnsafeWake for ArcWrapped { + unsafe fn clone_raw(&self) -> futures2::task::Waker { + let me: *const ArcWrapped = self; + let arc = (*(&me as *const *const 
ArcWrapped as *const Arc)).clone(); + arc.notifier.clone_id(arc.id); + into_waker(arc) + } + + unsafe fn drop_raw(&self) { + let mut me: *const ArcWrapped = self; + let me = &mut me as *mut *const ArcWrapped as *mut Arc; + (*me).notifier.drop_id((*me).id); + ::std::ptr::drop_in_place(me); + } + + unsafe fn wake(&self) { + let me: *const ArcWrapped = self; + let me = &me as *const *const ArcWrapped as *const Arc; + (*me).notifier.notify((*me).id) + } +} + +pub(crate) fn into_waker(rc: Arc) -> futures2::task::Waker { + unsafe { + let ptr = mem::transmute::, *mut ArcWrapped>(rc); + futures2::task::Waker::new(ptr) + } +} diff --git a/third_party/rust/tokio-threadpool/src/lib.rs b/third_party/rust/tokio-threadpool/src/lib.rs new file mode 100644 index 000000000000..05d3d3eb160b --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/lib.rs @@ -0,0 +1,152 @@ +//! A work-stealing based thread pool for executing futures. + +#![doc(html_root_url = "https://docs.rs/tokio-threadpool/0.1.5")] +#![deny(warnings, missing_docs, missing_debug_implementations)] + +// The Tokio thread pool is designed to scheduled futures in Tokio based +// applications. The thread pool structure manages two sets of threads: +// +// * Worker threads. +// * Backup threads. +// +// Worker threads are used to schedule futures using a work-stealing strategy. +// Backup threads, on the other hand, are intended only to support the +// `blocking` API. Threads will transition between the two sets. +// +// The advantage of the work-stealing strategy is minimal cross-thread +// coordination. The thread pool attempts to make as much progress as possible +// without communicating across threads. +// +// # Crate layout +// +// The primary type, `Pool`, holds the majority of a thread pool's state, +// including the state for each worker. Each worker's state is maintained in an +// instance of `worker::Entry`. +// +// `Worker` contains the logic that runs on each worker thread. 
It holds an +// `Arc` to `Pool` and is able to access its state from `Pool`. +// +// `Task` is a harness around an individual future. It manages polling and +// scheduling that future. +// +// # Worker overview +// +// Each worker has two queues: a deque and a mpsc channel. The deque is the +// primary queue for tasks that are scheduled to run on the worker thread. Tasks +// can only be pushed onto the deque by the worker, but other workers may +// "steal" from that deque. The mpsc channel is used to submit futures while +// external to the pool. +// +// As long as the thread pool has not been shutdown, a worker will run in a +// loop. Each loop, it consumes all tasks on its mpsc channel and pushes it onto +// the deque. It then pops tasks off of the deque and executes them. +// +// If a worker has no work, i.e., both queues are empty. It attempts to steal. +// To do this, it randomly scans other workers' deques and tries to pop a task. +// If it finds no work to steal, the thread goes to sleep. +// +// When the worker detects that the pool has been shut down, it exits the loop, +// cleans up its state, and shuts the thread down. +// +// # Thread pool initialization +// +// By default, no threads are spawned on creation. Instead, when new futures are +// spawned, the pool first checks if there are enough active worker threads. If +// not, a new worker thread is spawned. +// +// # Spawning futures +// +// The spawning behavior depends on whether a future was spawned from within a +// worker or thread or if it was spawned from an external handle. +// +// When spawning a future while external to the thread pool, the current +// strategy is to randomly pick a worker to submit the task to. The task is then +// pushed onto that worker's mpsc channel. +// +// When spawning a future while on a worker thread, the task is pushed onto the +// back of the current worker's deque. +// +// # Sleeping workers +// +// Sleeping workers are tracked using a treiber stack [1]. 
This results in the +// thread that most recently went to sleep getting woken up first. When the pool +// is not under load, this helps threads shutdown faster. +// +// Sleeping is done by using `tokio_executor::Park` implementations. This allows +// the user of the thread pool to customize the work that is performed to sleep. +// This is how injecting timers and other functionality into the thread pool is +// done. +// +// [1]: https://en.wikipedia.org/wiki/Treiber_Stack +// +// # Notifying workers +// +// When there is work to be done, workers must be notified. However, notifying a +// worker requires cross thread coordination. Ideally, a worker would only be +// notified when it is sleeping, but there is no way to know if a worker is +// sleeping without cross thread communication. +// +// The two cases when a worker might need to be notified are: +// +// 1) A task is externally submitted to a worker via the mpsc channel. +// 2) A worker has a back log of work and needs other workers to steal from it. +// +// In the first case, the worker will always be notified. However, it could be +// possible to avoid the notification if the mpsc channel has two or greater +// number of tasks *after* the task is submitted. In this case, we are able to +// assume that the worker has previously been notified. +// +// The second case is trickier. Currently, whenever a worker spawns a new future +// (pushing it onto its deque) and when it pops a future from its mpsc, it tries +// to notify a sleeping worker to wake up and start stealing. This is a lot of +// notification and it **might** be possible to reduce it. +// +// Also, whenever a worker is woken up via a signal and it does find work, it, +// in turn, will try to wake up a new worker. +// +// # `blocking` +// +// The strategy for handling blocking closures is to hand off the worker to a +// new thread. This implies handing off the `deque` and `mpsc`. 
Once this is +// done, the new thread continues to process the work queue and the original +// thread is able to block. Once it finishes processing the blocking future, the +// thread has no additional work and is inserted into the backup pool. This +// makes it available to other workers that encounter a `blocking` call. + +extern crate tokio_executor; + +extern crate crossbeam_deque as deque; +#[macro_use] +extern crate futures; +extern crate num_cpus; +extern crate rand; + +#[macro_use] +extern crate log; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +pub mod park; + +mod blocking; +mod builder; +mod callback; +mod config; +#[cfg(feature = "unstable-futures")] +mod futures2_wake; +mod notifier; +mod pool; +mod sender; +mod shutdown; +mod shutdown_task; +mod task; +mod thread_pool; +mod worker; + +pub use blocking::{blocking, BlockingError}; +pub use builder::Builder; +pub use sender::Sender; +pub use shutdown::Shutdown; +pub use thread_pool::ThreadPool; +pub use worker::{Worker, WorkerId}; diff --git a/third_party/rust/tokio-threadpool/src/notifier.rs b/third_party/rust/tokio-threadpool/src/notifier.rs new file mode 100644 index 000000000000..74c78cc8025b --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/notifier.rs @@ -0,0 +1,94 @@ +use pool::Pool; +use task::Task; + +use std::mem; +use std::ops; +use std::sync::{Arc, Weak}; + +use futures::executor::Notify; + +/// Implements the future `Notify` API. +/// +/// This is how external events are able to signal the task, informing it to try +/// to poll the future again. +#[derive(Debug)] +pub(crate) struct Notifier { + pub inner: Weak, +} + +/// A guard that ensures that the inner value gets forgotten. 
+#[derive(Debug)] +struct Forget(Option); + +impl Notify for Notifier { + fn notify(&self, id: usize) { + trace!("Notifier::notify; id=0x{:x}", id); + + unsafe { + let ptr = id as *const Task; + + // We did not actually take ownership of the `Arc` in this function + // so we must ensure that the Arc is forgotten. + let task = Forget::new(Arc::from_raw(ptr)); + + // TODO: Unify this with Task::notify + if task.schedule() { + // TODO: Check if the pool is still running + // + // Bump the ref count + let task = task.clone(); + + if let Some(inner) = self.inner.upgrade() { + let _ = inner.submit(task, &inner); + } + } + } + } + + fn clone_id(&self, id: usize) -> usize { + let ptr = id as *const Task; + + // This function doesn't actually get a strong ref to the task here. + // However, the only method we have to convert a raw pointer -> &Arc + // is to call `Arc::from_raw` which returns a strong ref. So, to + // maintain the invariants, `t1` has to be forgotten. This prevents the + // ref count from being decremented. + let t1 = Forget::new(unsafe { Arc::from_raw(ptr) }); + + // The clone is forgotten so that the fn exits without decrementing the ref + // count. The caller of `clone_id` ensures that `drop_id` is called when + // the ref count needs to be decremented. 
+ let _ = Forget::new(t1.clone()); + + id + } + + fn drop_id(&self, id: usize) { + unsafe { + let ptr = id as *const Task; + let _ = Arc::from_raw(ptr); + } + } +} + +// ===== impl Forget ===== + +impl Forget { + fn new(t: T) -> Self { + Forget(Some(t)) + } +} + +impl ops::Deref for Forget { + type Target = T; + + fn deref(&self) -> &T { + self.0.as_ref().unwrap() + } +} + +impl Drop for Forget { + fn drop(&mut self) { + mem::forget(self.0.take()); + } +} diff --git a/third_party/rust/tokio-threadpool/src/park/boxed.rs b/third_party/rust/tokio-threadpool/src/park/boxed.rs new file mode 100644 index 000000000000..bd3671d482e4 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/park/boxed.rs @@ -0,0 +1,40 @@ +use tokio_executor::park::{Park, Unpark}; + +use std::error::Error; +use std::time::Duration; + +pub(crate) type BoxPark = Box + Send>; +pub(crate) type BoxUnpark = Box; + +pub(crate) struct BoxedPark(T); + +impl BoxedPark { + pub fn new(inner: T) -> Self { + BoxedPark(inner) + } +} + +impl Park for BoxedPark +where T::Error: Error, +{ + type Unpark = BoxUnpark; + type Error = (); + + fn unpark(&self) -> Self::Unpark { + Box::new(self.0.unpark()) + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.0.park() + .map_err(|e| { + warn!("calling `park` on worker thread errored -- shutting down thread: {}", e); + }) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.0.park_timeout(duration) + .map_err(|e| { + warn!("calling `park` on worker thread errored -- shutting down thread: {}", e); + }) + } +} diff --git a/third_party/rust/tokio-threadpool/src/park/default_park.rs b/third_party/rust/tokio-threadpool/src/park/default_park.rs new file mode 100644 index 000000000000..4ccbe34da586 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/park/default_park.rs @@ -0,0 +1,183 @@ +use tokio_executor::park::{Park, Unpark}; + +use std::error::Error; +use std::fmt; +use std::sync::{Arc, Mutex, Condvar}; +use 
std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::time::Duration; + +/// Parks the thread. +#[derive(Debug)] +pub struct DefaultPark { + inner: Arc, +} + +/// Unparks threads that were parked by `DefaultPark`. +#[derive(Debug)] +pub struct DefaultUnpark { + inner: Arc, +} + +/// Error returned by [`ParkThread`] +/// +/// This currently is never returned, but might at some point in the future. +/// +/// [`ParkThread`]: struct.ParkThread.html +#[derive(Debug)] +pub struct ParkError { + _p: (), +} + +#[derive(Debug)] +struct Inner { + state: AtomicUsize, + mutex: Mutex<()>, + condvar: Condvar, +} + +const IDLE: usize = 0; +const NOTIFY: usize = 1; +const SLEEP: usize = 2; + +// ===== impl DefaultPark ===== + +impl DefaultPark { + /// Creates a new `DefaultPark` instance. + pub fn new() -> DefaultPark { + let inner = Arc::new(Inner { + state: AtomicUsize::new(IDLE), + mutex: Mutex::new(()), + condvar: Condvar::new(), + }); + + DefaultPark { inner } + } + + /// Unpark the thread without having to clone the unpark handle. + /// + /// Named `notify` to avoid conflicting with the `unpark` fn. + pub(crate) fn notify(&self) { + self.inner.unpark(); + } + + pub(crate) fn park_sync(&self, duration: Option) { + self.inner.park(duration); + } +} + +impl Park for DefaultPark { + type Unpark = DefaultUnpark; + type Error = ParkError; + + fn unpark(&self) -> Self::Unpark { + let inner = self.inner.clone(); + DefaultUnpark { inner } + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.inner.park(None); + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.inner.park(Some(duration)); + Ok(()) + } +} + +// ===== impl DefaultUnpark ===== + +impl Unpark for DefaultUnpark { + fn unpark(&self) { + self.inner.unpark(); + } +} + +impl Inner { + /// Park the current thread for at most `dur`. + fn park(&self, timeout: Option) { + // If currently notified, then we skip sleeping. 
This is checked outside + // of the lock to avoid acquiring a mutex if not necessary. + match self.state.compare_and_swap(NOTIFY, IDLE, SeqCst) { + NOTIFY => return, + IDLE => {}, + _ => unreachable!(), + } + + // If the duration is zero, then there is no need to actually block + if let Some(ref dur) = timeout { + if *dur == Duration::from_millis(0) { + return; + } + } + + // The state is currently idle, so obtain the lock and then try to + // transition to a sleeping state. + let mut m = self.mutex.lock().unwrap(); + + // Transition to sleeping + match self.state.compare_and_swap(IDLE, SLEEP, SeqCst) { + NOTIFY => { + // Notified before we could sleep, consume the notification and + // exit + self.state.store(IDLE, SeqCst); + return; + } + IDLE => {}, + _ => unreachable!(), + } + + m = match timeout { + Some(timeout) => self.condvar.wait_timeout(m, timeout).unwrap().0, + None => self.condvar.wait(m).unwrap(), + }; + + // Transition back to idle. If the state has transitioned to `NOTIFY`, + // this will consume that notification. + self.state.store(IDLE, SeqCst); + + // Explicitly drop the mutex guard. There is no real point in doing it + // except that I find it helpful to make it explicit where we want the + // mutex to unlock. + drop(m); + } + + fn unpark(&self) { + // First, try transitioning from IDLE -> NOTIFY, this does not require a + // lock. 
+ match self.state.compare_and_swap(IDLE, NOTIFY, SeqCst) { + IDLE | NOTIFY => return, + SLEEP => {} + _ => unreachable!(), + } + + // The other half is sleeping, this requires a lock + let _m = self.mutex.lock().unwrap(); + + // Transition to NOTIFY + match self.state.swap(NOTIFY, SeqCst) { + SLEEP => {} + NOTIFY => return, + IDLE => return, + _ => unreachable!(), + } + + // Wakeup the sleeper + self.condvar.notify_one(); + } +} + +// ===== impl ParkError ===== + +impl fmt::Display for ParkError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(fmt) + } +} + +impl Error for ParkError { + fn description(&self) -> &str { + "unknown park error" + } +} diff --git a/third_party/rust/tokio-threadpool/src/park/mod.rs b/third_party/rust/tokio-threadpool/src/park/mod.rs new file mode 100644 index 000000000000..e7c5f40d3676 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/park/mod.rs @@ -0,0 +1,8 @@ +//! Thread parking utilities. + +mod boxed; +mod default_park; + +pub use self::default_park::{DefaultPark, DefaultUnpark, ParkError}; + +pub(crate) use self::boxed::{BoxPark, BoxUnpark, BoxedPark}; diff --git a/third_party/rust/tokio-threadpool/src/pool/backup.rs b/third_party/rust/tokio-threadpool/src/pool/backup.rs new file mode 100644 index 000000000000..4f0c295a539d --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/pool/backup.rs @@ -0,0 +1,304 @@ +use park::DefaultPark; +use worker::{WorkerId}; + +use std::cell::UnsafeCell; +use std::fmt; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{self, Acquire, AcqRel, Relaxed}; + +/// State associated with a thread in the thread pool. +/// +/// The pool manages a number of threads. Some of those threads are considered +/// "primary" threads and process the work queue. When a task being run on a +/// primary thread enters a blocking context, the responsibility of processing +/// the work queue must be handed off to another thread. 
This is done by first +/// checking for idle threads on the backup stack. If one is found, the worker +/// token (`WorkerId`) is handed off to that running thread. If none are found, +/// a new thread is spawned. +/// +/// This state manages the exchange. A thread that is idle, not assigned to a +/// work queue, sits around for a specified amount of time. When the worker +/// token is handed off, it is first stored in `handoff`. The backup thread is +/// then signaled. At this point, the backup thread wakes up from sleep and +/// reads `handoff`. At that point, it has been promoted to a primary thread and +/// will begin processing inbound work on the work queue. +/// +/// The name `Backup` isn't really great for what the type does, but I have not +/// come up with a better name... Maybe it should just be named `Thread`. +#[derive(Debug)] +pub(crate) struct Backup { + /// Worker ID that is being handed to this thread. + handoff: UnsafeCell>, + + /// Thread state. + /// + /// This tracks: + /// + /// * Is queued flag + /// * If the pool is shutting down. + /// * If the thread is running + state: AtomicUsize, + + /// Next entry in the treiber stack. + next_sleeper: UnsafeCell, + + /// Used to put the thread to sleep + park: DefaultPark, +} + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub(crate) struct BackupId(pub(crate) usize); + +#[derive(Debug)] +pub(crate) enum Handoff { + Worker(WorkerId), + Idle, + Terminated, +} + +/// Tracks thread state. +#[derive(Clone, Copy, Eq, PartialEq)] +struct State(usize); + +/// Set when the worker is pushed onto the scheduler's stack of sleeping +/// threads. +/// +/// This flag also serves as a "notification" bit. If another thread is +/// attempting to hand off a worker to the backup thread, then the pushed bit +/// will not be set when the thread tries to shutdown. 
+pub const PUSHED: usize = 0b001; + +/// Set when the thread is running +pub const RUNNING: usize = 0b010; + +/// Set when the thread pool has terminated +pub const TERMINATED: usize = 0b100; + +// ===== impl Backup ===== + +impl Backup { + pub fn new() -> Backup { + Backup { + handoff: UnsafeCell::new(None), + state: AtomicUsize::new(State::new().into()), + next_sleeper: UnsafeCell::new(BackupId(0)), + park: DefaultPark::new(), + } + } + + /// Called when the thread is starting + pub fn start(&self, worker_id: &WorkerId) { + debug_assert!({ + let state: State = self.state.load(Relaxed).into(); + + debug_assert!(!state.is_pushed()); + debug_assert!(state.is_running()); + debug_assert!(!state.is_terminated()); + + true + }); + + // The handoff value is equal to `worker_id` + debug_assert_eq!(unsafe { (*self.handoff.get()).as_ref() }, Some(worker_id)); + + unsafe { *self.handoff.get() = None; } + } + + pub fn is_running(&self) -> bool { + let state: State = self.state.load(Relaxed).into(); + state.is_running() + } + + /// Hands off the worker to a thread. + /// + /// Returns `true` if the thread needs to be spawned. + pub fn worker_handoff(&self, worker_id: WorkerId) -> bool { + unsafe { + // The backup worker should not already have been handoff a worker. + debug_assert!((*self.handoff.get()).is_none()); + + // Set the handoff + *self.handoff.get() = Some(worker_id); + } + + // This *probably* can just be `Release`... memory orderings, how do + // they work? 
+ let prev = State::worker_handoff(&self.state); + debug_assert!(prev.is_pushed()); + + if prev.is_running() { + // Wakeup the backup thread + self.park.notify(); + false + } else { + true + } + } + + /// Terminate the worker + pub fn signal_stop(&self) { + let prev: State = self.state.fetch_xor(TERMINATED | PUSHED, AcqRel).into(); + + debug_assert!(!prev.is_terminated()); + debug_assert!(prev.is_pushed()); + + if prev.is_running() { + self.park.notify(); + } + } + + /// Release the worker + pub fn release(&self) { + let prev: State = self.state.fetch_xor(RUNNING, AcqRel).into(); + + debug_assert!(prev.is_running()); + } + + /// Wait for a worker handoff + pub fn wait_for_handoff(&self, sleep: bool) -> Handoff { + let mut state: State = self.state.load(Acquire).into(); + + // Run in a loop since there can be spurious wakeups + loop { + if !state.is_pushed() { + if state.is_terminated() { + return Handoff::Terminated; + } + + let worker_id = unsafe { + (*self.handoff.get()).take() + .expect("no worker handoff") + }; + + return Handoff::Worker(worker_id); + } + + if sleep { + // TODO: Park with a timeout + self.park.park_sync(None); + + // Reload the state + state = self.state.load(Acquire).into(); + debug_assert!(state.is_running()); + } else { + debug_assert!(state.is_running()); + + // Transition out of running + let mut next = state; + next.unset_running(); + + let actual = self.state.compare_and_swap( + state.into(), + next.into(), + AcqRel).into(); + + if actual == state { + debug_assert!(!next.is_running()); + + return Handoff::Idle; + } + + state = actual; + } + } + } + + pub fn is_pushed(&self) -> bool { + let state: State = self.state.load(Relaxed).into(); + state.is_pushed() + } + + pub fn set_pushed(&self, ordering: Ordering) { + let prev: State = self.state.fetch_or(PUSHED, ordering).into(); + debug_assert!(!prev.is_pushed()); + } + + #[inline] + pub fn next_sleeper(&self) -> BackupId { + unsafe { *self.next_sleeper.get() } + } + + #[inline] + pub fn 
set_next_sleeper(&self, val: BackupId) { + unsafe { *self.next_sleeper.get() = val; } + } +} + +// ===== impl State ===== + +impl State { + /// Returns a new, default, thread `State` + pub fn new() -> State { + State(0) + } + + /// Returns true if the thread entry is pushed in the sleeper stack + pub fn is_pushed(&self) -> bool { + self.0 & PUSHED == PUSHED + } + + pub fn set_pushed(&mut self) { + self.0 |= PUSHED; + } + + fn unset_pushed(&mut self) { + self.0 &= !PUSHED; + } + + pub fn is_running(&self) -> bool { + self.0 & RUNNING == RUNNING + } + + pub fn set_running(&mut self) { + self.0 |= RUNNING; + } + + pub fn unset_running(&mut self) { + self.0 &= !RUNNING; + } + + pub fn is_terminated(&self) -> bool { + self.0 & TERMINATED == TERMINATED + } + + fn worker_handoff(state: &AtomicUsize) -> State { + let mut curr: State = state.load(Acquire).into(); + + loop { + let mut next = curr; + next.set_running(); + next.unset_pushed(); + + let actual = state.compare_and_swap( + curr.into(), next.into(), AcqRel).into(); + + if actual == curr { + return curr; + } + + curr = actual; + } + } +} + +impl From for State { + fn from(src: usize) -> State { + State(src) + } +} + +impl From for usize { + fn from(src: State) -> usize { + src.0 + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("backup::State") + .field("is_pushed", &self.is_pushed()) + .field("is_running", &self.is_running()) + .field("is_terminated", &self.is_terminated()) + .finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/pool/backup_stack.rs b/third_party/rust/tokio-threadpool/src/pool/backup_stack.rs new file mode 100644 index 000000000000..c330a0368bbe --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/pool/backup_stack.rs @@ -0,0 +1,185 @@ +use pool::{Backup, BackupId}; + +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{Acquire, AcqRel}; + +#[derive(Debug)] +pub(crate) struct BackupStack { + 
state: AtomicUsize, +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy)] +struct State(usize); + +pub(crate) const MAX_BACKUP: usize = 1 << 15; + +/// Extracts the head of the backup stack from the state +const STACK_MASK: usize = ((1 << 16) - 1); + +/// Used to mark the stack as empty +pub(crate) const EMPTY: BackupId = BackupId(MAX_BACKUP); + +/// Used to mark the stack as terminated +pub(crate) const TERMINATED: BackupId = BackupId(EMPTY.0 + 1); + +/// How many bits the treiber ABA guard is offset by +const ABA_GUARD_SHIFT: usize = 16; + +#[cfg(target_pointer_width = "64")] +const ABA_GUARD_MASK: usize = (1 << (64 - ABA_GUARD_SHIFT)) - 1; + +#[cfg(target_pointer_width = "32")] +const ABA_GUARD_MASK: usize = (1 << (32 - ABA_GUARD_SHIFT)) - 1; + +// ===== impl BackupStack ===== + +impl BackupStack { + pub fn new() -> BackupStack { + let state = AtomicUsize::new(State::new().into()); + BackupStack { state } + } + + /// Push a backup thread onto the stack + /// + /// # Return + /// + /// Returns `Ok` on success. + /// + /// Returns `Err` if the pool has transitioned to the `TERMINATED` state. + /// When terminated, pushing new entries is no longer permitted. + pub fn push(&self, entries: &[Backup], id: BackupId) -> Result<(), ()> { + let mut state: State = self.state.load(Acquire).into(); + + entries[id.0].set_pushed(AcqRel); + + loop { + let mut next = state; + + let head = state.head(); + + if head == TERMINATED { + // The pool is terminated, cannot push the sleeper. + return Err(()); + } + + entries[id.0].set_next_sleeper(head); + next.set_head(id); + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if state == actual { + return Ok(()); + } + + state = actual; + } + } + + /// Pop a backup thread off the stack. + /// + /// If `terminate` is set and the stack is empty when this function is + /// called, the state of the stack is transitioned to "terminated". 
At this + /// point, no further entries can be pushed onto the stack. + /// + /// # Return + /// + /// * Returns the index of the popped worker and the worker's observed + /// state. + /// + /// * `Ok(None)` if the stack is empty. + /// * `Err(_)` is returned if the pool has been shutdown. + pub fn pop(&self, entries: &[Backup], terminate: bool) -> Result, ()> { + // Figure out the empty value + let terminal = match terminate { + true => TERMINATED, + false => EMPTY, + }; + + let mut state: State = self.state.load(Acquire).into(); + + loop { + let head = state.head(); + + if head == EMPTY { + let mut next = state; + next.set_head(terminal); + + if next == state { + debug_assert!(terminal == EMPTY); + return Ok(None); + } + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual != state { + state = actual; + continue; + } + + return Ok(None); + } else if head == TERMINATED { + return Err(()); + } + + debug_assert!(head.0 < MAX_BACKUP); + + let mut next = state; + + let next_head = entries[head.0].next_sleeper(); + + // TERMINATED can never be set as the "next pointer" on a worker. 
+ debug_assert!(next_head != TERMINATED); + + if next_head == EMPTY { + next.set_head(terminal); + } else { + next.set_head(next_head); + } + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + debug_assert!(entries[head.0].is_pushed()); + return Ok(Some(head)); + } + + state = actual; + } + } +} + +// ===== impl State ===== + +impl State { + fn new() -> State { + State(EMPTY.0) + } + + fn head(&self) -> BackupId { + BackupId(self.0 & STACK_MASK) + } + + fn set_head(&mut self, val: BackupId) { + let val = val.0; + + // The ABA guard protects against the ABA problem w/ treiber stacks + let aba_guard = ((self.0 >> ABA_GUARD_SHIFT) + 1) & ABA_GUARD_MASK; + + self.0 = (aba_guard << ABA_GUARD_SHIFT) | val; + } +} + +impl From for State { + fn from(src: usize) -> Self { + State(src) + } +} + +impl From for usize { + fn from(src: State) -> Self { + src.0 + } +} diff --git a/third_party/rust/tokio-threadpool/src/pool/mod.rs b/third_party/rust/tokio-threadpool/src/pool/mod.rs new file mode 100644 index 000000000000..5a79b0dee700 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/pool/mod.rs @@ -0,0 +1,565 @@ +mod backup; +mod backup_stack; +mod state; + +pub(crate) use self::backup::{Backup, BackupId}; +pub(crate) use self::backup_stack::MAX_BACKUP; +pub(crate) use self::state::{ + State, + Lifecycle, + MAX_FUTURES, +}; + +use self::backup::Handoff; +use self::backup_stack::BackupStack; + +use config::Config; +use shutdown_task::ShutdownTask; +use task::{Task, Blocking}; +use worker::{self, Worker, WorkerId}; + +use futures::Poll; +use futures::task::AtomicTask; + +use std::cell::UnsafeCell; +use std::sync::atomic::Ordering::{Acquire, AcqRel, Relaxed}; +use std::sync::atomic::AtomicUsize; +use std::sync::Arc; +use std::thread; + +use rand::{Rng, SeedableRng, XorShiftRng}; + +#[derive(Debug)] +pub(crate) struct Pool { + // Tracks the state of the thread pool (running, shutting down, ...). 
+ // + // While workers check this field as a hint to detect shutdown, it is + // **not** used as a primary point of coordination for workers. The sleep + // stack is used as the primary point of coordination for workers. + // + // The value of this atomic is deserialized into a `pool::State` instance. + // See comments for that type. + pub state: AtomicUsize, + + // Stack tracking sleeping workers. + sleep_stack: worker::Stack, + + // Number of workers that haven't reached the final state of shutdown + // + // This is only used to know when to single `shutdown_task` once the + // shutdown process has completed. + pub num_workers: AtomicUsize, + + // Used to generate a thread local RNG seed + pub next_thread_id: AtomicUsize, + + // Worker state + // + // A worker is a thread that is processing the work queue and polling + // futures. + // + // This will *usually* be a small number. + pub workers: Box<[worker::Entry]>, + + // Backup thread state + // + // In order to efficiently support `blocking`, a pool of backup threads is + // needed. These backup threads are ready to take over a worker if the + // future being processed requires blocking. + backup: Box<[Backup]>, + + // Stack of sleeping backup threads + pub backup_stack: BackupStack, + + // State regarding coordinating blocking sections and tracking tasks that + // are pending blocking capacity. + blocking: Blocking, + + // Task notified when the worker shuts down + pub shutdown_task: ShutdownTask, + + // Configuration + pub config: Config, +} + +const TERMINATED: usize = 1; + +impl Pool { + /// Create a new `Pool` + pub fn new(workers: Box<[worker::Entry]>, max_blocking: usize, config: Config) -> Pool { + let pool_size = workers.len(); + let total_size = max_blocking + pool_size; + + // Create the set of backup entries + // + // This is `backup + pool_size` because the core thread pool running the + // workers is spawned from backup as well. 
+ let backup = (0..total_size).map(|_| { + Backup::new() + }).collect::>().into_boxed_slice(); + + let backup_stack = BackupStack::new(); + + for i in (0..backup.len()).rev() { + backup_stack.push(&backup, BackupId(i)) + .unwrap(); + } + + // Initialize the blocking state + let blocking = Blocking::new(max_blocking); + + let ret = Pool { + state: AtomicUsize::new(State::new().into()), + sleep_stack: worker::Stack::new(), + num_workers: AtomicUsize::new(0), + next_thread_id: AtomicUsize::new(0), + workers, + backup, + backup_stack, + blocking, + shutdown_task: ShutdownTask { + task1: AtomicTask::new(), + #[cfg(feature = "unstable-futures")] + task2: futures2::task::AtomicWaker::new(), + }, + config, + }; + + // Now, we prime the sleeper stack + for i in 0..pool_size { + ret.sleep_stack.push(&ret.workers, i).unwrap(); + } + + ret + } + + /// Start shutting down the pool. This means that no new futures will be + /// accepted. + pub fn shutdown(&self, now: bool, purge_queue: bool) { + let mut state: State = self.state.load(Acquire).into(); + + trace!("shutdown; state={:?}", state); + + // For now, this must be true + debug_assert!(!purge_queue || now); + + // Start by setting the shutdown flag + loop { + let mut next = state; + + let num_futures = next.num_futures(); + + if next.lifecycle() == Lifecycle::ShutdownNow { + // Already transitioned to shutting down state + + if !purge_queue || num_futures == 0 { + // Nothing more to do + return; + } + + // The queue must be purged + debug_assert!(purge_queue); + next.clear_num_futures(); + } else { + next.set_lifecycle(if now || num_futures == 0 { + // If already idle, always transition to shutdown now. 
+ Lifecycle::ShutdownNow + } else { + Lifecycle::ShutdownOnIdle + }); + + if purge_queue { + next.clear_num_futures(); + } + } + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if state == actual { + state = next; + break; + } + + state = actual; + } + + trace!(" -> transitioned to shutdown"); + + // Only transition to terminate if there are no futures currently on the + // pool + if state.num_futures() != 0 { + return; + } + + self.terminate_sleeping_workers(); + } + + pub fn is_shutdown(&self) -> bool { + self.num_workers.load(Acquire) == TERMINATED + } + + /// Called by `Worker` as it tries to enter a sleeping state. Before it + /// sleeps, it must push itself onto the sleep stack. This enables other + /// threads to see it when signaling work. + pub fn push_sleeper(&self, idx: usize) -> Result<(), ()> { + self.sleep_stack.push(&self.workers, idx) + } + + pub fn terminate_sleeping_workers(&self) { + use worker::Lifecycle::Signaled; + + // First, set the TERMINATED flag on `num_workers`. This signals that + // whichever thread transitions the count to zero must notify the + // shutdown task. + let prev = self.num_workers.fetch_or(TERMINATED, AcqRel); + let notify = prev == 0; + + trace!(" -> shutting down workers"); + // Wakeup all sleeping workers. They will wake up, see the state + // transition, and terminate. + while let Some((idx, worker_state)) = self.sleep_stack.pop(&self.workers, Signaled, true) { + self.workers[idx].signal_stop(worker_state); + } + + // Now terminate any backup threads + // + // The call to `pop` must be successful because shutting down the pool + // is coordinated and at this point, this is the only thread that will + // attempt to transition the backup stack to "terminated". 
+ while let Ok(Some(backup_id)) = self.backup_stack.pop(&self.backup, true) { + self.backup[backup_id.0].signal_stop(); + } + + if notify { + self.shutdown_task.notify(); + } + } + + /// Track that a worker thread has started + /// + /// If `Err` is returned, then the thread is not permitted to started. + fn thread_started(&self) -> Result<(), ()> { + let mut curr = self.num_workers.load(Acquire); + + loop { + if curr & TERMINATED == TERMINATED { + return Err(()); + } + + let actual = self.num_workers.compare_and_swap( + curr, curr + 2, AcqRel); + + if curr == actual { + return Ok(()); + } + + curr = actual; + } + } + + fn thread_stopped(&self) { + let prev = self.num_workers.fetch_sub(2, AcqRel); + + if prev == TERMINATED | 2 { + self.shutdown_task.notify(); + } + } + + pub fn poll_blocking_capacity(&self, task: &Arc) -> Poll<(), ::BlockingError> { + self.blocking.poll_blocking_capacity(task) + } + + /// Submit a task to the scheduler. + /// + /// Called from either inside or outside of the scheduler. If currently on + /// the scheduler, then a fast path is taken. + pub fn submit(&self, task: Arc, inner: &Arc) { + debug_assert_eq!(*self, **inner); + + Worker::with_current(|worker| { + if let Some(worker) = worker { + // If the worker is in blocking mode, then even though the + // thread-local variable is set, the current thread does not + // have ownership of that worker entry. This is because the + // worker entry has already been handed off to another thread. + // + // The second check handles the case where the current thread is + // part of a different threadpool than the one being submitted + // to. 
+ if !worker.is_blocking() && *self == *worker.inner { + let idx = worker.id.0; + + trace!(" -> submit internal; idx={}", idx); + + worker.inner.workers[idx].submit_internal(task); + worker.inner.signal_work(inner); + return; + } + } + + self.submit_external(task, inner); + }); + } + + /// Submit a task to the scheduler from off worker + /// + /// Called from outside of the scheduler, this function is how new tasks + /// enter the system. + pub fn submit_external(&self, task: Arc, inner: &Arc) { + debug_assert_eq!(*self, **inner); + + use worker::Lifecycle::Notified; + + // First try to get a handle to a sleeping worker. This ensures that + // sleeping tasks get woken up + if let Some((idx, worker_state)) = self.sleep_stack.pop(&self.workers, Notified, false) { + trace!("submit to existing worker; idx={}; state={:?}", idx, worker_state); + self.submit_to_external(idx, task, worker_state, inner); + return; + } + + // All workers are active, so pick a random worker and submit the + // task to it. + let len = self.workers.len(); + let idx = self.rand_usize() % len; + + trace!(" -> submitting to random; idx={}", idx); + + let state = self.workers[idx].load_state(); + self.submit_to_external(idx, task, state, inner); + } + + fn submit_to_external(&self, + idx: usize, + task: Arc, + state: worker::State, + inner: &Arc) + { + debug_assert_eq!(*self, **inner); + + let entry = &self.workers[idx]; + + if !entry.submit_external(task, state) { + self.spawn_thread(WorkerId::new(idx), inner); + } + } + + pub fn release_backup(&self, backup_id: BackupId) -> Result<(), ()> { + // First update the state, this cannot fail because the caller must have + // exclusive access to the backup token. 
+ self.backup[backup_id.0].release(); + + // Push the backup entry back on the stack + self.backup_stack.push(&self.backup, backup_id) + } + + pub fn notify_blocking_task(&self, inner: &Arc) { + debug_assert_eq!(*self, **inner); + self.blocking.notify_task(&inner); + } + + /// Provision a thread to run a worker + pub fn spawn_thread(&self, id: WorkerId, inner: &Arc) { + debug_assert_eq!(*self, **inner); + + let backup_id = match self.backup_stack.pop(&self.backup, false) { + Ok(Some(backup_id)) => backup_id, + Ok(None) => panic!("no thread available"), + Err(_) => { + debug!("failed to spawn worker thread due to the thread pool shutting down"); + return; + } + }; + + let need_spawn = self.backup[backup_id.0] + .worker_handoff(id.clone()); + + if !need_spawn { + return; + } + + if self.thread_started().is_err() { + // The pool is shutting down. + return; + } + + let mut th = thread::Builder::new(); + + if let Some(ref prefix) = inner.config.name_prefix { + th = th.name(format!("{}{}", prefix, backup_id.0)); + } + + if let Some(stack) = inner.config.stack_size { + th = th.stack_size(stack); + } + + let inner = inner.clone(); + + let res = th.spawn(move || { + if let Some(ref f) = inner.config.after_start { + f(); + } + + let mut worker_id = id; + + inner.backup[backup_id.0].start(&worker_id); + + loop { + // The backup token should be in the running state. + debug_assert!(inner.backup[backup_id.0].is_running()); + + // TODO: Avoid always cloning + let worker = Worker::new(worker_id, backup_id, inner.clone()); + + // Run the worker. If the worker transitioned to a "blocking" + // state, then `is_blocking` will be true. + if !worker.do_run() { + // The worker shutdown, so exit the thread. + break; + } + + // Push the thread back onto the backup stack. This makes it + // available for future handoffs. + // + // This **must** happen before notifying the task. 
+ let res = inner.backup_stack + .push(&inner.backup, backup_id); + + if res.is_err() { + // The pool is being shutdown. + break; + } + + // The task switched the current thread to blocking mode. + // Now that the blocking task completed, any tasks + inner.notify_blocking_task(&inner); + + debug_assert!(inner.backup[backup_id.0].is_running()); + + // Wait for a handoff + let handoff = inner.backup[backup_id.0] + .wait_for_handoff(true); + + match handoff { + Handoff::Worker(id) => { + debug_assert!(inner.backup[backup_id.0].is_running()); + worker_id = id; + } + Handoff::Idle => { + // Worker is idle + break; + } + Handoff::Terminated => { + // TODO: When wait_for_handoff supports blocking with a + // timeout, this will have to be smarter + break; + } + } + } + + if let Some(ref f) = inner.config.before_stop { + f(); + } + + inner.thread_stopped(); + }); + + if let Err(e) = res { + warn!("failed to spawn worker thread; err={:?}", e); + } + } + + /// If there are any other workers currently relaxing, signal them that work + /// is available so that they can try to find more work to process. + pub fn signal_work(&self, inner: &Arc) { + debug_assert_eq!(*self, **inner); + + use worker::Lifecycle::*; + + if let Some((idx, mut worker_state)) = self.sleep_stack.pop(&self.workers, Signaled, false) { + let entry = &self.workers[idx]; + + debug_assert!(worker_state.lifecycle() != Signaled, "actual={:?}", worker_state.lifecycle()); + + // Transition the worker state to signaled + loop { + let mut next = worker_state; + + next.set_lifecycle(Signaled); + + let actual = entry.state.compare_and_swap( + worker_state.into(), next.into(), AcqRel).into(); + + if actual == worker_state { + break; + } + + worker_state = actual; + } + + // The state has been transitioned to signal, now we need to wake up + // the worker if necessary. 
+ match worker_state.lifecycle() { + Sleeping => { + trace!("signal_work -- wakeup; idx={}", idx); + self.workers[idx].wakeup(); + } + Shutdown => { + trace!("signal_work -- spawn; idx={}", idx); + self.spawn_thread(WorkerId(idx), inner); + } + Running | Notified | Signaled => { + // The workers are already active. No need to wake them up. + } + } + } + } + + /// Generates a random number + /// + /// Uses a thread-local seeded XorShift. + pub fn rand_usize(&self) -> usize { + // Use a thread-local random number generator. If the thread does not + // have one yet, then seed a new one + thread_local!(static THREAD_RNG_KEY: UnsafeCell> = UnsafeCell::new(None)); + + THREAD_RNG_KEY.with(|t| { + #[cfg(target_pointer_width = "32")] + fn new_rng(thread_id: usize) -> XorShiftRng { + XorShiftRng::from_seed([ + thread_id as u32, + 0x00000000, + 0xa8a7d469, + 0x97830e05]) + } + + #[cfg(target_pointer_width = "64")] + fn new_rng(thread_id: usize) -> XorShiftRng { + XorShiftRng::from_seed([ + thread_id as u32, + (thread_id >> 32) as u32, + 0xa8a7d469, + 0x97830e05]) + } + + let thread_id = self.next_thread_id.fetch_add(1, Relaxed); + let rng = unsafe { &mut *t.get() }; + + if rng.is_none() { + *rng = Some(new_rng(thread_id)); + } + + rng.as_mut().unwrap().next_u32() as usize + }) + } +} + +impl PartialEq for Pool { + fn eq(&self, other: &Pool) -> bool { + self as *const _ == other as *const _ + } +} + +unsafe impl Send for Pool {} +unsafe impl Sync for Pool {} diff --git a/third_party/rust/tokio-threadpool/src/pool/state.rs b/third_party/rust/tokio-threadpool/src/pool/state.rs new file mode 100644 index 000000000000..e8f5d12e450d --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/pool/state.rs @@ -0,0 +1,132 @@ +use std::{fmt, usize}; + +/// ThreadPool state. +/// +/// The two least significant bits are the shutdown flags. (0 for active, 1 for +/// shutdown on idle, 2 for shutting down). The remaining bits represent the +/// number of futures that still need to complete. 
+#[derive(Eq, PartialEq, Clone, Copy)] +pub(crate) struct State(usize); + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Copy)] +#[repr(usize)] +pub(crate) enum Lifecycle { + /// The thread pool is currently running + Running = 0, + + /// The thread pool should shutdown once it reaches an idle state. + ShutdownOnIdle = 1, + + /// The thread pool should start the process of shutting down. + ShutdownNow = 2, +} + +/// Mask used to extract the number of futures from the state +const LIFECYCLE_MASK: usize = 0b11; +const NUM_FUTURES_MASK: usize = !LIFECYCLE_MASK; +const NUM_FUTURES_OFFSET: usize = 2; + +/// Max number of futures the pool can handle. +pub(crate) const MAX_FUTURES: usize = usize::MAX >> NUM_FUTURES_OFFSET; + +// ===== impl State ===== + +impl State { + #[inline] + pub fn new() -> State { + State(0) + } + + /// Returns the number of futures still pending completion. + pub fn num_futures(&self) -> usize { + self.0 >> NUM_FUTURES_OFFSET + } + + /// Increment the number of futures pending completion. + /// + /// Returns false on failure. + pub fn inc_num_futures(&mut self) { + debug_assert!(self.num_futures() < MAX_FUTURES); + debug_assert!(self.lifecycle() < Lifecycle::ShutdownNow); + + self.0 += 1 << NUM_FUTURES_OFFSET; + } + + /// Decrement the number of futures pending completion. 
+ pub fn dec_num_futures(&mut self) { + let num_futures = self.num_futures(); + + if num_futures == 0 { + // Already zero + return; + } + + self.0 -= 1 << NUM_FUTURES_OFFSET; + + if self.lifecycle() == Lifecycle::ShutdownOnIdle && num_futures == 1 { + self.set_lifecycle(Lifecycle::ShutdownNow); + } + } + + /// Set the number of futures pending completion to zero + pub fn clear_num_futures(&mut self) { + self.0 = self.0 & LIFECYCLE_MASK; + } + + pub fn lifecycle(&self) -> Lifecycle { + (self.0 & LIFECYCLE_MASK).into() + } + + pub fn set_lifecycle(&mut self, val: Lifecycle) { + self.0 = (self.0 & NUM_FUTURES_MASK) | (val as usize); + } + + pub fn is_terminated(&self) -> bool { + self.lifecycle() == Lifecycle::ShutdownNow && + self.num_futures() == 0 + } +} + +impl From for State { + fn from(src: usize) -> Self { + State(src) + } +} + +impl From for usize { + fn from(src: State) -> Self { + src.0 + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("pool::State") + .field("lifecycle", &self.lifecycle()) + .field("num_futures", &self.num_futures()) + .finish() + } +} + +// ===== impl Lifecycle ===== + +impl From for Lifecycle { + fn from(src: usize) -> Lifecycle { + use self::Lifecycle::*; + + debug_assert!( + src == Running as usize || + src == ShutdownOnIdle as usize || + src == ShutdownNow as usize); + + unsafe { ::std::mem::transmute(src) } + } +} + +impl From for usize { + fn from(src: Lifecycle) -> usize { + let v = src as usize; + debug_assert!(v & LIFECYCLE_MASK == v); + v + } +} diff --git a/third_party/rust/tokio-threadpool/src/sender.rs b/third_party/rust/tokio-threadpool/src/sender.rs new file mode 100644 index 000000000000..d540e55894fe --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/sender.rs @@ -0,0 +1,250 @@ +use pool::{self, Pool, Lifecycle, MAX_FUTURES}; +use task::Task; + +use std::sync::Arc; +use std::sync::atomic::Ordering::{AcqRel, Acquire}; + +use tokio_executor::{self, 
SpawnError}; +use futures::{future, Future}; +#[cfg(feature = "unstable-futures")] +use futures2; +#[cfg(feature = "unstable-futures")] +use futures2_wake::{into_waker, Futures2Wake}; + +/// Submit futures to the associated thread pool for execution. +/// +/// A `Sender` instance is a handle to a single thread pool, allowing the owner +/// of the handle to spawn futures onto the thread pool. New futures are spawned +/// using [`Sender::spawn`]. +/// +/// The `Sender` handle is *only* used for spawning new futures. It does not +/// impact the lifecycle of the thread pool in any way. +/// +/// `Sender` instances are obtained by calling [`ThreadPool::sender`]. The +/// `Sender` struct implements the `Executor` trait. +/// +/// [`Sender::spawn`]: #method.spawn +/// [`ThreadPool::sender`]: struct.ThreadPool.html#method.sender +#[derive(Debug)] +pub struct Sender { + pub(crate) inner: Arc, +} + +impl Sender { + /// Spawn a future onto the thread pool + /// + /// This function takes ownership of the future and spawns it onto the + /// thread pool, assigning it to a worker thread. The exact strategy used to + /// assign a future to a worker depends on if the caller is already on a + /// worker thread or external to the thread pool. + /// + /// If the caller is currently on the thread pool, the spawned future will + /// be assigned to the same worker that the caller is on. If the caller is + /// external to the thread pool, the future will be assigned to a random + /// worker. + /// + /// If `spawn` returns `Ok`, this does not mean that the future will be + /// executed. The thread pool can be forcibly shutdown between the time + /// `spawn` is called and the future has a chance to execute. + /// + /// If `spawn` returns `Err`, then the future failed to be spawned. There + /// are two possible causes: + /// + /// * The thread pool is at capacity and is unable to spawn a new future. + /// This is a temporary failure. 
At some point in the future, the thread + /// pool might be able to spawn new futures. + /// * The thread pool is shutdown. This is a permanent failure indicating + /// that the handle will never be able to spawn new futures. + /// + /// The status of the thread pool can be queried before calling `spawn` + /// using the `status` function (part of the `Executor` trait). + /// + /// # Examples + /// + /// ```rust + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::ThreadPool; + /// use futures::future::{Future, lazy}; + /// + /// # pub fn main() { + /// // Create a thread pool with default configuration values + /// let thread_pool = ThreadPool::new(); + /// + /// thread_pool.sender().spawn(lazy(|| { + /// println!("called from a worker thread"); + /// Ok(()) + /// })).unwrap(); + /// + /// // Gracefully shutdown the threadpool + /// thread_pool.shutdown().wait().unwrap(); + /// # } + /// ``` + pub fn spawn(&self, future: F) -> Result<(), SpawnError> + where F: Future + Send + 'static, + { + let mut s = self; + tokio_executor::Executor::spawn(&mut s, Box::new(future)) + } + + /// Logic to prepare for spawning + fn prepare_for_spawn(&self) -> Result<(), SpawnError> { + let mut state: pool::State = self.inner.state.load(Acquire).into(); + + // Increment the number of futures spawned on the pool as well as + // validate that the pool is still running/ + loop { + let mut next = state; + + if next.num_futures() == MAX_FUTURES { + // No capacity + return Err(SpawnError::at_capacity()); + } + + if next.lifecycle() == Lifecycle::ShutdownNow { + // Cannot execute the future, executor is shutdown. 
+ return Err(SpawnError::shutdown()); + } + + next.inc_num_futures(); + + let actual = self.inner.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + trace!("execute; count={:?}", next.num_futures()); + break; + } + + state = actual; + } + + Ok(()) + } +} + +impl tokio_executor::Executor for Sender { + fn status(&self) -> Result<(), tokio_executor::SpawnError> { + let s = self; + tokio_executor::Executor::status(&s) + } + + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError> + { + let mut s = &*self; + tokio_executor::Executor::spawn(&mut s, future) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, f: Task2) -> Result<(), futures2::executor::SpawnError> { + futures2::executor::Executor::spawn(self, f) + } +} + +impl<'a> tokio_executor::Executor for &'a Sender { + fn status(&self) -> Result<(), tokio_executor::SpawnError> { + let state: pool::State = self.inner.state.load(Acquire).into(); + + if state.num_futures() == MAX_FUTURES { + // No capacity + return Err(SpawnError::at_capacity()); + } + + if state.lifecycle() == Lifecycle::ShutdownNow { + // Cannot execute the future, executor is shutdown. + return Err(SpawnError::shutdown()); + } + + Ok(()) + } + + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError> + { + self.prepare_for_spawn()?; + + // At this point, the pool has accepted the future, so schedule it for + // execution. 
+ + // Create a new task for the future + let task = Arc::new(Task::new(future)); + + self.inner.submit(task, &self.inner); + + Ok(()) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, f: Task2) -> Result<(), futures2::executor::SpawnError> { + futures2::executor::Executor::spawn(self, f) + } +} + +impl future::Executor for Sender +where T: Future + Send + 'static, +{ + fn execute(&self, future: T) -> Result<(), future::ExecuteError> { + if let Err(e) = tokio_executor::Executor::status(self) { + let kind = if e.is_at_capacity() { + future::ExecuteErrorKind::NoCapacity + } else { + future::ExecuteErrorKind::Shutdown + }; + + return Err(future::ExecuteError::new(kind, future)); + } + + let _ = self.spawn(future); + Ok(()) + } +} + +#[cfg(feature = "unstable-futures")] +type Task2 = Box + Send>; + +#[cfg(feature = "unstable-futures")] +impl futures2::executor::Executor for Sender { + fn spawn(&mut self, f: Task2) -> Result<(), futures2::executor::SpawnError> { + let mut s = &*self; + futures2::executor::Executor::spawn(&mut s, f) + } + + fn status(&self) -> Result<(), futures2::executor::SpawnError> { + let s = &*self; + futures2::executor::Executor::status(&s) + } +} + +#[cfg(feature = "unstable-futures")] +impl<'a> futures2::executor::Executor for &'a Sender { + fn spawn(&mut self, f: Task2) -> Result<(), futures2::executor::SpawnError> { + self.prepare_for_spawn() + // TODO: get rid of this once the futures crate adds more error types + .map_err(|_| futures2::executor::SpawnError::shutdown())?; + + // At this point, the pool has accepted the future, so schedule it for + // execution. 
+ + // Create a new task for the future + let task = Task::new2(f, |id| into_waker(Arc::new(Futures2Wake::new(id, &self.inner)))); + + self.inner.submit(task, &self.inner); + + Ok(()) + } + + fn status(&self) -> Result<(), futures2::executor::SpawnError> { + tokio_executor::Executor::status(self) + // TODO: get rid of this once the futures crate adds more error types + .map_err(|_| futures2::executor::SpawnError::shutdown()) + } +} + +impl Clone for Sender { + #[inline] + fn clone(&self) -> Sender { + let inner = self.inner.clone(); + Sender { inner } + } +} diff --git a/third_party/rust/tokio-threadpool/src/shutdown.rs b/third_party/rust/tokio-threadpool/src/shutdown.rs new file mode 100644 index 000000000000..29f2a342f302 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/shutdown.rs @@ -0,0 +1,63 @@ +use pool::Pool; +use sender::Sender; + +use futures::{Future, Poll, Async}; +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Future that resolves when the thread pool is shutdown. +/// +/// A `ThreadPool` is shutdown once all the worker have drained their queues and +/// shutdown their threads. +/// +/// `Shutdown` is returned by [`shutdown`], [`shutdown_on_idle`], and +/// [`shutdown_now`]. 
+/// +/// [`shutdown`]: struct.ThreadPool.html#method.shutdown +/// [`shutdown_on_idle`]: struct.ThreadPool.html#method.shutdown_on_idle +/// [`shutdown_now`]: struct.ThreadPool.html#method.shutdown_now +#[derive(Debug)] +pub struct Shutdown { + pub(crate) inner: Sender, +} + +impl Shutdown { + fn inner(&self) -> &Pool { + &*self.inner.inner + } +} + +impl Future for Shutdown { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + use futures::task; + + self.inner().shutdown_task.task1.register_task(task::current()); + + if !self.inner().is_shutdown() { + return Ok(Async::NotReady); + } + + Ok(().into()) + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::Future for Shutdown { + type Item = (); + type Error = (); + + fn poll(&mut self, cx: &mut futures2::task::Context) -> futures2::Poll<(), ()> { + trace!("Shutdown::poll"); + + self.inner().shutdown_task.task2.register(cx.waker()); + + if 0 != self.inner().num_workers.load(Acquire) { + return Ok(futures2::Async::Pending); + } + + Ok(().into()) + } +} diff --git a/third_party/rust/tokio-threadpool/src/shutdown_task.rs b/third_party/rust/tokio-threadpool/src/shutdown_task.rs new file mode 100644 index 000000000000..2d6e87d21669 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/shutdown_task.rs @@ -0,0 +1,24 @@ +use futures::task::AtomicTask; +#[cfg(feature = "unstable-futures")] +use futures2; + +#[derive(Debug)] +pub(crate) struct ShutdownTask { + pub task1: AtomicTask, + + #[cfg(feature = "unstable-futures")] + pub task2: futures2::task::AtomicWaker, +} + +impl ShutdownTask { + #[cfg(not(feature = "unstable-futures"))] + pub fn notify(&self) { + self.task1.notify(); + } + + #[cfg(feature = "unstable-futures")] + pub fn notify(&self) { + self.task1.notify(); + self.task2.wake(); + } +} diff --git a/third_party/rust/tokio-threadpool/src/task/blocking.rs b/third_party/rust/tokio-threadpool/src/task/blocking.rs new file mode 100644 index 000000000000..62eef5d9c7d5 --- 
/dev/null +++ b/third_party/rust/tokio-threadpool/src/task/blocking.rs @@ -0,0 +1,499 @@ +use pool::Pool; +use task::{Task, BlockingState}; + +use futures::{Poll, Async}; + +use std::cell::UnsafeCell; +use std::fmt; +use std::ptr; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{Acquire, Release, AcqRel, Relaxed}; +use std::thread; + +/// Manages the state around entering a blocking section and tasks that are +/// queued pending the ability to block. +/// +/// This is a hybrid counter and instrusive mpsc channel (like `Queue`). +#[derive(Debug)] +pub(crate) struct Blocking { + /// Queue head. + /// + /// This is either the current remaining capacity for blocking sections + /// **or** if the max has been reached, the head of a pending blocking + /// capacity channel of tasks. + /// + /// When this points to a task, it represents a strong reference, i.e. + /// `Arc`. + state: AtomicUsize, + + /// Tail pointer. This is `Arc` unless it points to `stub`. + tail: UnsafeCell<*mut Task>, + + /// Stub pointer, used as part of the intrusive mpsc channel algorithm + /// described by 1024cores. + stub: Box, + + /// The channel algorithm is MPSC. This means that, in order to pop tasks, + /// coordination is required. + /// + /// Since it doesn't matter *which* task pops & notifies the queued task, we + /// can avoid a full mutex and make the "lock" lock free. + /// + /// Instead, threads race to set the "entered" bit. When the transition is + /// successfully made, the thread has permission to pop tasks off of the + /// queue. If a thread loses the race, instead of waiting to pop a task, it + /// signals to the winning thread that it should pop an additional task. + lock: AtomicUsize, +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub(crate) enum CanBlock { + /// Blocking capacity has been allocated to this task. + /// + /// The capacity allocation is initially checked before a task is polled. 
If + /// capacity has been allocated, it is consumed and tracked as `Allocated`. + Allocated, + + /// Allocation capacity must be either available to the task when it is + /// polled or not available. This means that a task can only ask for + /// capacity once. This state is used to track a task that has not yet asked + /// for blocking capacity. When a task needs blocking capacity, if it is in + /// this state, it can immediately try to get an allocation. + CanRequest, + + /// The task has requested blocking capacity, but none is available. + NoCapacity, +} + +/// Decorates the `usize` value of `Blocking::state`, providing fns to +/// manipulate the state instead of requiring bit ops. +#[derive(Copy, Clone, Eq, PartialEq)] +struct State(usize); + +/// Flag differentiating between remaining capacity and task pointers. +/// +/// If we assume pointers are properly aligned, then the least significant bit +/// will always be zero. So, we use that bit to track if the value represents a +/// number. +const NUM_FLAG: usize = 1; + +/// When representing "numbers", the state has to be shifted this much (to get +/// rid of the flag bit). +const NUM_SHIFT: usize = 1; + +// ====== impl Blocking ===== +// +impl Blocking { + /// Create a new `Blocking`. + pub fn new(capacity: usize) -> Blocking { + assert!(capacity > 0, "blocking capacity must be greater than zero"); + + let stub = Box::new(Task::stub()); + let ptr = &*stub as *const _ as *mut _; + + // Allocations are aligned + debug_assert!(ptr as usize & NUM_FLAG == 0); + + // The initial state value. This starts at the max capacity. + let init = State::new(capacity); + + Blocking { + state: AtomicUsize::new(init.into()), + tail: UnsafeCell::new(ptr), + stub: stub, + lock: AtomicUsize::new(0), + } + } + + /// Atomically either acquire blocking capacity or queue the task to be + /// notified once capacity becomes available. 
+ /// + /// The caller must ensure that `task` has not previously been queued to be + /// notified when capacity becomes available. + pub fn poll_blocking_capacity(&self, task: &Arc) -> Poll<(), ::BlockingError> { + // This requires atomically claiming blocking capacity and if none is + // available, queuing &task. + + // The task cannot be queued at this point. The caller must ensure this. + debug_assert!(!BlockingState::from(task.blocking.load(Acquire)).is_queued()); + + // Don't bump the ref count unless necessary. + let mut strong: Option<*const Task> = None; + + // Load the state + let mut curr: State = self.state.load(Acquire).into(); + + loop { + let mut next = curr; + + if !next.claim_capacity(&self.stub) { + debug_assert!(curr.ptr().is_some()); + + // Unable to claim capacity, so we must queue `task` onto the + // channel. + // + // This guard also serves to ensure that queuing work that is + // only needed to run once only gets run once. + if strong.is_none() { + // First, transition the task to a "queued" state. This + // prevents double queuing. + // + // This is also the only thread that can set the queued flag + // at this point. And, the goal is for this to only be + // visible when the task node is polled from the channel. + // The memory ordering is established by MPSC queue + // operation. + // + // Note that, if the task doesn't get queued (because the + // CAS fails and capacity is now available) then this flag + // must be unset. Again, there is no race because until the + // task is queued, no other thread can see it. + let prev = BlockingState::toggle_queued(&task.blocking, Relaxed); + debug_assert!(!prev.is_queued()); + + // Bump the ref count + strong = Some(Arc::into_raw(task.clone())); + + // Set the next pointer. This does not require an atomic + // operation as this node is not currently accessible to + // other threads via the queue. 
+ task.next_blocking.store(ptr::null_mut(), Relaxed); + } + + let ptr = strong.unwrap(); + + // Update the head to point to the new node. We need to see the + // previous node in order to update the next pointer as well as + // release `task` to any other threads calling `push`. + next.set_ptr(ptr); + } + + debug_assert_ne!(curr.0, 0); + debug_assert_ne!(next.0, 0); + + let actual = self.state.compare_and_swap( + curr.into(), + next.into(), + AcqRel).into(); + + if curr == actual { + break; + } + + curr = actual; + } + + match curr.ptr() { + Some(prev) => { + let ptr = strong.unwrap(); + + // Finish pushing + unsafe { + (*prev).next_blocking + .store(ptr as *mut _, Release); + } + + // The node was queued to be notified once capacity is made + // available. + Ok(Async::NotReady) + } + None => { + debug_assert!(curr.remaining_capacity() > 0); + + // If `strong` is set, gotta undo a bunch of work + if let Some(ptr) = strong { + let _ = unsafe { Arc::from_raw(ptr) }; + + // Unset the queued flag. + let prev = BlockingState::toggle_queued(&task.blocking, Relaxed); + debug_assert!(prev.is_queued()); + } + + // Capacity has been obtained + Ok(().into()) + } + } + } + + unsafe fn push_stub(&self) { + let task: *mut Task = &*self.stub as *const _ as *mut _; + + // Set the next pointer. This does not require an atomic operation as + // this node is not accessible. The write will be flushed with the next + // operation + (*task).next_blocking.store(ptr::null_mut(), Relaxed); + + // Update the head to point to the new node. We need to see the previous + // node in order to update the next pointer as well as release `task` + // to any other threads calling `push`. + let prev = self.state.swap(task as usize, AcqRel); + + // The stub is only pushed when there are pending tasks. Because of + // this, the state must *always* be in pointer mode. + debug_assert!(State::from(prev).is_ptr()); + + let prev = prev as *const Task; + + // We don't want the *existing* pointer to be a stub. 
+ debug_assert_ne!(prev, task); + + // Release `task` to the consume end. + (*prev).next_blocking.store(task, Release); + } + + pub fn notify_task(&self, pool: &Arc) { + let prev = self.lock.fetch_add(1, AcqRel); + + + if prev != 0 { + // Another thread has the lock and will be responsible for notifying + // pending tasks. + return; + } + + let mut dec = 1; + + loop { + let mut remaining_pops = dec; + while remaining_pops > 0 { + remaining_pops -= 1; + + let task = match self.pop(remaining_pops) { + Some(t) => t, + None => break, + }; + + Task::notify_blocking(task, pool); + } + + // Decrement the number of handled notifications + let actual = self.lock.fetch_sub(dec, AcqRel); + + if actual == dec { + break; + } + + // This can only be greater than expected as we are the only thread + // that is decrementing. + debug_assert!(actual > dec); + dec = actual - dec; + } + } + + /// Pop a task + /// + /// `rem` represents the remaining number of times the caller will pop. If + /// there are no more tasks to pop, `rem` is used to set the remaining + /// capacity. + fn pop(&self, rem: usize) -> Option> { + 'outer: + loop { + unsafe { + let mut tail = *self.tail.get(); + let mut next = (*tail).next_blocking.load(Acquire); + + let stub = &*self.stub as *const _ as *mut _; + + if tail == stub { + if next.is_null() { + // This loop is not part of the standard intrusive mpsc + // channel algorithm. This is where we atomically pop + // the last task and add `rem` to the remaining capacity. + // + // This modification to the pop algorithm works because, + // at this point, we have not done any work (only done + // reading). We have a *pretty* good idea that there is + // no concurrent pusher. + // + // The capacity is then atomically added by doing an + // AcqRel CAS on `state`. The `state` cell is the + // linchpin of the algorithm. 
+ // + // By successfully CASing `head` w/ AcqRel, we ensure + // that, if any thread was racing and entered a push, we + // see that and abort pop, retrying as it is + // "inconsistent". + let mut curr: State = self.state.load(Acquire).into(); + + loop { + if curr.has_task(&self.stub) { + // Inconsistent state, yield the thread and try + // again. + thread::yield_now(); + continue 'outer; + } + + let mut after = curr; + + // +1 here because `rem` represents the number of + // pops that will come after the current one. + after.add_capacity(rem + 1, &self.stub); + + let actual: State = self.state.compare_and_swap( + curr.into(), + after.into(), + AcqRel).into(); + + if actual == curr { + // Successfully returned the remaining capacity + return None; + } + + curr = actual; + } + } + + *self.tail.get() = next; + tail = next; + next = (*next).next_blocking.load(Acquire); + } + + if !next.is_null() { + *self.tail.get() = next; + + // No ref_count inc is necessary here as this poll is paired + // with a `push` which "forgets" the handle. + return Some(Arc::from_raw(tail)); + } + + let state = self.state.load(Acquire); + + // This must always be a pointer + debug_assert!(State::from(state).is_ptr()); + + if state != tail as usize { + // Try again + thread::yield_now(); + continue 'outer; + } + + self.push_stub(); + + next = (*tail).next_blocking.load(Acquire); + + if !next.is_null() { + *self.tail.get() = next; + + return Some(Arc::from_raw(tail)); + } + + thread::yield_now(); + // Try again + } + } + } +} + +// ====== impl State ===== + +impl State { + /// Return a new `State` representing the remaining capacity at the maximum + /// value. 
+ fn new(capacity: usize) -> State { + State((capacity << NUM_SHIFT) | NUM_FLAG) + } + + fn remaining_capacity(&self) -> usize { + if !self.has_remaining_capacity() { + return 0; + } + + self.0 >> 1 + } + + fn has_remaining_capacity(&self) -> bool { + self.0 & NUM_FLAG == NUM_FLAG + } + + fn has_task(&self, stub: &Task) -> bool { + !(self.has_remaining_capacity() || self.is_stub(stub)) + } + + fn is_stub(&self, stub: &Task) -> bool { + self.0 == stub as *const _ as usize + } + + /// Try to claim blocking capacity. + /// + /// # Return + /// + /// Returns `true` if the capacity was claimed, `false` otherwise. If + /// `false` is returned, it can be assumed that `State` represents the head + /// pointer in the mpsc channel. + fn claim_capacity(&mut self, stub: &Task) -> bool { + if !self.has_remaining_capacity() { + return false; + } + + debug_assert!(self.0 != 1); + + self.0 -= 1 << NUM_SHIFT; + + if self.0 == NUM_FLAG { + // Set the state to the stub pointer. + self.0 = stub as *const _ as usize; + } + + true + } + + /// Add blocking capacity. 
+ fn add_capacity(&mut self, capacity: usize, stub: &Task) -> bool { + debug_assert!(capacity > 0); + + if self.is_stub(stub) { + self.0 = (capacity << NUM_SHIFT) | NUM_FLAG; + true + } else if self.has_remaining_capacity() { + self.0 += capacity << NUM_SHIFT; + true + } else { + false + } + } + + fn is_ptr(&self) -> bool { + self.0 & NUM_FLAG == 0 + } + + fn ptr(&self) -> Option<*const Task> { + if self.is_ptr() { + Some(self.0 as *const Task) + } else { + None + } + } + + fn set_ptr(&mut self, ptr: *const Task) { + let ptr = ptr as usize; + debug_assert!(ptr & NUM_FLAG == 0); + self.0 = ptr + } +} + +impl From for State { + fn from(src: usize) -> State { + State(src) + } +} + +impl From for usize { + fn from(src: State) -> usize { + src.0 + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut fmt = fmt.debug_struct("State"); + + if self.is_ptr() { + fmt.field("ptr", &self.0); + } else { + fmt.field("remaining", &self.remaining_capacity()); + } + + fmt.finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/task/blocking_state.rs b/third_party/rust/tokio-threadpool/src/task/blocking_state.rs new file mode 100644 index 000000000000..b41fc4868dec --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/task/blocking_state.rs @@ -0,0 +1,89 @@ +use task::CanBlock; + +use std::fmt; +use std::sync::atomic::{AtomicUsize, Ordering}; + +/// State tracking task level state to support `blocking`. +/// +/// This tracks two separate flags. +/// +/// a) If the task is queued in the pending blocking channel. This prevents +/// double queuing (which would break the linked list). +/// +/// b) If the task has been allocated capacity to block. +#[derive(Eq, PartialEq)] +pub(crate) struct BlockingState(usize); + +const QUEUED: usize = 0b01; +const ALLOCATED: usize = 0b10; + +impl BlockingState { + /// Create a new, default, `BlockingState`. 
+ pub fn new() -> BlockingState { + BlockingState(0) + } + + /// Returns `true` if the state represents the associated task being queued + /// in the pending blocking capacity channel + pub fn is_queued(&self) -> bool { + self.0 & QUEUED == QUEUED + } + + /// Toggle the queued flag + /// + /// Returns the state before the flag has been toggled. + pub fn toggle_queued(state: &AtomicUsize, ordering: Ordering) -> BlockingState { + state.fetch_xor(QUEUED, ordering).into() + } + + /// Returns `true` if the state represents the associated task having been + /// allocated capacity to block. + pub fn is_allocated(&self) -> bool { + self.0 & ALLOCATED == ALLOCATED + } + + /// Atomically consume the capacity allocation and return if the allocation + /// was present. + /// + /// If this returns `true`, then the task has the ability to block for the + /// duration of the `poll`. + pub fn consume_allocation(state: &AtomicUsize, ordering: Ordering) -> CanBlock { + let state: Self = state.fetch_and(!ALLOCATED, ordering).into(); + + if state.is_allocated() { + CanBlock::Allocated + } else if state.is_queued() { + CanBlock::NoCapacity + } else { + CanBlock::CanRequest + } + } + + pub fn notify_blocking(state: &AtomicUsize, ordering: Ordering) { + let prev: Self = state.fetch_xor(ALLOCATED | QUEUED, ordering).into(); + + debug_assert!(prev.is_queued()); + debug_assert!(!prev.is_allocated()); + } +} + +impl From for BlockingState { + fn from(src: usize) -> BlockingState { + BlockingState(src) + } +} + +impl From for usize { + fn from(src: BlockingState) -> usize { + src.0 + } +} + +impl fmt::Debug for BlockingState { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("BlockingState") + .field("is_queued", &self.is_queued()) + .field("is_allocated", &self.is_allocated()) + .finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/task/mod.rs b/third_party/rust/tokio-threadpool/src/task/mod.rs new file mode 100644 index 000000000000..84309ffd67bd 
--- /dev/null +++ b/third_party/rust/tokio-threadpool/src/task/mod.rs @@ -0,0 +1,304 @@ +mod blocking; +mod blocking_state; +mod queue; +mod state; + +pub(crate) use self::blocking::{Blocking, CanBlock}; +pub(crate) use self::queue::{Queue, Poll}; +use self::blocking_state::BlockingState; +use self::state::State; + +use notifier::Notifier; +use pool::Pool; +use sender::Sender; + +use futures::{self, Future, Async}; +use futures::executor::{self, Spawn}; + +use std::{fmt, panic, ptr}; +use std::cell::{UnsafeCell}; +use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, AtomicPtr}; +use std::sync::atomic::Ordering::{AcqRel, Release, Relaxed}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Harness around a future. +/// +/// This also behaves as a node in the inbound work queue and the blocking +/// queue. +pub(crate) struct Task { + /// Task lifecycle state + state: AtomicUsize, + + /// Task blocking related state + blocking: AtomicUsize, + + /// Next pointer in the queue that submits tasks to a worker. + next: AtomicPtr, + + /// Next pointer in the queue of tasks pending blocking capacity. + next_blocking: AtomicPtr, + + /// Store the future at the head of the struct + /// + /// The future is dropped immediately when it transitions to Complete + future: UnsafeCell>, +} + +#[derive(Debug)] +pub(crate) enum Run { + Idle, + Schedule, + Complete, +} + +type BoxFuture = Box + Send + 'static>; + +#[cfg(feature = "unstable-futures")] +type BoxFuture2 = Box + Send>; + +enum TaskFuture { + Futures1(Spawn), + + #[cfg(feature = "unstable-futures")] + Futures2 { + tls: futures2::task::LocalMap, + waker: futures2::task::Waker, + fut: BoxFuture2, + } +} + +// ===== impl Task ===== + +impl Task { + /// Create a new `Task` as a harness for `future`. + pub fn new(future: BoxFuture) -> Task { + // Wrap the future with an execution context. 
+ let task_fut = TaskFuture::Futures1(executor::spawn(future)); + + Task { + state: AtomicUsize::new(State::new().into()), + blocking: AtomicUsize::new(BlockingState::new().into()), + next: AtomicPtr::new(ptr::null_mut()), + next_blocking: AtomicPtr::new(ptr::null_mut()), + future: UnsafeCell::new(Some(task_fut)), + } + } + + /// Create a new `Task` as a harness for a futures 0.2 `future`. + #[cfg(feature = "unstable-futures")] + pub fn new2(fut: BoxFuture2, make_waker: F) -> Task + where F: FnOnce(usize) -> futures2::task::Waker + { + let mut inner = Box::new(Task { + state: AtomicUsize::new(State::new().into()), + blocking: AtomicUsize::new(BlockingState::new().into()), + next: AtomicPtr::new(ptr::null_mut()), + next_blocking: AtomicPtr::new(ptr::null_mut()), + future: None, + }); + + let waker = make_waker((&*inner) as *const _ as usize); + let tls = futures2::task::LocalMap::new(); + inner.future = Some(TaskFuture::Futures2 { waker, tls, fut }); + + Task { ptr: Box::into_raw(inner) } + } + + /// Create a fake `Task` to be used as part of the intrusive mpsc channel + /// algorithm. + fn stub() -> Task { + let future = Box::new(futures::empty()); + let task_fut = TaskFuture::Futures1(executor::spawn(future)); + + Task { + state: AtomicUsize::new(State::stub().into()), + blocking: AtomicUsize::new(BlockingState::new().into()), + next: AtomicPtr::new(ptr::null_mut()), + next_blocking: AtomicPtr::new(ptr::null_mut()), + future: UnsafeCell::new(Some(task_fut)), + } + } + + /// Execute the task returning `Run::Schedule` if the task needs to be + /// scheduled again. + pub fn run(&self, unpark: &Arc, exec: &mut Sender) -> Run { + use self::State::*; + + // Transition task to running state. At this point, the task must be + // scheduled. 
+ let actual: State = self.state.compare_and_swap( + Scheduled.into(), Running.into(), AcqRel).into(); + + match actual { + Scheduled => {}, + _ => panic!("unexpected task state; {:?}", actual), + } + + trace!("Task::run; state={:?}", State::from(self.state.load(Relaxed))); + + // The transition to `Running` done above ensures that a lock on the + // future has been obtained. + let fut = unsafe { &mut (*self.future.get()) }; + + // This block deals with the future panicking while being polled. + // + // If the future panics, then the drop handler must be called such that + // `thread::panicking() -> true`. To do this, the future is dropped from + // within the catch_unwind block. + let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { + struct Guard<'a>(&'a mut Option, bool); + + impl<'a> Drop for Guard<'a> { + fn drop(&mut self) { + // This drops the future + if self.1 { + let _ = self.0.take(); + } + } + } + + let mut g = Guard(fut, true); + + let ret = g.0.as_mut().unwrap() + .poll(unpark, self as *const _ as usize, exec); + + + g.1 = false; + + ret + })); + + match res { + Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => { + trace!(" -> task complete"); + + // The future has completed. Drop it immediately to free + // resources and run drop handlers. + // + // The `Task` harness will stay around longer if it is contained + // by any of the various queues. + self.drop_future(); + + // Transition to the completed state + self.state.store(State::Complete.into(), Release); + + Run::Complete + } + Ok(Ok(Async::NotReady)) => { + trace!(" -> not ready"); + + // Attempt to transition from Running -> Idle, if successful, + // then the task does not need to be scheduled again. If the CAS + // fails, then the task has been unparked concurrent to running, + // in which case it transitions immediately back to scheduled + // and we return `true`. 
+ let prev: State = self.state.compare_and_swap( + Running.into(), Idle.into(), AcqRel).into(); + + match prev { + Running => Run::Idle, + Notified => { + self.state.store(Scheduled.into(), Release); + Run::Schedule + } + _ => unreachable!(), + } + } + } + } + + /// Notify the task + pub fn notify(me: Arc, pool: &Arc) { + if me.schedule(){ + let _ = pool.submit(me, pool); + } + } + + /// Notify the task it has been allocated blocking capacity + pub fn notify_blocking(me: Arc, pool: &Arc) { + BlockingState::notify_blocking(&me.blocking, AcqRel); + Task::notify(me, pool); + } + + /// Transition the task state to scheduled. + /// + /// Returns `true` if the caller is permitted to schedule the task. + pub fn schedule(&self) -> bool { + use self::State::*; + + loop { + // Scheduling can only be done from the `Idle` state. + let actual = self.state.compare_and_swap( + Idle.into(), + Scheduled.into(), + AcqRel).into(); + + match actual { + Idle => return true, + Running => { + // The task is already running on another thread. Transition + // the state to `Notified`. If this CAS fails, then restart + // the logic again from `Idle`. + let actual = self.state.compare_and_swap( + Running.into(), Notified.into(), AcqRel).into(); + + match actual { + Idle => continue, + _ => return false, + } + } + Complete | Notified | Scheduled => return false, + } + } + } + + /// Consumes any allocated capacity to block. + /// + /// Returns `true` if capacity was allocated, `false` otherwise. + pub fn consume_blocking_allocation(&self) -> CanBlock { + // This flag is the primary point of coordination. The queued flag + // happens "around" setting the blocking capacity. + BlockingState::consume_allocation(&self.blocking, AcqRel) + } + + /// Drop the future + /// + /// This must only be called by the thread that successfully transitioned + /// the future state to `Running`. 
+ fn drop_future(&self) { + let _ = unsafe { (*self.future.get()).take() }; + } +} + +impl fmt::Debug for Task { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Task") + .field("next", &self.next) + .field("state", &self.state) + .field("future", &"Spawn") + .finish() + } +} + +// ===== impl TaskFuture ===== + +impl TaskFuture { + #[allow(unused_variables)] + fn poll(&mut self, unpark: &Arc, id: usize, exec: &mut Sender) -> futures::Poll<(), ()> { + match *self { + TaskFuture::Futures1(ref mut fut) => fut.poll_future_notify(unpark, id), + + #[cfg(feature = "unstable-futures")] + TaskFuture::Futures2 { ref mut fut, ref waker, ref mut tls } => { + let mut cx = futures2::task::Context::new(tls, waker, exec); + match fut.poll(&mut cx).unwrap() { + futures2::Async::Pending => Ok(Async::NotReady), + futures2::Async::Ready(x) => Ok(Async::Ready(x)), + } + } + } + } +} diff --git a/third_party/rust/tokio-threadpool/src/task/queue.rs b/third_party/rust/tokio-threadpool/src/task/queue.rs new file mode 100644 index 000000000000..d91c564bc619 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/task/queue.rs @@ -0,0 +1,115 @@ +use task::Task; + +use std::cell::UnsafeCell; +use std::ptr; +use std::sync::Arc; +use std::sync::atomic::AtomicPtr; +use std::sync::atomic::Ordering::{Acquire, Release, AcqRel, Relaxed}; + +#[derive(Debug)] +pub(crate) struct Queue { + /// Queue head. + /// + /// This is a strong reference to `Task` (i.e, `Arc`) + head: AtomicPtr, + + /// Tail pointer. This is `Arc` unless it points to `stub`. + tail: UnsafeCell<*mut Task>, + + /// Stub pointer, used as part of the intrusive mpsc channel algorithm + /// described by 1024cores. + stub: Box, +} + +#[derive(Debug)] +pub(crate) enum Poll { + Empty, + Inconsistent, + Data(Arc), +} + +// ===== impl Queue ===== + +impl Queue { + /// Create a new, empty, `Queue`. 
+ pub fn new() -> Queue { + let stub = Box::new(Task::stub()); + let ptr = &*stub as *const _ as *mut _; + + Queue { + head: AtomicPtr::new(ptr), + tail: UnsafeCell::new(ptr), + stub: stub, + } + } + + /// Push a task onto the queue. + /// + /// This function is `Sync`. + pub fn push(&self, task: Arc) { + unsafe { + self.push2(Arc::into_raw(task)); + } + } + + unsafe fn push2(&self, task: *const Task) { + let task = task as *mut Task; + + // Set the next pointer. This does not require an atomic operation as + // this node is not accessible. The write will be flushed with the next + // operation + (*task).next.store(ptr::null_mut(), Relaxed); + + // Update the head to point to the new node. We need to see the previous + // node in order to update the next pointer as well as release `task` + // to any other threads calling `push`. + let prev = self.head.swap(task, AcqRel); + + // Release `task` to the consume end. + (*prev).next.store(task, Release); + } + + /// Poll a task from the queue. + /// + /// This function is **not** `Sync` and requires coordination by the caller. + pub unsafe fn poll(&self) -> Poll { + let mut tail = *self.tail.get(); + let mut next = (*tail).next.load(Acquire); + + let stub = &*self.stub as *const _ as *mut _; + + if tail == stub { + if next.is_null() { + return Poll::Empty; + } + + *self.tail.get() = next; + tail = next; + next = (*next).next.load(Acquire); + } + + if !next.is_null() { + *self.tail.get() = next; + + // No ref_count inc is necessary here as this poll is paired + // with a `push` which "forgets" the handle. 
+ return Poll::Data(Arc::from_raw(tail)); + } + + if self.head.load(Acquire) != tail { + return Poll::Inconsistent; + } + + self.push2(stub); + + next = (*tail).next.load(Acquire); + + if !next.is_null() { + *self.tail.get() = next; + + return Poll::Data(Arc::from_raw(tail)); + } + + Poll::Inconsistent + } +} diff --git a/third_party/rust/tokio-threadpool/src/task/state.rs b/third_party/rust/tokio-threadpool/src/task/state.rs new file mode 100644 index 000000000000..9023eec5fbb1 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/task/state.rs @@ -0,0 +1,52 @@ +#[repr(usize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub(crate) enum State { + /// Task is currently idle + Idle = 0, + + /// Task is currently running + Running = 1, + + /// Task is currently running, but has been notified that it must run again. + Notified = 2, + + /// Task has been scheduled + Scheduled = 3, + + /// Task is complete + Complete = 4, +} + +// ===== impl State ===== + +impl State { + /// Returns the initial task state. + /// + /// Tasks start in the scheduled state as they are immediately scheduled on + /// creation. + pub fn new() -> State { + State::Scheduled + } + + pub fn stub() -> State { + State::Idle + } +} + +impl From for State { + fn from(src: usize) -> Self { + use self::State::*; + + debug_assert!( + src >= Idle as usize && + src <= Complete as usize, "actual={}", src); + + unsafe { ::std::mem::transmute(src) } + } +} + +impl From for usize { + fn from(src: State) -> Self { + src as usize + } +} diff --git a/third_party/rust/tokio-threadpool/src/thread_pool.rs b/third_party/rust/tokio-threadpool/src/thread_pool.rs new file mode 100644 index 000000000000..cb2e960a24a6 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/thread_pool.rs @@ -0,0 +1,134 @@ +use builder::Builder; +use pool::Pool; +use sender::Sender; +use shutdown::Shutdown; + +use futures::Future; + +/// Work-stealing based thread pool for executing futures. 
+/// +/// If a `ThreadPool` instance is dropped without explicitly being shutdown, +/// `shutdown_now` is called implicitly, forcing all tasks that have not yet +/// completed to be dropped. +/// +/// Create `ThreadPool` instances using `Builder`. +#[derive(Debug)] +pub struct ThreadPool { + pub(crate) inner: Option, +} + +impl ThreadPool { + /// Create a new `ThreadPool` with default values. + /// + /// Use [`Builder`] for creating a configured thread pool. + /// + /// [`Builder`]: struct.Builder.html + pub fn new() -> ThreadPool { + Builder::new().build() + } + + /// Spawn a future onto the thread pool. + /// + /// This function takes ownership of the future and randomly assigns it to a + /// worker thread. The thread will then start executing the future. + /// + /// # Examples + /// + /// ```rust + /// # extern crate tokio_threadpool; + /// # extern crate futures; + /// # use tokio_threadpool::ThreadPool; + /// use futures::future::{Future, lazy}; + /// + /// # pub fn main() { + /// // Create a thread pool with default configuration values + /// let thread_pool = ThreadPool::new(); + /// + /// thread_pool.spawn(lazy(|| { + /// println!("called from a worker thread"); + /// Ok(()) + /// })); + /// + /// // Gracefully shutdown the threadpool + /// thread_pool.shutdown().wait().unwrap(); + /// # } + /// ``` + /// + /// # Panics + /// + /// This function panics if the spawn fails. Use [`Sender::spawn`] for a + /// version that returns a `Result` instead of panicking. + pub fn spawn(&self, future: F) + where F: Future + Send + 'static, + { + self.sender().spawn(future).unwrap(); + } + + /// Return a reference to the sender handle + /// + /// The handle is used to spawn futures onto the thread pool. It also + /// implements the `Executor` trait. 
+ pub fn sender(&self) -> &Sender { + self.inner.as_ref().unwrap() + } + + /// Return a mutable reference to the sender handle + pub fn sender_mut(&mut self) -> &mut Sender { + self.inner.as_mut().unwrap() + } + + /// Shutdown the pool once it becomes idle. + /// + /// Idle is defined as the completion of all futures that have been spawned + /// onto the thread pool. There may still be outstanding handles when the + /// thread pool reaches an idle state. + /// + /// Once the idle state is reached, calling `spawn` on any outstanding + /// handle will result in an error. All worker threads are signaled and will + /// shutdown. The returned future completes once all worker threads have + /// completed the shutdown process. + pub fn shutdown_on_idle(mut self) -> Shutdown { + self.inner().shutdown(false, false); + Shutdown { inner: self.inner.take().unwrap() } + } + + /// Shutdown the pool + /// + /// This prevents the thread pool from accepting new tasks but will allow + /// any existing tasks to complete. + /// + /// Calling `spawn` on any outstanding handle will result in an error. All + /// worker threads are signaled and will shutdown. The returned future + /// completes once all worker threads have completed the shutdown process. + pub fn shutdown(mut self) -> Shutdown { + self.inner().shutdown(true, false); + Shutdown { inner: self.inner.take().unwrap() } + } + + /// Shutdown the pool immediately + /// + /// This will prevent the thread pool from accepting new tasks **and** + /// abort any tasks that are currently running on the thread pool. + /// + /// Calling `spawn` on any outstanding handle will result in an error. All + /// worker threads are signaled and will shutdown. The returned future + /// completes once all worker threads have completed the shutdown process. 
+ pub fn shutdown_now(mut self) -> Shutdown { + self.inner().shutdown(true, true); + Shutdown { inner: self.inner.take().unwrap() } + } + + fn inner(&self) -> &Pool { + &*self.inner.as_ref().unwrap().inner + } +} + +impl Drop for ThreadPool { + fn drop(&mut self) { + if let Some(sender) = self.inner.take() { + sender.inner.shutdown(true, true); + let shutdown = Shutdown { inner: sender }; + let _ = shutdown.wait(); + } + } +} diff --git a/third_party/rust/tokio-threadpool/src/worker/entry.rs b/third_party/rust/tokio-threadpool/src/worker/entry.rs new file mode 100644 index 000000000000..752ad2955a57 --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/worker/entry.rs @@ -0,0 +1,249 @@ +use park::{BoxPark, BoxUnpark}; +use task::{Task, Queue}; +use worker::state::{State, PUSHED_MASK}; + +use std::cell::UnsafeCell; +use std::fmt; +use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::Ordering::{Acquire, AcqRel, Relaxed}; + +use deque; + +// TODO: None of the fields should be public +// +// It would also be helpful to split up the state across what fields / +// operations are thread-safe vs. which ones require ownership of the worker. +pub(crate) struct WorkerEntry { + // Worker state. This is mutated when notifying the worker. + // + // The `usize` value is deserialized to a `worker::State` instance. See + // comments on that type. + pub state: AtomicUsize, + + // Next entry in the parked Trieber stack + next_sleeper: UnsafeCell, + + // Worker half of deque + deque: deque::Deque>, + + // Stealer half of deque + steal: deque::Stealer>, + + // Thread parker + pub park: UnsafeCell, + + // Thread unparker + pub unpark: BoxUnpark, + + // MPSC queue of jobs submitted to the worker from an external source. 
+ pub inbound: Queue, +} + +impl WorkerEntry { + pub fn new(park: BoxPark, unpark: BoxUnpark) -> Self { + let w = deque::Deque::new(); + let s = w.stealer(); + + WorkerEntry { + state: AtomicUsize::new(State::default().into()), + next_sleeper: UnsafeCell::new(0), + deque: w, + steal: s, + inbound: Queue::new(), + park: UnsafeCell::new(park), + unpark, + } + } + + /// Atomically load the worker's state + /// + /// # Ordering + /// + /// An `Acquire` ordering is established on the entry's state variable. + pub fn load_state(&self) -> State { + self.state.load(Acquire).into() + } + + /// Atomically unset the pushed flag. + /// + /// # Return + /// + /// The state *before* the push flag is unset. + /// + /// # Ordering + /// + /// The specified ordering is established on the entry's state variable. + pub fn fetch_unset_pushed(&self, ordering: Ordering) -> State { + self.state.fetch_and(!PUSHED_MASK, ordering).into() + } + + /// Submit a task to this worker while currently on the same thread that is + /// running the worker. + #[inline] + pub fn submit_internal(&self, task: Arc) { + self.push_internal(task); + } + + /// Submits a task to the worker. This assumes that the caller is external + /// to the worker. Internal submissions go through another path. + /// + /// Returns `false` if the worker needs to be spawned. + /// + /// # Ordering + /// + /// The `state` must have been obtained with an `Acquire` ordering. 
+ pub fn submit_external(&self, task: Arc, mut state: State) -> bool { + use worker::Lifecycle::*; + + // Push the task onto the external queue + self.push_external(task); + + loop { + let mut next = state; + next.notify(); + + let actual = self.state.compare_and_swap( + state.into(), next.into(), + AcqRel).into(); + + if state == actual { + break; + } + + state = actual; + } + + match state.lifecycle() { + Sleeping => { + // The worker is currently sleeping, the condition variable must + // be signaled + self.wakeup(); + true + } + Shutdown => false, + Running | Notified | Signaled => { + // In these states, the worker is active and will eventually see + // the task that was just submitted. + true + } + } + } + + /// Signals to the worker that it should stop + /// + /// `state` is the last observed state for the worker. This allows skipping + /// the initial load from the state atomic. + /// + /// # Return + /// + /// Returns `Ok` when the worker was successfully signaled. + /// + /// Returns `Err` if the worker has already terminated. + pub fn signal_stop(&self, mut state: State) { + use worker::Lifecycle::*; + + // Transition the worker state to signaled + loop { + let mut next = state; + + match state.lifecycle() { + Shutdown => { + return; + } + Running | Sleeping => {} + Notified | Signaled => { + // These two states imply that the worker is active, thus it + // will eventually see the shutdown signal, so we don't need + // to do anything. + // + // The worker is forced to see the shutdown signal + // eventually as: + // + // a) No more work will arrive + // b) The shutdown signal is stored as the head of the + // sleep, stack which will prevent the worker from going to + // sleep again. 
+ return; + } + } + + next.set_lifecycle(Signaled); + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + break; + } + + state = actual; + } + + // Wakeup the worker + self.wakeup(); + } + + /// Pop a task + /// + /// This **must** only be called by the thread that owns the worker entry. + /// This function is not `Sync`. + pub fn pop_task(&self) -> deque::Steal> { + self.deque.steal() + } + + /// Steal a task + /// + /// This is called by *other* workers to steal a task for processing. This + /// function is `Sync`. + pub fn steal_task(&self) -> deque::Steal> { + self.steal.steal() + } + + /// Drain (and drop) all tasks that are queued for work. + /// + /// This is called when the pool is shutting down. + pub fn drain_tasks(&self) { + while let Some(_) = self.deque.pop() { + } + } + + #[inline] + fn push_external(&self, task: Arc) { + self.inbound.push(task); + } + + #[inline] + pub fn push_internal(&self, task: Arc) { + self.deque.push(task); + } + + #[inline] + pub fn wakeup(&self) { + self.unpark.unpark(); + } + + #[inline] + pub fn next_sleeper(&self) -> usize { + unsafe { *self.next_sleeper.get() } + } + + #[inline] + pub fn set_next_sleeper(&self, val: usize) { + unsafe { *self.next_sleeper.get() = val; } + } +} + +impl fmt::Debug for WorkerEntry { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("WorkerEntry") + .field("state", &self.state.load(Relaxed)) + .field("next_sleeper", &"UnsafeCell") + .field("deque", &self.deque) + .field("steal", &self.steal) + .field("park", &"UnsafeCell") + .field("unpark", &"BoxUnpark") + .field("inbound", &self.inbound) + .finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/worker/mod.rs b/third_party/rust/tokio-threadpool/src/worker/mod.rs new file mode 100644 index 000000000000..35feae6b7d7f --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/worker/mod.rs @@ -0,0 +1,890 @@ +mod entry; +mod stack; +mod state; + 
+pub(crate) use self::entry::{ + WorkerEntry as Entry, +}; +pub(crate) use self::stack::Stack; +pub(crate) use self::state::{ + State, + Lifecycle, +}; + +use pool::{self, Pool, BackupId}; +use notifier::Notifier; +use sender::Sender; +use task::{self, Task, CanBlock}; + +use tokio_executor; + +use futures::{Poll, Async}; + +use std::cell::Cell; +use std::marker::PhantomData; +use std::rc::Rc; +use std::sync::atomic::Ordering::{AcqRel, Acquire}; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +/// Thread worker +/// +/// This is passed to the [`around_worker`] callback set on [`Builder`]. This +/// callback is only expected to call [`run`] on it. +/// +/// [`Builder`]: struct.Builder.html +/// [`around_worker`]: struct.Builder.html#method.around_worker +/// [`run`]: struct.Worker.html#method.run +#[derive(Debug)] +pub struct Worker { + // Shared scheduler data + pub(crate) inner: Arc, + + // WorkerEntry index + pub(crate) id: WorkerId, + + // Backup thread ID assigned to processing this worker. + backup_id: BackupId, + + // Set to the task that is currently being polled by the worker. This is + // needed so that `blocking` blocks are able to interact with this task. + // + // This has to be a raw pointer to make it compile, but great care is taken + // when this is set. + current_task: CurrentTask, + + // Set when the thread is in blocking mode. + is_blocking: Cell, + + // Set when the worker should finalize on drop + should_finalize: Cell, + + // Keep the value on the current thread. + _p: PhantomData>, +} + +/// Tracks the state related to the currently running task. +#[derive(Debug)] +struct CurrentTask { + /// This has to be a raw pointer to make it compile, but great care is taken + /// when this is set. + task: Cell>>, + + /// Tracks the blocking capacity allocation state. + can_block: Cell, +} + +/// Identifies a thread pool worker. +/// +/// This identifier is unique scoped by the thread pool. 
It is possible that +/// different thread pool instances share worker identifier values. +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct WorkerId(pub(crate) usize); + +// Pointer to the current worker info +thread_local!(static CURRENT_WORKER: Cell<*const Worker> = Cell::new(0 as *const _)); + +impl Worker { + pub(crate) fn new(id: WorkerId, backup_id: BackupId, inner: Arc) -> Worker { + Worker { + inner, + id, + backup_id, + current_task: CurrentTask::new(), + is_blocking: Cell::new(false), + should_finalize: Cell::new(false), + _p: PhantomData, + } + } + + pub(crate) fn is_blocking(&self) -> bool { + self.is_blocking.get() + } + + /// Run the worker + /// + /// Returns `true` if the thread should keep running as a `backup` thread. + pub(crate) fn do_run(&self) -> bool { + // Create another worker... It's ok, this is just a new type around + // `Pool` that is expected to stay on the current thread. + CURRENT_WORKER.with(|c| { + c.set(self as *const _); + + let inner = self.inner.clone(); + let mut sender = Sender { inner }; + + // Enter an execution context + let mut enter = tokio_executor::enter().unwrap(); + + tokio_executor::with_default(&mut sender, &mut enter, |enter| { + if let Some(ref callback) = self.inner.config.around_worker { + callback.call(self, enter); + } else { + self.run(); + } + }); + }); + + // Can't be in blocking mode and finalization mode + debug_assert!(!self.is_blocking.get() || !self.should_finalize.get()); + + self.is_blocking.get() + } + + pub(crate) fn with_current) -> R, R>(f: F) -> R { + CURRENT_WORKER.with(move |c| { + let ptr = c.get(); + + if ptr.is_null() { + f(None) + } else { + f(Some(unsafe { &*ptr })) + } + }) + } + + /// Transition the current worker to a blocking worker + pub(crate) fn transition_to_blocking(&self) -> Poll<(), ::BlockingError> { + use self::CanBlock::*; + + // If we get this far, then `current_task` has been set. 
+ let task_ref = self.current_task.get_ref(); + + // First step is to acquire blocking capacity for the task. + match self.current_task.can_block() { + // Capacity to block has already been allocated to this task. + Allocated => {} + + // The task has already requested capacity to block, but there is + // none yet available. + NoCapacity => return Ok(Async::NotReady), + + // The task has yet to ask for capacity + CanRequest => { + // Atomically attempt to acquire blocking capacity, and if none + // is available, register the task to be notified once capacity + // becomes available. + match self.inner.poll_blocking_capacity(task_ref)? { + Async::Ready(()) => { + self.current_task.set_can_block(Allocated); + } + Async::NotReady => { + self.current_task.set_can_block(NoCapacity); + return Ok(Async::NotReady); + } + } + } + } + + // The task has been allocated blocking capacity. At this point, this is + // when the current thread transitions from a worker to a backup thread. + // To do so requires handing over the worker to another backup thread. + + if self.is_blocking.get() { + // The thread is already in blocking mode, so there is nothing else + // to do. Return `Ready` and allow the caller to block the thread. + return Ok(().into()); + } + + trace!("transition to blocking state"); + + // Transitioning to blocking requires handing over the worker state to + // another thread so that the work queue can continue to be processed. + + self.inner.spawn_thread(self.id.clone(), &self.inner); + + // Track that the thread has now fully entered the blocking state. + self.is_blocking.set(true); + + Ok(().into()) + } + + /// Transition from blocking + pub(crate) fn transition_from_blocking(&self) { + // TODO: Attempt to take ownership of the worker again. + } + + /// Returns a reference to the worker's identifier. + /// + /// This identifier is unique scoped by the thread pool. It is possible that + /// different thread pool instances share worker identifier values. 
+ pub fn id(&self) -> &WorkerId { + &self.id + } + + /// Run the worker + /// + /// This function blocks until the worker is shutting down. + pub fn run(&self) { + const LIGHT_SLEEP_INTERVAL: usize = 32; + + // Get the notifier. + let notify = Arc::new(Notifier { + inner: Arc::downgrade(&self.inner), + }); + let mut sender = Sender { inner: self.inner.clone() }; + + let mut first = true; + let mut spin_cnt = 0; + let mut tick = 0; + + while self.check_run_state(first) { + first = false; + + // Poll inbound until empty, transferring all tasks to the internal + // queue. + let consistent = self.drain_inbound(); + + // Run the next available task + if self.try_run_task(¬ify, &mut sender) { + if self.is_blocking.get() { + // Exit out of the run state + return; + } + + if tick % LIGHT_SLEEP_INTERVAL == 0 { + self.sleep_light(); + } + + tick = tick.wrapping_add(1); + spin_cnt = 0; + + // As long as there is work, keep looping. + continue; + } + + if !consistent { + spin_cnt = 0; + continue; + } + + // Starting to get sleeeeepy + if spin_cnt < 61 { + spin_cnt += 1; + } else { + tick = 0; + + if !self.sleep() { + return; + } + } + + // If there still isn't any work to do, shutdown the worker? + } + + // The pool is terminating. However, transitioning the pool state to + // terminated is the very first step of the finalization process. Other + // threads may not see this state and try to spawn a new thread. To + // ensure consistency, before the current thread shuts down, it must + // return the backup token to the stack. + // + // The returned result is ignored because `Err` represents the pool + // shutting down. We are currently aware of this fact. + let _ = self.inner.release_backup(self.backup_id); + + self.should_finalize.set(true); + } + + /// Try to run a task + /// + /// Returns `true` if work was found. 
+ #[inline] + fn try_run_task(&self, notify: &Arc, sender: &mut Sender) -> bool { + if self.try_run_owned_task(notify, sender) { + return true; + } + + self.try_steal_task(notify, sender) + } + + /// Checks the worker's current state, updating it as needed. + /// + /// Returns `true` if the worker should run. + #[inline] + fn check_run_state(&self, first: bool) -> bool { + use self::Lifecycle::*; + + debug_assert!(!self.is_blocking.get()); + + let mut state: State = self.entry().state.load(Acquire).into(); + + loop { + let pool_state: pool::State = self.inner.state.load(Acquire).into(); + + if pool_state.is_terminated() { + return false; + } + + let mut next = state; + + match state.lifecycle() { + Running => break, + Notified | Signaled => { + // transition back to running + next.set_lifecycle(Running); + } + Shutdown | Sleeping => { + // The worker should never be in these states when calling + // this function. + panic!("unexpected worker state; lifecycle={:?}", state.lifecycle()); + } + } + + let actual = self.entry().state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + break; + } + + state = actual; + } + + // `first` is set to true the first time this function is called after + // the thread has started. + // + // This check is to handle the scenario where a worker gets signaled + // while it is already happily running. The `is_signaled` state is + // intended to wake up a worker that has been previously sleeping in + // effect increasing the number of active workers. If this is the first + // time `check_run_state` is called, then being in a signalled state is + // normal and the thread was started to handle it. However, if this is + // **not** the first time the fn was called, then the number of active + // workers has not been increased by the signal, so `signal_work` has to + // be called again to try to wake up another worker. + // + // For example, if the thread pool is configured to allow 4 workers. 
+ // Worker 1 is processing tasks from its `deque`. Worker 2 receives its + // first task. Worker 2 will pick a random worker to signal. It does + // this by popping off the sleep stack, but there is no guarantee that + // workers on the sleep stack are actually sleeping. It is possible that + // Worker 1 gets signaled. + // + // Without this check, in the above case, no additional workers will get + // started, which results in the thread pool permanently being at 2 + // workers even though it should reach 4. + if !first && state.is_signaled() { + trace!("Worker::check_run_state; delegate signal"); + // This worker is not ready to be signaled, so delegate the signal + // to another worker. + self.inner.signal_work(&self.inner); + } + + true + } + + /// Runs the next task on this worker's queue. + /// + /// Returns `true` if work was found. + fn try_run_owned_task(&self, notify: &Arc, sender: &mut Sender) -> bool { + use deque::Steal::*; + + // Poll the internal queue for a task to run + match self.entry().pop_task() { + Data(task) => { + self.run_task(task, notify, sender); + true + } + Empty => false, + Retry => true, + } + } + + /// Tries to steal a task from another worker. + /// + /// Returns `true` if work was found + fn try_steal_task(&self, notify: &Arc, sender: &mut Sender) -> bool { + use deque::Steal::*; + + debug_assert!(!self.is_blocking.get()); + + let len = self.inner.workers.len(); + let mut idx = self.inner.rand_usize() % len; + let mut found_work = false; + let start = idx; + + loop { + if idx < len { + match self.inner.workers[idx].steal_task() { + Data(task) => { + trace!("stole task"); + + self.run_task(task, notify, sender); + + trace!("try_steal_task -- signal_work; self={}; from={}", + self.id.0, idx); + + // Signal other workers that work is available + // + // TODO: Should this be called here or before + // `run_task`? 
+ self.inner.signal_work(&self.inner); + + return true; + } + Empty => {} + Retry => found_work = true, + } + + idx += 1; + } else { + idx = 0; + } + + if idx == start { + break; + } + } + + found_work + } + + fn run_task(&self, task: Arc, notify: &Arc, sender: &mut Sender) { + use task::Run::*; + + let run = self.run_task2(&task, notify, sender); + + // TODO: Try to claim back the worker state in case the backup thread + // did not start up fast enough. This is a performance optimization. + + match run { + Idle => {} + Schedule => { + if self.is_blocking.get() { + // The future has been notified while it was running. + // However, the future also entered a blocking section, + // which released the worker state from this thread. + // + // This means that scheduling the future must be done from + // a point of view external to the worker set. + // + // We have to call `submit_external` instead of `submit` + // here because `self` is still set as the current worker. + self.inner.submit_external(task, &self.inner); + } else { + self.entry().push_internal(task); + } + } + Complete => { + let mut state: pool::State = self.inner.state.load(Acquire).into(); + + loop { + let mut next = state; + next.dec_num_futures(); + + let actual = self.inner.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + trace!("task complete; state={:?}", next); + + if state.num_futures() == 1 { + // If the thread pool has been flagged as shutdown, + // start terminating workers. This involves waking + // up any sleeping worker so that they can notice + // the shutdown state. + if next.is_terminated() { + self.inner.terminate_sleeping_workers(); + } + } + + // The worker's run loop will detect the shutdown state + // next iteration. + return; + } + + state = actual; + } + } + } + } + + /// Actually run the task. This is where `Worker::current_task` is set. + /// + /// Great care is needed to ensure that `current_task` is unset in this + /// function. 
+ fn run_task2(&self, + task: &Arc, + notify: &Arc, + sender: &mut Sender) + -> task::Run + { + struct Guard<'a> { + worker: &'a Worker, + allocated_at_run: bool + } + + impl<'a> Drop for Guard<'a> { + fn drop(&mut self) { + // A task is allocated at run when it was explicitly notified + // that the task has capacity to block. When this happens, that + // capacity is automatically allocated to the notified task. + // This capacity is "use it or lose it", so if the thread is not + // transitioned to blocking in this call, then another task has + // to be notified. + if self.allocated_at_run && !self.worker.is_blocking.get() { + self.worker.inner.notify_blocking_task(&self.worker.inner); + } + + self.worker.current_task.clear(); + } + } + + let can_block = task.consume_blocking_allocation(); + + // Set `current_task` + self.current_task.set(task, can_block); + + // Create the guard, this ensures that `current_task` is unset when the + // function returns, even if the return is caused by a panic. + let _g = Guard { + worker: self, + allocated_at_run: can_block == CanBlock::Allocated + }; + + task.run(notify, sender) + } + + /// Drains all tasks on the extern queue and pushes them onto the internal + /// queue. + /// + /// Returns `true` if the operation was able to complete in a consistent + /// state. + #[inline] + fn drain_inbound(&self) -> bool { + use task::Poll::*; + + let mut found_work = false; + + loop { + let task = unsafe { self.entry().inbound.poll() }; + + match task { + Empty => { + if found_work { + // TODO: Why is this called on every iteration? Would it + // not be better to only signal when work was found + // after waking up? 
+ trace!("found work while draining; signal_work"); + self.inner.signal_work(&self.inner); + } + + return true; + } + Inconsistent => { + if found_work { + trace!("found work while draining; signal_work"); + self.inner.signal_work(&self.inner); + } + + return false; + } + Data(task) => { + found_work = true; + self.entry().push_internal(task); + } + } + } + } + + /// Put the worker to sleep + /// + /// Returns `true` if woken up due to new work arriving. + fn sleep(&self) -> bool { + use self::Lifecycle::*; + + // Putting a worker to sleep is a multipart operation. This is, in part, + // due to the fact that a worker can be notified without it being popped + // from the sleep stack. Extra care is needed to deal with this. + + trace!("Worker::sleep; worker={:?}", self.id); + + let mut state: State = self.entry().state.load(Acquire).into(); + + // The first part of the sleep process is to transition the worker state + // to "pushed". Now, it may be that the worker is already pushed on the + // sleeper stack, in which case, we don't push again. + + loop { + let mut next = state; + + match state.lifecycle() { + Running => { + // Try setting the pushed state + next.set_pushed(); + + // Transition the worker state to sleeping + next.set_lifecycle(Sleeping); + } + Notified | Signaled => { + // No need to sleep, transition back to running and move on. + next.set_lifecycle(Running); + } + Shutdown | Sleeping => { + // The worker cannot transition to sleep when already in a + // sleeping state. + panic!("unexpected worker state; actual={:?}", state.lifecycle()); + } + } + + let actual = self.entry().state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + if state.is_notified() { + // The previous state was notified, so we don't need to + // sleep. 
+ return true; + } + + if !state.is_pushed() { + debug_assert!(next.is_pushed()); + + trace!(" sleeping -- push to stack; idx={}", self.id.0); + + // We obtained permission to push the worker into the + // sleeper queue. + if let Err(_) = self.inner.push_sleeper(self.id.0) { + trace!(" sleeping -- push to stack failed; idx={}", self.id.0); + // The push failed due to the pool being terminated. + // + // This is true because the "work" being woken up for is + // shutting down. + return true; + } + } + + break; + } + + state = actual; + } + + trace!(" -> starting to sleep; idx={}", self.id.0); + + let sleep_until = self.inner.config.keep_alive + .map(|dur| Instant::now() + dur); + + // The state has been transitioned to sleeping, we can now wait by + // calling the parker. This is done in a loop as condvars can wakeup + // spuriously. + 'sleep: + loop { + let mut drop_thread = false; + + match sleep_until { + Some(when) => { + let now = Instant::now(); + + if when >= now { + drop_thread = true; + } + + let dur = when - now; + + unsafe { + (*self.entry().park.get()) + .park_timeout(dur) + .unwrap(); + } + } + None => { + unsafe { + (*self.entry().park.get()) + .park() + .unwrap(); + } + } + } + + trace!(" -> wakeup; idx={}", self.id.0); + + // Reload the state + state = self.entry().state.load(Acquire).into(); + + // If the worker has been notified, transition back to running. + match state.lifecycle() { + Sleeping => { + if !drop_thread { + // This goes back to the outer loop. + continue 'sleep; + } + } + Notified | Signaled => { + // Transition back to running + loop { + let mut next = state; + next.set_lifecycle(Running); + + let actual = self.entry().state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + return true; + } + + state = actual; + } + } + Shutdown | Running => { + // To get here, the block above transitioned the tate to + // `Sleeping`. No other thread can concurrently + // transition to `Shutdown` or `Running`. 
+ unreachable!(); + } + } + + // The thread has reached the maximum permitted sleep duration. + // It is now going to begin to shutdown. + // + // Doing this requires first releasing the thread to the backup + // stack. Because the moment the worker state is transitioned to + // `Shutdown`, other threads **expect** the thread's backup + // entry to be available on the backup stack. + // + // However, it is possible that the worker is notified between + // us pushing the backup entry onto the backup stack and + // transitioning the worker to `Shutdown`. If this happens, the + // current thread lost the token to run the backup entry and has + // to shutdown no matter what. + // + // To deal with this, the worker is transitioned to another + // thread. This is a pretty rare condition. + // + // If pushing on the backup stack fails, then the pool is being + // terminated and the thread should just shutdown + let backup_push_err = self.inner.release_backup(self.backup_id).is_err(); + + if backup_push_err { + debug_assert!({ + let state: State = self.entry().state.load(Acquire).into(); + state.lifecycle() != Sleeping + }); + + self.should_finalize.set(true); + + return true; + } + + loop { + let mut next = state; + next.set_lifecycle(Shutdown); + + let actual: State = self.entry().state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + // Transitioned to a shutdown state + return false; + } + + match actual.lifecycle() { + Sleeping => { + state = actual; + } + Notified | Signaled => { + // Transition back to running + loop { + let mut next = state; + next.set_lifecycle(Running); + + let actual = self.entry().state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + self.inner.spawn_thread(self.id.clone(), &self.inner); + return false; + } + + state = actual; + } + } + Shutdown | Running => { + // To get here, the block above transitioned the tate to + // `Sleeping`. 
No other thread can concurrently + // transition to `Shutdown` or `Running`. + unreachable!(); + } + } + } + } + } + + /// This doesn't actually put the thread to sleep. It calls + /// `park.park_timeout` with a duration of 0. This allows the park + /// implementation to perform any work that might be done on an interval. + fn sleep_light(&self) { + unsafe { + (*self.entry().park.get()) + .park_timeout(Duration::from_millis(0)) + .unwrap(); + } + } + + fn entry(&self) -> &Entry { + debug_assert!(!self.is_blocking.get()); + &self.inner.workers[self.id.0] + } +} + +impl Drop for Worker { + fn drop(&mut self) { + trace!("shutting down thread; idx={}", self.id.0); + + if self.should_finalize.get() { + // Get all inbound work and push it onto the work queue. The work + // queue is drained in the next step. + self.drain_inbound(); + + // Drain the work queue + self.entry().drain_tasks(); + + // TODO: Drain the work queue... + } + } +} + +// ===== impl CurrentTask ===== + +impl CurrentTask { + /// Returns a default `CurrentTask` representing no task. + fn new() -> CurrentTask { + CurrentTask { + task: Cell::new(None), + can_block: Cell::new(CanBlock::CanRequest), + } + } + + /// Returns a reference to the task. + fn get_ref(&self) -> &Arc { + unsafe { &*self.task.get().unwrap() } + } + + fn can_block(&self) -> CanBlock { + self.can_block.get() + } + + fn set_can_block(&self, can_block: CanBlock) { + self.can_block.set(can_block); + } + + fn set(&self, task: &Arc, can_block: CanBlock) { + self.task.set(Some(task as *const _)); + self.can_block.set(can_block); + } + + /// Reset the `CurrentTask` to null state. + fn clear(&self) { + self.task.set(None); + self.can_block.set(CanBlock::CanRequest); + } +} + +// ===== impl WorkerId ===== + +impl WorkerId { + /// Returns a `WorkerId` representing the worker entry at index `idx`. 
+ pub(crate) fn new(idx: usize) -> WorkerId { + WorkerId(idx) + } +} diff --git a/third_party/rust/tokio-threadpool/src/worker/stack.rs b/third_party/rust/tokio-threadpool/src/worker/stack.rs new file mode 100644 index 000000000000..19245780adee --- /dev/null +++ b/third_party/rust/tokio-threadpool/src/worker/stack.rs @@ -0,0 +1,253 @@ +use config::MAX_WORKERS; +use worker; + +use std::{fmt, usize}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{Acquire, AcqRel, Relaxed}; + +/// Lock-free stack of sleeping workers. +/// +/// This is implemented as a Treiber stack and references to nodes are +/// `usize` values, indexing the entry in the `[worker::Entry]` array stored by +/// `Pool`. Each `Entry` instance maintains a `pushed` bit in its state. This +/// bit tracks if the entry is already pushed onto the stack or not. A single +/// entry can only be stored on the stack a single time. +/// +/// By using indexes instead of pointers, that allows a much greater amount of +/// data to be used for the ABA guard (see correctness section of wikipedia +/// page). +/// +/// Treiber stack: https://en.wikipedia.org/wiki/Treiber_Stack +#[derive(Debug)] +pub(crate) struct Stack { + state: AtomicUsize, +} + +/// State related to the stack of sleeping workers. 
+/// +/// - Parked head 16 bits +/// - Sequence remaining +/// +/// The parked head value has a couple of special values: +/// +/// - EMPTY: No sleepers +/// - TERMINATED: Don't spawn more threads +#[derive(Eq, PartialEq, Clone, Copy)] +pub struct State(usize); + +/// Extracts the head of the worker stack from the scheduler state +/// +/// The 16 relates to the value of MAX_WORKERS +const STACK_MASK: usize = ((1 << 16) - 1); + +/// Used to mark the stack as empty +pub(crate) const EMPTY: usize = MAX_WORKERS; + +/// Used to mark the stack as terminated +pub(crate) const TERMINATED: usize = EMPTY + 1; + +/// How many bits the treiber ABA guard is offset by +const ABA_GUARD_SHIFT: usize = 16; + +#[cfg(target_pointer_width = "64")] +const ABA_GUARD_MASK: usize = (1 << (64 - ABA_GUARD_SHIFT)) - 1; + +#[cfg(target_pointer_width = "32")] +const ABA_GUARD_MASK: usize = (1 << (32 - ABA_GUARD_SHIFT)) - 1; + +// ===== impl Stack ===== + +impl Stack { + /// Create a new `Stack` representing the empty state. + pub fn new() -> Stack { + let state = AtomicUsize::new(State::new().into()); + Stack { state } + } + + /// Push a worker onto the stack + /// + /// # Return + /// + /// Returns `Ok` on success. + /// + /// Returns `Err` if the pool has transitioned to the `TERMINATED` state. + /// When terminated, pushing new entries is no longer permitted. + pub fn push(&self, entries: &[worker::Entry], idx: usize) -> Result<(), ()> { + let mut state: State = self.state.load(Acquire).into(); + + debug_assert!(worker::State::from(entries[idx].state.load(Relaxed)).is_pushed()); + + loop { + let mut next = state; + + let head = state.head(); + + if head == TERMINATED { + // The pool is terminated, cannot push the sleeper. 
+ return Err(()); + } + + entries[idx].set_next_sleeper(head); + next.set_head(idx); + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if state == actual { + return Ok(()); + } + + state = actual; + } + } + + /// Pop a worker off the stack. + /// + /// If `terminate` is set and the stack is empty when this function is + /// called, the state of the stack is transitioned to "terminated". At this + /// point, no further workers can be pushed onto the stack. + /// + /// # Return + /// + /// Returns the index of the popped worker and the worker's observed state. + /// + /// `None` if the stack is empty. + pub fn pop(&self, entries: &[worker::Entry], + max_lifecycle: worker::Lifecycle, + terminate: bool) + -> Option<(usize, worker::State)> + { + // Figure out the empty value + let terminal = match terminate { + true => TERMINATED, + false => EMPTY, + }; + + // If terminating, the max lifecycle *must* be `Signaled`, which is the + // highest lifecycle. By passing the greatest possible lifecycle value, + // no entries are skipped by this function. + // + // TODO: It would be better to terminate in a separate function that + // atomically takes all values and transitions to a terminated state. + debug_assert!(!terminate || max_lifecycle == worker::Lifecycle::Signaled); + + let mut state: State = self.state.load(Acquire).into(); + + loop { + let head = state.head(); + + if head == EMPTY { + let mut next = state; + next.set_head(terminal); + + if next == state { + debug_assert!(terminal == EMPTY); + return None; + } + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual != state { + state = actual; + continue; + } + + return None; + } else if head == TERMINATED { + return None; + } + + debug_assert!(head < MAX_WORKERS); + + let mut next = state; + + let next_head = entries[head].next_sleeper(); + + // TERMINATED can never be set as the "next pointer" on a worker. 
+ debug_assert!(next_head != TERMINATED); + + if next_head == EMPTY { + next.set_head(terminal); + } else { + next.set_head(next_head); + } + + let actual = self.state.compare_and_swap( + state.into(), next.into(), AcqRel).into(); + + if actual == state { + // Release ordering is needed to ensure that unsetting the + // `pushed` flag happens after popping the sleeper from the + // stack. + // + // Acquire ordering is required to acquire any memory associated + // with transitioning the worker's lifecycle. + let state = entries[head].fetch_unset_pushed(AcqRel); + + if state.lifecycle() >= max_lifecycle { + // If the worker has already been notified, then it is + // warming up to do more work. In this case, try to pop + // another thread that might be in a relaxed state. + continue; + } + + return Some((head, state)); + } + + state = actual; + } + } +} + +// ===== impl State ===== + +impl State { + #[inline] + fn new() -> State { + State(EMPTY) + } + + #[inline] + fn head(&self) -> usize { + self.0 & STACK_MASK + } + + #[inline] + fn set_head(&mut self, val: usize) { + // The ABA guard protects against the ABA problem w/ treiber stacks + let aba_guard = ((self.0 >> ABA_GUARD_SHIFT) + 1) & ABA_GUARD_MASK; + + self.0 = (aba_guard << ABA_GUARD_SHIFT) | val; + } +} + +impl From for State { + fn from(src: usize) -> Self { + State(src) + } +} + +impl From for usize { + fn from(src: State) -> Self { + src.0 + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let head = self.head(); + + let mut fmt = fmt.debug_struct("stack::State"); + + if head < MAX_WORKERS { + fmt.field("head", &head); + } else if head == EMPTY { + fmt.field("head", &"EMPTY"); + } else if head == TERMINATED { + fmt.field("head", &"TERMINATED"); + } + + fmt.finish() + } +} diff --git a/third_party/rust/tokio-threadpool/src/worker/state.rs b/third_party/rust/tokio-threadpool/src/worker/state.rs new file mode 100644 index 000000000000..cb98937840c9 --- /dev/null 
+++ b/third_party/rust/tokio-threadpool/src/worker/state.rs @@ -0,0 +1,158 @@ +use std::fmt; + +/// Tracks worker state +#[derive(Clone, Copy, Eq, PartialEq)] +pub(crate) struct State(usize); + +/// Set when the worker is pushed onto the scheduler's stack of sleeping +/// threads. +pub(crate) const PUSHED_MASK: usize = 0b001; + +/// Manages the worker lifecycle part of the state +const LIFECYCLE_MASK: usize = 0b1110; +const LIFECYCLE_SHIFT: usize = 1; + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Copy)] +#[repr(usize)] +pub(crate) enum Lifecycle { + /// The worker does not currently have an associated thread. + Shutdown = 0 << LIFECYCLE_SHIFT, + + /// The worker is doing work + Running = 1 << LIFECYCLE_SHIFT, + + /// The worker is currently asleep in the condvar + Sleeping = 2 << LIFECYCLE_SHIFT, + + /// The worker has been notified it should process more work. + Notified = 3 << LIFECYCLE_SHIFT, + + /// A stronger form of notification. In this case, the worker is expected to + /// wakeup and try to acquire more work... if it enters this state while + /// already busy with other work, it is expected to signal another worker. 
+ Signaled = 4 << LIFECYCLE_SHIFT, +} + +impl State { + /// Returns true if the worker entry is pushed in the sleeper stack + pub fn is_pushed(&self) -> bool { + self.0 & PUSHED_MASK == PUSHED_MASK + } + + pub fn set_pushed(&mut self) { + self.0 |= PUSHED_MASK + } + + pub fn is_notified(&self) -> bool { + use self::Lifecycle::*; + + match self.lifecycle() { + Notified | Signaled => true, + _ => false, + } + } + + pub fn lifecycle(&self) -> Lifecycle { + Lifecycle::from(self.0 & LIFECYCLE_MASK) + } + + pub fn set_lifecycle(&mut self, val: Lifecycle) { + self.0 = (self.0 & !LIFECYCLE_MASK) | (val as usize) + } + + pub fn is_signaled(&self) -> bool { + self.lifecycle() == Lifecycle::Signaled + } + + pub fn notify(&mut self) { + use self::Lifecycle::Signaled; + + if self.lifecycle() != Signaled { + self.set_lifecycle(Signaled) + } + } +} + +impl Default for State { + fn default() -> State { + // All workers will start pushed in the sleeping stack + State(PUSHED_MASK) + } +} + +impl From for State { + fn from(src: usize) -> Self { + State(src) + } +} + +impl From for usize { + fn from(src: State) -> Self { + src.0 + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("worker::State") + .field("lifecycle", &self.lifecycle()) + .field("is_pushed", &self.is_pushed()) + .finish() + } +} + +// ===== impl Lifecycle ===== + +impl From for Lifecycle { + fn from(src: usize) -> Lifecycle { + use self::Lifecycle::*; + + debug_assert!( + src == Shutdown as usize || + src == Running as usize || + src == Sleeping as usize || + src == Notified as usize || + src == Signaled as usize); + + unsafe { ::std::mem::transmute(src) } + } +} + +impl From for usize { + fn from(src: Lifecycle) -> usize { + let v = src as usize; + debug_assert!(v & LIFECYCLE_MASK == v); + v + } +} + +#[cfg(test)] +mod test { + use super::*; + use super::Lifecycle::*; + + #[test] + fn lifecycle_encode() { + let lifecycles = &[ + Shutdown, + Running, + 
Sleeping, + Notified, + Signaled, + ]; + + for &lifecycle in lifecycles { + let mut v: usize = lifecycle.into(); + v &= LIFECYCLE_MASK; + + assert_eq!(lifecycle, Lifecycle::from(v)); + } + } + + #[test] + fn lifecycle_ord() { + assert!(Running >= Shutdown); + assert!(Signaled >= Notified); + assert!(Signaled >= Sleeping); + } +} diff --git a/third_party/rust/tokio-threadpool/tests/blocking.rs b/third_party/rust/tokio-threadpool/tests/blocking.rs new file mode 100644 index 000000000000..b87417b13629 --- /dev/null +++ b/third_party/rust/tokio-threadpool/tests/blocking.rs @@ -0,0 +1,410 @@ +extern crate tokio_threadpool; + +extern crate env_logger; +#[macro_use] +extern crate futures; +extern crate rand; + +use tokio_threadpool::*; + +use futures::*; +use futures::future::{lazy, poll_fn}; +use rand::*; + +use std::sync::*; +use std::sync::atomic::*; +use std::sync::atomic::Ordering::*; +use std::time::Duration; +use std::thread; + +#[test] +fn basic() { + let _ = ::env_logger::init(); + + let pool = Builder::new() + .pool_size(1) + .max_blocking(1) + .build(); + + let (tx1, rx1) = mpsc::channel(); + let (tx2, rx2) = mpsc::channel(); + + pool.spawn(lazy(move || { + let res = blocking(|| { + let v = rx1.recv().unwrap(); + tx2.send(v).unwrap(); + }).unwrap(); + + assert!(res.is_ready()); + Ok(().into()) + })); + + pool.spawn(lazy(move || { + tx1.send(()).unwrap(); + Ok(().into()) + })); + + rx2.recv().unwrap(); +} + +#[test] +fn notify_task_on_capacity() { + const BLOCKING: usize = 10; + + let pool = Builder::new() + .pool_size(1) + .max_blocking(1) + .build(); + + let rem = Arc::new(AtomicUsize::new(BLOCKING)); + let (tx, rx) = mpsc::channel(); + + for _ in 0..BLOCKING { + let rem = rem.clone(); + let tx = tx.clone(); + + pool.spawn(lazy(move || { + poll_fn(move || { + blocking(|| { + thread::sleep(Duration::from_millis(100)); + let prev = rem.fetch_sub(1, Relaxed); + + if prev == 1 { + tx.send(()).unwrap(); + } + }).map_err(|e| panic!("blocking err {:?}", e)) + }) + 
})); + } + + rx.recv().unwrap(); + + assert_eq!(0, rem.load(Relaxed)); +} + +#[test] +fn capacity_is_use_it_or_lose_it() { + use futures::*; + use futures::Async::*; + use futures::sync::oneshot; + use futures::task::Task; + + // TODO: Run w/ bigger pool size + + let pool = Builder::new() + .pool_size(1) + .max_blocking(1) + .build(); + + let (tx1, rx1) = mpsc::channel(); + let (tx2, rx2) = oneshot::channel(); + let (tx3, rx3) = mpsc::channel(); + let (tx4, rx4) = mpsc::channel(); + + // First, fill the blocking capacity + pool.spawn(lazy(move || { + poll_fn(move || { + blocking(|| { + rx1.recv().unwrap(); + }).map_err(|_| panic!()) + }) + })); + + pool.spawn(lazy(move || { + rx2 + .map_err(|_| panic!()) + .and_then(|task: Task| { + poll_fn(move || { + blocking(|| { + // Notify the other task + task.notify(); + + // Block until woken + rx3.recv().unwrap(); + }).map_err(|_| panic!()) + }) + }) + })); + + // Spawn a future that will try to block, get notified, then not actually + // use the blocking + let mut i = 0; + let mut tx2 = Some(tx2); + + pool.spawn(lazy(move || { + poll_fn(move || { + match i { + 0 => { + i = 1; + + let res = blocking(|| unreachable!()) + .map_err(|_| panic!()); + + assert!(res.unwrap().is_not_ready()); + + // Unblock the first blocker + tx1.send(()).unwrap(); + + return Ok(NotReady); + } + 1 => { + i = 2; + + // Skip blocking, and notify the second task that it should + // start blocking + let me = task::current(); + tx2.take().unwrap().send(me).unwrap(); + + return Ok(NotReady); + } + 2 => { + let res = blocking(|| unreachable!()) + .map_err(|_| panic!()); + + assert!(res.unwrap().is_not_ready()); + + // Unblock the first blocker + tx3.send(()).unwrap(); + tx4.send(()).unwrap(); + Ok(().into()) + } + _ => unreachable!(), + } + }) + })); + + rx4.recv().unwrap(); +} + +#[test] +fn blocking_thread_does_not_take_over_shutdown_worker_thread() { + let pool = Builder::new() + .pool_size(2) + .max_blocking(1) + .build(); + + let (enter_tx, 
enter_rx) = mpsc::channel(); + let (exit_tx, exit_rx) = mpsc::channel(); + let (try_tx, try_rx) = mpsc::channel(); + + let exited = Arc::new(AtomicBool::new(false)); + + { + let exited = exited.clone(); + + pool.spawn(lazy(move || { + poll_fn(move || { + blocking(|| { + enter_tx.send(()).unwrap(); + exit_rx.recv().unwrap(); + exited.store(true, Relaxed); + }).map_err(|_| panic!()) + }) + })); + } + + // Wait for the task to block + let _ = enter_rx.recv().unwrap(); + + // Spawn another task that attempts to block + pool.spawn(lazy(move || { + poll_fn(move || { + let res = blocking(|| { + + }).unwrap(); + + assert_eq!( + res.is_ready(), + exited.load(Relaxed)); + + try_tx.send(res.is_ready()).unwrap(); + + Ok(res) + }) + })); + + // Wait for the second task to try to block (and not be ready). + let res = try_rx.recv().unwrap(); + assert!(!res); + + // Unblock the first task + exit_tx.send(()).unwrap(); + + // Wait for the second task to successfully block. + let res = try_rx.recv().unwrap(); + assert!(res); + + drop(pool); +} + +#[test] +fn blocking_one_time_gets_capacity_for_multiple_blocks() { + const ITER: usize = 1; + const BLOCKING: usize = 2; + + for _ in 0..ITER { + let pool = Builder::new() + .pool_size(4) + .max_blocking(1) + .build(); + + let rem = Arc::new(AtomicUsize::new(BLOCKING)); + let (tx, rx) = mpsc::channel(); + + for _ in 0..BLOCKING { + let rem = rem.clone(); + let tx = tx.clone(); + + pool.spawn(lazy(move || { + poll_fn(move || { + // First block + let res = blocking(|| { + thread::sleep(Duration::from_millis(100)); + }).map_err(|e| panic!("blocking err {:?}", e)); + + try_ready!(res); + + let res = blocking(|| { + thread::sleep(Duration::from_millis(100)); + let prev = rem.fetch_sub(1, Relaxed); + + if prev == 1 { + tx.send(()).unwrap(); + } + }); + + assert!(res.unwrap().is_ready()); + + Ok(().into()) + }) + })); + } + + rx.recv().unwrap(); + + assert_eq!(0, rem.load(Relaxed)); + } +} + +#[test] +fn shutdown() { + const ITER: usize = 1_000; + 
const BLOCKING: usize = 10; + + for _ in 0..ITER { + let num_inc = Arc::new(AtomicUsize::new(0)); + let num_dec = Arc::new(AtomicUsize::new(0)); + let (tx, rx) = mpsc::channel(); + + let pool = { + let num_inc = num_inc.clone(); + let num_dec = num_dec.clone(); + + Builder::new() + .pool_size(1) + .max_blocking(BLOCKING) + .after_start(move || { num_inc.fetch_add(1, Relaxed); }) + .before_stop(move || { num_dec.fetch_add(1, Relaxed); }) + .build() + }; + + let barrier = Arc::new(Barrier::new(BLOCKING)); + + for _ in 0..BLOCKING { + let barrier = barrier.clone(); + let tx = tx.clone(); + + pool.spawn(lazy(move || { + let res = blocking(|| { + barrier.wait(); + Ok::<_, ()>(()) + }).unwrap(); + + tx.send(()).unwrap(); + + assert!(res.is_ready()); + Ok(().into()) + })); + } + + for _ in 0..BLOCKING { + rx.recv().unwrap(); + } + + // Shutdown + drop(pool); + + assert_eq!(11, num_inc.load(Relaxed)); + assert_eq!(11, num_dec.load(Relaxed)); + } +} + +#[derive(Debug, Copy, Clone)] +enum Sleep { + Skip, + Yield, + Rand, + Fixed(Duration), +} + +#[test] +fn hammer() { + use self::Sleep::*; + + const ITER: usize = 5; + + let combos = [ + (2, 4, 1_000, Skip), + (2, 4, 1_000, Yield), + (2, 4, 100, Rand), + (2, 4, 100, Fixed(Duration::from_millis(3))), + (2, 4, 100, Fixed(Duration::from_millis(12))), + ]; + + for &(size, max_blocking, n, sleep) in &combos { + for _ in 0..ITER { + let pool = Builder::new() + .pool_size(size) + .max_blocking(max_blocking) + .build(); + + let cnt_task = Arc::new(AtomicUsize::new(0)); + let cnt_block = Arc::new(AtomicUsize::new(0)); + + for _ in 0..n { + let cnt_task = cnt_task.clone(); + let cnt_block = cnt_block.clone(); + + pool.spawn(lazy(move || { + cnt_task.fetch_add(1, Relaxed); + + poll_fn(move || { + blocking(|| { + match sleep { + Skip => {} + Yield => { + thread::yield_now(); + } + Rand => { + let ms = thread_rng().gen_range(3, 12); + thread::sleep(Duration::from_millis(ms)); + } + Fixed(dur) => { + thread::sleep(dur); + } + } + + 
cnt_block.fetch_add(1, Relaxed); + }).map_err(|_| panic!()) + }) + })); + } + + // Wait for the work to complete + pool.shutdown_on_idle().wait().unwrap(); + + assert_eq!(n, cnt_task.load(Relaxed)); + assert_eq!(n, cnt_block.load(Relaxed)); + } + } +} diff --git a/third_party/rust/tokio-threadpool/tests/hammer.rs b/third_party/rust/tokio-threadpool/tests/hammer.rs new file mode 100644 index 000000000000..7d1e5152aed9 --- /dev/null +++ b/third_party/rust/tokio-threadpool/tests/hammer.rs @@ -0,0 +1,107 @@ +extern crate futures; +extern crate tokio_threadpool; + +use tokio_threadpool::*; + +use futures::{Future, Stream, Sink, Poll}; + +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::*; + +#[test] +fn hammer() { + use futures::future; + use futures::sync::{oneshot, mpsc}; + + const N: usize = 1000; + const ITER: usize = 20; + + struct Counted { + cnt: Arc, + inner: T, + } + + impl Future for Counted { + type Item = T::Item; + type Error = T::Error; + + fn poll(&mut self) -> Poll { + self.inner.poll() + } + } + + impl Drop for Counted { + fn drop(&mut self) { + self.cnt.fetch_add(1, Relaxed); + } + } + + for _ in 0.. 
ITER { + let pool = Builder::new() + // .pool_size(30) + .build(); + + let cnt = Arc::new(AtomicUsize::new(0)); + + let (listen_tx, listen_rx) = mpsc::unbounded::>>(); + let mut listen_tx = listen_tx.wait(); + + pool.spawn({ + let c1 = cnt.clone(); + let c2 = cnt.clone(); + let pool = pool.sender().clone(); + let task = listen_rx + .map_err(|e| panic!("accept error = {:?}", e)) + .for_each(move |tx| { + let task = future::lazy(|| { + let (tx2, rx2) = oneshot::channel(); + + tx.send(tx2).unwrap(); + rx2 + }) + .map_err(|e| panic!("e={:?}", e)) + .and_then(|_| { + Ok(()) + }); + + pool.spawn(Counted { + inner: task, + cnt: c1.clone(), + }).unwrap(); + + Ok(()) + }); + + Counted { + inner: task, + cnt: c2, + } + }); + + for _ in 0..N { + let cnt = cnt.clone(); + let (tx, rx) = oneshot::channel(); + listen_tx.send(tx).unwrap(); + + pool.spawn({ + let task = rx + .map_err(|e| panic!("rx err={:?}", e)) + .and_then(|tx| { + tx.send(()).unwrap(); + Ok(()) + }); + + Counted { + inner: task, + cnt, + } + }); + } + + drop(listen_tx); + + pool.shutdown_on_idle().wait().unwrap(); + assert_eq!(N * 2 + 1, cnt.load(Relaxed)); + } +} diff --git a/third_party/rust/tokio-threadpool/tests/threadpool.rs b/third_party/rust/tokio-threadpool/tests/threadpool.rs new file mode 100644 index 000000000000..9dcf2b71c84b --- /dev/null +++ b/third_party/rust/tokio-threadpool/tests/threadpool.rs @@ -0,0 +1,588 @@ +extern crate tokio_threadpool; +extern crate tokio_executor; +extern crate futures; +extern crate env_logger; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +use tokio_threadpool::*; + +#[cfg(not(feature = "unstable-futures"))] +use futures::{Poll, Sink, Stream, Async, Future}; +#[cfg(not(feature = "unstable-futures"))] +use futures::future::lazy; + +#[cfg(feature = "unstable-futures")] +use futures2::prelude::*; +#[cfg(feature = "unstable-futures")] +fn lazy(f: F) -> Box + Send> where + F: Send + 'static + FnOnce() -> R, + R: Send + 'static + IntoFuture, + R::Future: 
Send, +{ + Box::new(::futures2::future::lazy(|_| f())) +} + +use std::cell::Cell; +use std::sync::{mpsc, Arc}; +use std::sync::atomic::*; +use std::sync::atomic::Ordering::Relaxed; +use std::time::Duration; + +thread_local!(static FOO: Cell = Cell::new(0)); + +#[cfg(not(feature = "unstable-futures"))] +fn spawn_pool(pool: &mut Sender, f: F) + where F: Future + Send + 'static +{ + pool.spawn(f).unwrap() +} +#[cfg(feature = "unstable-futures")] +fn spawn_pool(pool: &mut Sender, f: F) + where F: Future + Send + 'static +{ + futures2::executor::Executor::spawn( + pool, + Box::new(f.map_err(|_| panic!())) + ).unwrap() +} + +#[cfg(not(feature = "unstable-futures"))] +fn spawn_default(f: F) + where F: Future + Send + 'static +{ + tokio_executor::spawn(f) +} +#[cfg(feature = "unstable-futures")] +fn spawn_default(f: F) + where F: Future + Send + 'static +{ + tokio_executor::spawn2(Box::new(f.map_err(|_| panic!()))) +} + +fn ignore_results(f: F) -> Box + Send> { + Box::new(f.map(|_| ()).map_err(|_| ())) +} + +#[cfg(feature = "unstable-futures")] +fn await_shutdown(shutdown: Shutdown) { + futures::Future::wait(shutdown).unwrap() +} +#[cfg(not(feature = "unstable-futures"))] +fn await_shutdown(shutdown: Shutdown) { + shutdown.wait().unwrap() +} + +#[cfg(not(feature = "unstable-futures"))] +fn block_on(f: F) -> Result { + f.wait() +} +#[cfg(feature = "unstable-futures")] +fn block_on(f: F) -> Result { + futures2::executor::block_on(f) +} + +#[test] +fn natural_shutdown_simple_futures() { + let _ = ::env_logger::init(); + + for _ in 0..1_000 { + let num_inc = Arc::new(AtomicUsize::new(0)); + let num_dec = Arc::new(AtomicUsize::new(0)); + + FOO.with(|f| { + f.set(1); + + let pool = { + let num_inc = num_inc.clone(); + let num_dec = num_dec.clone(); + + Builder::new() + .around_worker(move |w, _| { + num_inc.fetch_add(1, Relaxed); + w.run(); + num_dec.fetch_add(1, Relaxed); + }) + .build() + }; + + let mut tx = pool.sender().clone(); + + let a = { + let (t, rx) = mpsc::channel(); 
+ spawn_pool(&mut tx, lazy(move || { + // Makes sure this runs on a worker thread + FOO.with(|f| assert_eq!(f.get(), 0)); + + t.send("one").unwrap(); + Ok(()) + })); + rx + }; + + let b = { + let (t, rx) = mpsc::channel(); + spawn_pool(&mut tx, lazy(move || { + // Makes sure this runs on a worker thread + FOO.with(|f| assert_eq!(f.get(), 0)); + + t.send("two").unwrap(); + Ok(()) + })); + rx + }; + + drop(tx); + + assert_eq!("one", a.recv().unwrap()); + assert_eq!("two", b.recv().unwrap()); + + // Wait for the pool to shutdown + await_shutdown(pool.shutdown()); + + // Assert that at least one thread started + let num_inc = num_inc.load(Relaxed); + assert!(num_inc > 0); + + // Assert that all threads shutdown + let num_dec = num_dec.load(Relaxed); + assert_eq!(num_inc, num_dec); + }); + } +} + +#[test] +fn force_shutdown_drops_futures() { + let _ = ::env_logger::init(); + + for _ in 0..1_000 { + let num_inc = Arc::new(AtomicUsize::new(0)); + let num_dec = Arc::new(AtomicUsize::new(0)); + let num_drop = Arc::new(AtomicUsize::new(0)); + + struct Never(Arc); + + #[cfg(not(feature = "unstable-futures"))] + impl Future for Never { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + Ok(Async::NotReady) + } + } + + #[cfg(feature = "unstable-futures")] + impl Future for Never { + type Item = (); + type Error = (); + + fn poll(&mut self, _: &mut futures2::task::Context) -> Poll<(), ()> { + Ok(Async::Pending) + } + } + + impl Drop for Never { + fn drop(&mut self) { + self.0.fetch_add(1, Relaxed); + } + } + + let a = num_inc.clone(); + let b = num_dec.clone(); + + let pool = Builder::new() + .around_worker(move |w, _| { + a.fetch_add(1, Relaxed); + w.run(); + b.fetch_add(1, Relaxed); + }) + .build(); + let mut tx = pool.sender().clone(); + + spawn_pool(&mut tx, Never(num_drop.clone())); + + // Wait for the pool to shutdown + await_shutdown(pool.shutdown_now()); + + // Assert that only a single thread was spawned. 
+ let a = num_inc.load(Relaxed); + assert!(a >= 1); + + // Assert that all threads shutdown + let b = num_dec.load(Relaxed); + assert_eq!(a, b); + + // Assert that the future was dropped + let c = num_drop.load(Relaxed); + assert_eq!(c, 1); + } +} + +#[test] +fn drop_threadpool_drops_futures() { + let _ = ::env_logger::init(); + + for _ in 0..1_000 { + let num_inc = Arc::new(AtomicUsize::new(0)); + let num_dec = Arc::new(AtomicUsize::new(0)); + let num_drop = Arc::new(AtomicUsize::new(0)); + + struct Never(Arc); + + #[cfg(not(feature = "unstable-futures"))] + impl Future for Never { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + Ok(Async::NotReady) + } + } + + #[cfg(feature = "unstable-futures")] + impl Future for Never { + type Item = (); + type Error = (); + + fn poll(&mut self, _: &mut futures2::task::Context) -> Poll<(), ()> { + Ok(Async::Pending) + } + } + + impl Drop for Never { + fn drop(&mut self) { + self.0.fetch_add(1, Relaxed); + } + } + + let a = num_inc.clone(); + let b = num_dec.clone(); + + let pool = Builder::new() + .max_blocking(2) + .pool_size(20) + .around_worker(move |w, _| { + a.fetch_add(1, Relaxed); + w.run(); + b.fetch_add(1, Relaxed); + }) + .build(); + let mut tx = pool.sender().clone(); + + spawn_pool(&mut tx, Never(num_drop.clone())); + + // Wait for the pool to shutdown + drop(pool); + + // Assert that only a single thread was spawned. 
+ let a = num_inc.load(Relaxed); + assert!(a >= 1); + + // Assert that all threads shutdown + let b = num_dec.load(Relaxed); + assert_eq!(a, b); + + // Assert that the future was dropped + let c = num_drop.load(Relaxed); + assert_eq!(c, 1); + } +} + +#[test] +fn thread_shutdown_timeout() { + use std::sync::Mutex; + + let _ = ::env_logger::init(); + + let (shutdown_tx, shutdown_rx) = mpsc::channel(); + let (complete_tx, complete_rx) = mpsc::channel(); + + let t = Mutex::new(shutdown_tx); + + let pool = Builder::new() + .keep_alive(Some(Duration::from_millis(200))) + .around_worker(move |w, _| { + w.run(); + // There could be multiple threads here + let _ = t.lock().unwrap().send(()); + }) + .build(); + let mut tx = pool.sender().clone(); + + let t = complete_tx.clone(); + spawn_pool(&mut tx, lazy(move || { + t.send(()).unwrap(); + Ok(()) + })); + + // The future completes + complete_rx.recv().unwrap(); + + // The thread shuts down eventually + shutdown_rx.recv().unwrap(); + + // Futures can still be run + spawn_pool(&mut tx, lazy(move || { + complete_tx.send(()).unwrap(); + Ok(()) + })); + + complete_rx.recv().unwrap(); + + await_shutdown(pool.shutdown()); +} + +#[test] +fn many_oneshot_futures() { + const NUM: usize = 10_000; + + let _ = ::env_logger::init(); + + for _ in 0..50 { + let pool = ThreadPool::new(); + let mut tx = pool.sender().clone(); + let cnt = Arc::new(AtomicUsize::new(0)); + + for _ in 0..NUM { + let cnt = cnt.clone(); + spawn_pool(&mut tx, lazy(move || { + cnt.fetch_add(1, Relaxed); + Ok(()) + })); + } + + // Wait for the pool to shutdown + await_shutdown(pool.shutdown()); + + let num = cnt.load(Relaxed); + assert_eq!(num, NUM); + } +} + +#[test] +fn many_multishot_futures() { + #[cfg(not(feature = "unstable-futures"))] + use futures::sync::mpsc; + + #[cfg(feature = "unstable-futures")] + use futures2::channel::mpsc; + + const CHAIN: usize = 200; + const CYCLES: usize = 5; + const TRACKS: usize = 50; + + let _ = ::env_logger::init(); + + for _ in 
0..50 { + let pool = ThreadPool::new(); + let mut pool_tx = pool.sender().clone(); + + let mut start_txs = Vec::with_capacity(TRACKS); + let mut final_rxs = Vec::with_capacity(TRACKS); + + for _ in 0..TRACKS { + let (start_tx, mut chain_rx) = mpsc::channel(10); + + for _ in 0..CHAIN { + let (next_tx, next_rx) = mpsc::channel(10); + + let rx = chain_rx + .map_err(|e| panic!("{:?}", e)); + + // Forward all the messages + spawn_pool(&mut pool_tx, next_tx + .send_all(rx) + .map(|_| ()) + .map_err(|e| panic!("{:?}", e)) + ); + + chain_rx = next_rx; + } + + // This final task cycles if needed + let (final_tx, final_rx) = mpsc::channel(10); + let cycle_tx = start_tx.clone(); + let mut rem = CYCLES; + + let task = chain_rx.take(CYCLES as u64).for_each(move |msg| { + rem -= 1; + let send = if rem == 0 { + final_tx.clone().send(msg) + } else { + cycle_tx.clone().send(msg) + }; + + send.then(|res| { + res.unwrap(); + Ok(()) + }) + }); + spawn_pool(&mut pool_tx, ignore_results(task)); + + start_txs.push(start_tx); + final_rxs.push(final_rx); + } + + for start_tx in start_txs { + block_on(start_tx.send("ping")).unwrap(); + } + + for final_rx in final_rxs { + {#![cfg(feature = "unstable-futures")] + block_on(final_rx.next()).unwrap(); + } + + {#![cfg(not(feature = "unstable-futures"))] + block_on(final_rx.into_future()).unwrap(); + } + } + + // Shutdown the pool + await_shutdown(pool.shutdown()); + } +} + +#[test] +fn global_executor_is_configured() { + let pool = ThreadPool::new(); + let mut tx = pool.sender().clone(); + + let (signal_tx, signal_rx) = mpsc::channel(); + + spawn_pool(&mut tx, lazy(move || { + spawn_default(lazy(move || { + signal_tx.send(()).unwrap(); + Ok(()) + })); + + Ok(()) + })); + + signal_rx.recv().unwrap(); + + await_shutdown(pool.shutdown()); +} + +#[test] +fn new_threadpool_is_idle() { + let pool = ThreadPool::new(); + await_shutdown(pool.shutdown_on_idle()); +} + +#[test] +fn busy_threadpool_is_not_idle() { + #[cfg(not(feature = "unstable-futures"))] 
+ use futures::sync::oneshot; + + #[cfg(feature = "unstable-futures")] + use futures2::channel::oneshot; + + // let pool = ThreadPool::new(); + let pool = Builder::new() + .pool_size(4) + .max_blocking(2) + .build(); + let mut tx = pool.sender().clone(); + + let (term_tx, term_rx) = oneshot::channel(); + + spawn_pool(&mut tx, term_rx.then(|_| { + Ok(()) + })); + + let mut idle = pool.shutdown_on_idle(); + + struct IdleFut<'a>(&'a mut Shutdown); + + #[cfg(not(feature = "unstable-futures"))] + impl<'a> Future for IdleFut<'a> { + type Item = (); + type Error = (); + fn poll(&mut self) -> Poll<(), ()> { + assert!(self.0.poll().unwrap().is_not_ready()); + Ok(Async::Ready(())) + } + } + + #[cfg(feature = "unstable-futures")] + impl<'a> Future for IdleFut<'a> { + type Item = (); + type Error = (); + fn poll(&mut self, cx: &mut futures2::task::Context) -> Poll<(), ()> { + assert!(self.0.poll(cx).unwrap().is_pending()); + Ok(Async::Ready(())) + } + } + + block_on(IdleFut(&mut idle)).unwrap(); + + term_tx.send(()).unwrap(); + + await_shutdown(idle); +} + +#[test] +fn panic_in_task() { + let pool = ThreadPool::new(); + let mut tx = pool.sender().clone(); + + struct Boom; + + #[cfg(not(feature = "unstable-futures"))] + impl Future for Boom { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + panic!(); + } + } + + #[cfg(feature = "unstable-futures")] + impl Future for Boom { + type Item = (); + type Error = (); + + fn poll(&mut self, _cx: &mut futures2::task::Context) -> Poll<(), ()> { + panic!(); + } + } + + impl Drop for Boom { + fn drop(&mut self) { + assert!(::std::thread::panicking()); + } + } + + spawn_pool(&mut tx, Boom); + + await_shutdown(pool.shutdown_on_idle()); +} + +#[test] +fn multi_threadpool() { + use futures::sync::oneshot; + + let pool1 = ThreadPool::new(); + let pool2 = ThreadPool::new(); + + let (tx, rx) = oneshot::channel(); + let (done_tx, done_rx) = mpsc::channel(); + + pool2.spawn({ + rx.and_then(move |_| { + 
done_tx.send(()).unwrap(); + Ok(()) + }) + .map_err(|e| panic!("err={:?}", e)) + }); + + pool1.spawn(lazy(move || { + tx.send(()).unwrap(); + Ok(()) + })); + + done_rx.recv().unwrap(); +} diff --git a/third_party/rust/tokio-timer/.cargo-checksum.json b/third_party/rust/tokio-timer/.cargo-checksum.json new file mode 100644 index 000000000000..947f08c4131d --- /dev/null +++ b/third_party/rust/tokio-timer/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"12a45b019aabc64a423d7447007476ae006c12375277bb937f973fbd2aaa8ece","Cargo.toml":"53bcee9cbeb43e04bd00e967bd41e106e53d98fe5d3b9de7b9f174ba977a8eeb","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"dbd3905284477c9b5f0391ac24a20bfcebfcfccd288c87bbe589407796ca5e93","src/atomic.rs":"c2da5e751d3e049cded49a0cc12b756b8171ee36ea69d797c3d2797ee3a82091","src/clock/clock.rs":"8fb1342f8909d65ec0fc34087a1788178026b63c919726d08cd39beaa94672db","src/clock/mod.rs":"c33f960ffbf522dd7dbe8511f1e28f6e6f229581b97119f6c1f97a24139d00aa","src/clock/now.rs":"6bf50b0b955938610826afc8d8a1394d7acbd544493542d8b32cac96f9d87c15","src/deadline.rs":"284364ecd204a80419be23c8a033c3746a0b98f2f7d6d8ca2fd50e96cb9ee0e9","src/delay.rs":"da3cfe8836217b6dfb8bd68828a58bc51827f14f17e6770b8070ba9baf641e7a","src/error.rs":"542c6364dfc81d710a5ba9da51156610c1d64a9467c62eda4f473c715e7e997b","src/interval.rs":"8ee9ec0b56d1124478407d5a4baa1768aad1a06bfd7abd8e8d2ef833147dbf36","src/lib.rs":"fd8c0c31279aa59b97f7bfe6cb39d801eeb18fa392191d4ab83af0a8415f1c43","src/timer/entry.rs":"c426cc03fb5910c7e6c5ae2f267ab253ae445d5f971daa218a226663c70a786e","src/timer/handle.rs":"c5c18efe0e04e6d7db841dea451f6d9df9af8ad8e41d6590cc45d24755c50cb7","src/timer/level.rs":"a681706cbf4215455a72c74efb070b09f167022e585f5720fa713a51c1f02536","src/timer/mod.rs":"d24fca8b858637b3eb79d38662350d9987185de7a0329dbe597bbbc8015faa5d","src/timer/now.rs":"61e258c7fad9c028cb088f4393eed470558536fb5d68cb621aa52ed9dd288448","src/timer/registration.rs":"00fb086
b046fa44af5812cea144fc6d2cba9bf2ed5cdd7e4bdf7969ec29da0c5","tests/clock.rs":"102180344fae71730d4628a30ec7d298a07a4fae5c1a711f0a8b274219eab362","tests/deadline.rs":"838db280eb258ebeed5d73f2b7ac7204f334acc614ef831df48edac81ac278d2","tests/delay.rs":"13be260ebabf13790070a0a3a0c9b20d1cb7a769b5103685379ab08c6d3abb1c","tests/hammer.rs":"ed077d709d26ceb53ec17715aa2809707e13b75436896b78a859c89eca703680","tests/interval.rs":"b591afd43043d1bb20bc19edb23c0f43381b5797219e609c2daac685e2982e3e","tests/support/mod.rs":"01066cb62f594f37f5c7f6f453a56b128d5b623210d2e308732b5b4e8815fbb5"},"package":"1c76b4e97a4f61030edff8bd272364e4f731b9f54c7307eb4eb733c3926eb96a"} \ No newline at end of file diff --git a/third_party/rust/tokio-timer/CHANGELOG.md b/third_party/rust/tokio-timer/CHANGELOG.md new file mode 100644 index 000000000000..653a2856c0b4 --- /dev/null +++ b/third_party/rust/tokio-timer/CHANGELOG.md @@ -0,0 +1,38 @@ +# 0.2.5 (August 6, 2018) + +* Add `Interval::interval` shortcut (#492). + +# 0.2.4 (June 6, 2018) + +* Add `sleep` function for easy interval delays (#347). +* Provide `clock::now()`, a configurable source of time (#381). + +# 0.2.3 (May 2, 2018) + +* Improve parking semantics (#327). + +# 0.2.2 (Skipped due to failure in counting module) + +# 0.2.1 (April 2, 2018) + +* Fix build on 32-bit systems (#274). + +# 0.2.0 (March 30, 2018) + +* Rewrite from scratch using a hierarchical wheel strategy (#249). + +# 0.1.2 (Jun 27, 2017) + +* Allow naming timer thread. +* Track changes in dependencies. + +# 0.1.1 (Apr 6, 2017) + +* Set Rust v1.14 as the minimum supported version. +* Fix bug related to intervals. +* Impl `PartialEq + Eq` for TimerError. +* Add `Debug` implementations. 
+ +# 0.1.0 (Jan 11, 2017) + +* Initial Release diff --git a/third_party/rust/tokio-timer/Cargo.toml b/third_party/rust/tokio-timer/Cargo.toml new file mode 100644 index 000000000000..986bbc120ddc --- /dev/null +++ b/third_party/rust/tokio-timer/Cargo.toml @@ -0,0 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-timer" +version = "0.2.5" +authors = ["Carl Lerche "] +description = "Timer facilities for Tokio\n" +homepage = "https://github.com/tokio-rs/tokio" +documentation = "https://docs.rs/tokio-timer" +readme = "README.md" +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.futures] +version = "0.1.19" + +[dependencies.tokio-executor] +version = "0.1.1" +[dev-dependencies.rand] +version = "0.4.2" diff --git a/third_party/rust/tokio-timer/LICENSE b/third_party/rust/tokio-timer/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-timer/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this 
permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-timer/README.md b/third_party/rust/tokio-timer/README.md new file mode 100644 index 000000000000..8ccdc06a8d5f --- /dev/null +++ b/third_party/rust/tokio-timer/README.md @@ -0,0 +1,19 @@ +# tokio-timer + +Timer facilities for Tokio + +[Documentation](https://tokio-rs.github.io/tokio/tokio_timer/) + +## Overview + +This crate provides timer facilities for usage with Tokio. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-timer/src/atomic.rs b/third_party/rust/tokio-timer/src/atomic.rs new file mode 100644 index 000000000000..060fed5162ae --- /dev/null +++ b/third_party/rust/tokio-timer/src/atomic.rs @@ -0,0 +1,88 @@ +//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a wrapper +//! around `AtomicUsize`. On 32 bit platforms, this is implemented using a +//! `Mutex`. +//! +//! This file can be removed if/when `AtomicU64` lands in `std`. 
+ +pub use self::imp::AtomicU64; + +#[cfg(target_pointer_width = "64")] +mod imp { + use std::sync::atomic::{AtomicUsize, Ordering}; + + #[derive(Debug)] + pub struct AtomicU64 { + inner: AtomicUsize, + } + + impl AtomicU64 { + pub fn new(val: u64) -> AtomicU64 { + AtomicU64 { + inner: AtomicUsize::new(val as usize), + } + } + + pub fn load(&self, ordering: Ordering) -> u64 { + self.inner.load(ordering) as u64 + } + + pub fn store(&self, val: u64, ordering: Ordering) { + self.inner.store(val as usize, ordering) + } + + pub fn fetch_or(&self, val: u64, ordering: Ordering) -> u64 { + self.inner.fetch_or(val as usize, ordering) as u64 + } + + pub fn compare_and_swap(&self, old: u64, new: u64, ordering: Ordering) -> u64 { + self.inner.compare_and_swap( + old as usize, new as usize, ordering) as u64 + } + } +} + +#[cfg(not(target_pointer_width = "64"))] +mod imp { + use std::sync::Mutex; + use std::sync::atomic::Ordering; + + #[derive(Debug)] + pub struct AtomicU64 { + inner: Mutex, + } + + impl AtomicU64 { + pub fn new(val: u64) -> AtomicU64 { + AtomicU64 { + inner: Mutex::new(val), + } + } + + pub fn load(&self, _: Ordering) -> u64 { + *self.inner.lock().unwrap() + } + + pub fn store(&self, val: u64, _: Ordering) { + *self.inner.lock().unwrap() = val; + } + + pub fn fetch_or(&self, val: u64, _: Ordering) -> u64 { + let mut lock = self.inner.lock().unwrap(); + let prev = *lock; + *lock = prev | val; + prev + } + + pub fn compare_and_swap(&self, old: u64, new: u64, _: Ordering) -> u64 { + let mut lock = self.inner.lock().unwrap(); + let prev = *lock; + + if prev != old { + return prev; + } + + *lock = new; + prev + } + } +} diff --git a/third_party/rust/tokio-timer/src/clock/clock.rs b/third_party/rust/tokio-timer/src/clock/clock.rs new file mode 100644 index 000000000000..2236bc63f124 --- /dev/null +++ b/third_party/rust/tokio-timer/src/clock/clock.rs @@ -0,0 +1,138 @@ +use clock::Now; +use timer; + +use tokio_executor::Enter; + +use std::cell::Cell; +use std::fmt; 
+use std::sync::Arc; +use std::time::Instant; + +/// A handle to a source of time. +/// +/// `Clock` instances return `Instant` values corresponding to "now". The source +/// of these values is configurable. The default source is `Instant::now()`. +#[derive(Default, Clone)] +pub struct Clock { + now: Option>, +} + +/// Thread-local tracking the current clock +thread_local!(static CLOCK: Cell> = Cell::new(None)); + +/// Returns an `Instant` corresponding to "now". +/// +/// This function delegates to the source of time configured for the current +/// execution context. By default, this is `Instant::now()`. +/// +/// Note that, because the source of time is configurable, it is possible to +/// observe non-monotonic behavior when calling `now` from different +/// executors. +/// +/// See [module](index.html) level documentation for more details. +/// +/// # Examples +/// +/// ``` +/// # use tokio_timer::clock; +/// let now = clock::now(); +/// ``` +pub fn now() -> Instant { + CLOCK.with(|current| { + match current.get() { + Some(ptr) => { + unsafe { (*ptr).now() } + } + None => Instant::now(), + } + }) +} + +impl Clock { + /// Return a new `Clock` instance that uses the current execution context's + /// source of time. + pub fn new() -> Clock { + CLOCK.with(|current| { + match current.get() { + Some(ptr) => { + unsafe { (*ptr).clone() } + } + None => Clock::system(), + } + }) + } + + /// Return a new `Clock` instance that uses `now` as the source of time. + pub fn new_with_now(now: T) -> Clock { + Clock { + now: Some(Arc::new(now)), + } + } + + /// Return a new `Clock` instance that uses `Instant::now()` as the source + /// of time. + pub fn system() -> Clock { + Clock { + now: None, + } + } + + /// Returns an instant corresponding to "now" by using the instance's source + /// of time. 
+ pub fn now(&self) -> Instant { + match self.now { + Some(ref now) => now.now(), + None => Instant::now(), + } + } +} + +#[allow(deprecated)] +impl timer::Now for Clock { + fn now(&mut self) -> Instant { + Clock::now(self) + } +} + +impl fmt::Debug for Clock { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Clock") + .field("now", { + if self.now.is_some() { + &"Some(Arc)" + } else { + &"None" + } + }) + .finish() + } +} + +/// Set the default clock for the duration of the closure. +/// +/// # Panics +/// +/// This function panics if there already is a default clock set. +pub fn with_default(clock: &Clock, enter: &mut Enter, f: F) -> R +where F: FnOnce(&mut Enter) -> R +{ + CLOCK.with(|cell| { + assert!(cell.get().is_none(), "default clock already set for execution context"); + + // Ensure that the clock is removed from the thread-local context + // when leaving the scope. This handles cases that involve panicking. + struct Reset<'a>(&'a Cell>); + + impl<'a> Drop for Reset<'a> { + fn drop(&mut self) { + self.0.set(None); + } + } + + let _reset = Reset(cell); + + cell.set(Some(clock as *const Clock)); + + f(enter) + }) +} diff --git a/third_party/rust/tokio-timer/src/clock/mod.rs b/third_party/rust/tokio-timer/src/clock/mod.rs new file mode 100644 index 000000000000..527a6818ce7c --- /dev/null +++ b/third_party/rust/tokio-timer/src/clock/mod.rs @@ -0,0 +1,22 @@ +//! A configurable source of time. +//! +//! This module provides an API to get the current instant in such a way that +//! the source of time may be configured. This allows mocking out the source of +//! time in tests. +//! +//! The [`now`][n] function returns the current `Instant`. By default, it delegates +//! to [`Instant::now`][std]. +//! +//! The source of time used by [`now`][n] can be configured by implementing the +//! [`Now`] trait and passing an instance to [`with_default`]. +//! +//! [n]: fn.now.html +//! [`Now`]: trait.Now.html +//! 
[std]: https://doc.rust-lang.org/std/time/struct.Instant.html +//! [`with_default`]: fn.with_default.html + +mod clock; +mod now; + +pub use self::clock::{Clock, now, with_default}; +pub use self::now::Now; diff --git a/third_party/rust/tokio-timer/src/clock/now.rs b/third_party/rust/tokio-timer/src/clock/now.rs new file mode 100644 index 000000000000..65472543c10d --- /dev/null +++ b/third_party/rust/tokio-timer/src/clock/now.rs @@ -0,0 +1,13 @@ +use std::time::Instant; + +/// Returns `Instant` values representing the current instant in time. +/// +/// This allows customizing the source of time which is especially useful for +/// testing. +/// +/// Implementations must ensure that calls to `now` return monotonically +/// increasing `Instant` values. +pub trait Now: Send + Sync + 'static { + /// Returns an instant corresponding to "now". + fn now(&self) -> Instant; +} diff --git a/third_party/rust/tokio-timer/src/deadline.rs b/third_party/rust/tokio-timer/src/deadline.rs new file mode 100644 index 000000000000..05ff827c53fc --- /dev/null +++ b/third_party/rust/tokio-timer/src/deadline.rs @@ -0,0 +1,181 @@ +use Delay; + +use futures::{Future, Poll, Async}; + +use std::error; +use std::fmt; +use std::time::Instant; + +/// Allows a given `Future` to execute until the specified deadline. +/// +/// If the inner future completes before the deadline is reached, then +/// `Deadline` completes with that value. Otherwise, `Deadline` completes with a +/// [`DeadlineError`]. +/// +/// [`DeadlineError`]: struct.DeadlineError.html +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct Deadline { + future: T, + delay: Delay, +} + +/// Error returned by `Deadline` future. +#[derive(Debug)] +pub struct DeadlineError(Kind); + +/// Deadline error variants +#[derive(Debug)] +enum Kind { + /// Inner future returned an error + Inner(T), + + /// The deadline elapsed. + Elapsed, + + /// Timer returned an error. 
+ Timer(::Error), +} + +impl Deadline { + /// Create a new `Deadline` that completes when `future` completes or when + /// `deadline` is reached. + pub fn new(future: T, deadline: Instant) -> Deadline { + Deadline::new_with_delay(future, Delay::new(deadline)) + } + + pub(crate) fn new_with_delay(future: T, delay: Delay) -> Deadline { + Deadline { + future, + delay, + } + } + + /// Gets a reference to the underlying future in this deadline. + pub fn get_ref(&self) -> &T { + &self.future + } + + /// Gets a mutable reference to the underlying future in this deadline. + pub fn get_mut(&mut self) -> &mut T { + &mut self.future + } + + /// Consumes this deadline, returning the underlying future. + pub fn into_inner(self) -> T { + self.future + } +} + +impl Future for Deadline +where T: Future, +{ + type Item = T::Item; + type Error = DeadlineError; + + fn poll(&mut self) -> Poll { + // First, try polling the future + match self.future.poll() { + Ok(Async::Ready(v)) => return Ok(Async::Ready(v)), + Ok(Async::NotReady) => {} + Err(e) => return Err(DeadlineError::inner(e)), + } + + // Now check the timer + match self.delay.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(_)) => { + Err(DeadlineError::elapsed()) + }, + Err(e) => Err(DeadlineError::timer(e)), + } + } +} + +// ===== impl DeadlineError ===== + +impl DeadlineError { + /// Create a new `DeadlineError` representing the inner future completing + /// with `Err`. + pub fn inner(err: T) -> DeadlineError { + DeadlineError(Kind::Inner(err)) + } + + /// Returns `true` if the error was caused by the inner future completing + /// with `Err`. + pub fn is_inner(&self) -> bool { + match self.0 { + Kind::Inner(_) => true, + _ => false, + } + } + + /// Consumes `self`, returning the inner future error. 
+ pub fn into_inner(self) -> Option { + match self.0 { + Kind::Inner(err) => Some(err), + _ => None, + } + } + + /// Create a new `DeadlineError` representing the inner future not + /// completing before the deadline is reached. + pub fn elapsed() -> DeadlineError { + DeadlineError(Kind::Elapsed) + } + + /// Returns `true` if the error was caused by the inner future not + /// completing before the deadline is reached. + pub fn is_elapsed(&self) -> bool { + match self.0 { + Kind::Elapsed => true, + _ => false, + } + } + + /// Creates a new `DeadlineError` representing an error encountered by the + /// timer implementation + pub fn timer(err: ::Error) -> DeadlineError { + DeadlineError(Kind::Timer(err)) + } + + /// Returns `true` if the error was caused by the timer. + pub fn is_timer(&self) -> bool { + match self.0 { + Kind::Timer(_) => true, + _ => false, + } + } + + /// Consumes `self`, returning the error raised by the timer implementation. + pub fn into_timer(self) -> Option<::Error> { + match self.0 { + Kind::Timer(err) => Some(err), + _ => None, + } + } +} + +impl error::Error for DeadlineError { + fn description(&self) -> &str { + use self::Kind::*; + + match self.0 { + Inner(ref e) => e.description(), + Elapsed => "deadline has elapsed", + Timer(ref e) => e.description(), + } + } +} + +impl fmt::Display for DeadlineError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::Kind::*; + + match self.0 { + Inner(ref e) => e.fmt(fmt), + Elapsed => "deadline has elapsed".fmt(fmt), + Timer(ref e) => e.fmt(fmt), + } + } +} diff --git a/third_party/rust/tokio-timer/src/delay.rs b/third_party/rust/tokio-timer/src/delay.rs new file mode 100644 index 000000000000..104fecff076a --- /dev/null +++ b/third_party/rust/tokio-timer/src/delay.rs @@ -0,0 +1,108 @@ +use Error; +use timer::Registration; + +use futures::{Future, Poll}; + +use std::time::Instant; + +/// A future that completes at a specified instant in time. 
+/// +/// Instances of `Delay` perform no work and complete with `()` once the +/// specified deadline has been reached. +/// +/// `Delay` has a resolution of one millisecond and should not be used for tasks +/// that require high-resolution timers. +/// +/// [`new`]: #method.new +#[derive(Debug)] +pub struct Delay { + /// The instant at which the future completes. + deadline: Instant, + + /// The link between the `Delay` instance at the timer that drives it. + /// + /// When `Delay` is created with `new`, this is initialized to `None` and is + /// lazily set in `poll`. When `poll` is called, the default for the current + /// execution context is used (obtained via `Handle::current`). + /// + /// When `delay` is created with `new_with_registration`, the value is set. + /// + /// Once `registration` is set to `Some`, it is never changed. + registration: Option, +} + +impl Delay { + /// Create a new `Delay` instance that elapses at `deadline`. + /// + /// Only millisecond level resolution is guaranteed. There is no guarantee + /// as to how the sub-millisecond portion of `deadline` will be handled. + /// `Delay` should not be used for high-resolution timer use cases. + pub fn new(deadline: Instant) -> Delay { + Delay { + deadline, + registration: None, + } + } + + pub(crate) fn new_with_registration( + deadline: Instant, + registration: Registration) -> Delay + { + Delay { + deadline, + registration: Some(registration), + } + } + + /// Returns the instant at which the future will complete. + pub fn deadline(&self) -> Instant { + self.deadline + } + + /// Returns true if the `Delay` has elapsed + /// + /// A `Delay` is elapsed when the requested duration has elapsed. + pub fn is_elapsed(&self) -> bool { + self.registration.as_ref() + .map(|r| r.is_elapsed()) + .unwrap_or(false) + } + + /// Reset the `Delay` instance to a new deadline. 
+ /// + /// Calling this function allows changing the instant at which the `Delay` + /// future completes without having to create new associated state. + /// + /// This function can be called both before and after the future has + /// completed. + pub fn reset(&mut self, deadline: Instant) { + self.deadline = deadline; + + if let Some(registration) = self.registration.as_ref() { + registration.reset(deadline); + } + } + + /// Register the delay with the timer instance for the current execution + /// context. + fn register(&mut self) { + if self.registration.is_some() { + return; + } + + self.registration = Some(Registration::new(self.deadline)); + } +} + +impl Future for Delay { + type Item = (); + type Error = Error; + + fn poll(&mut self) -> Poll { + // Ensure the `Delay` instance is associated with a timer. + self.register(); + + self.registration.as_ref().unwrap() + .poll_elapsed() + } +} diff --git a/third_party/rust/tokio-timer/src/error.rs b/third_party/rust/tokio-timer/src/error.rs new file mode 100644 index 000000000000..a6136d8b55d0 --- /dev/null +++ b/third_party/rust/tokio-timer/src/error.rs @@ -0,0 +1,78 @@ +use self::Kind::*; + +use std::error; +use std::fmt; + +/// Errors encountered by the timer implementation. +/// +/// Currently, there are two different errors that can occur: +/// +/// * `shutdown` occurs when a timer operation is attempted, but the timer +/// instance has been dropped. In this case, the operation will never be able +/// to complete and the `shutdown` error is returned. This is a permanent +/// error, i.e., once this error is observed, timer operations will never +/// succeed in the future. +/// +/// * `at_capacity` occurs when a timer operation is attempted, but the timer +/// instance is currently handling its maximum number of outstanding delays. +/// In this case, the operation is not able to be performed at the current +/// moment, and `at_capacity` is returned. 
This is a transient error, i.e., at +/// some point in the future, if the operation is attempted again, it might +/// succeed. Callers that observe this error should attempt to [shed load]. One +/// way to do this would be dropping the future that issued the timer operation. +/// +/// [shed load]: https://en.wikipedia.org/wiki/Load_Shedding +#[derive(Debug)] +pub struct Error(Kind); + +#[derive(Debug)] +enum Kind { + Shutdown, + AtCapacity, +} + +impl Error { + /// Create an error representing a shutdown timer. + pub fn shutdown() -> Error { + Error(Shutdown) + } + + /// Returns `true` if the error was caused by the timer being shutdown. + pub fn is_shutdown(&self) -> bool { + match self.0 { + Kind::Shutdown => true, + _ => false, + } + } + + /// Create an error representing a timer at capacity. + pub fn at_capacity() -> Error { + Error(AtCapacity) + } + + /// Returns `true` if the error was caused by the timer being at capacity. + pub fn is_at_capacity(&self) -> bool { + match self.0 { + Kind::AtCapacity => true, + _ => false, + } + } +} + +impl error::Error for Error { + fn description(&self) -> &str { + use self::Kind::*; + + match self.0 { + Shutdown => "timer is shutdown", + AtCapacity => "timer is at capacity and cannot create a new entry", + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use std::error::Error; + self.description().fmt(fmt) + } +} diff --git a/third_party/rust/tokio-timer/src/interval.rs b/third_party/rust/tokio-timer/src/interval.rs new file mode 100644 index 000000000000..80d09600ea37 --- /dev/null +++ b/third_party/rust/tokio-timer/src/interval.rs @@ -0,0 +1,75 @@ +use Delay; + +use clock; + +use futures::{Future, Stream, Poll}; + +use std::time::{Instant, Duration}; + +/// A stream representing notifications at fixed interval +#[derive(Debug)] +pub struct Interval { + /// Future that completes the next time the `Interval` yields a value. 
+ delay: Delay, + + /// The duration between values yielded by `Interval`. + duration: Duration, +} + +impl Interval { + /// Create a new `Interval` that starts at `at` and yields every `duration` + /// interval after that. + /// + /// Note that when it starts, it produces item too. + /// + /// The `duration` argument must be a non-zero duration. + /// + /// # Panics + /// + /// This function panics if `duration` is zero. + pub fn new(at: Instant, duration: Duration) -> Interval { + assert!(duration > Duration::new(0, 0), "`duration` must be non-zero."); + + Interval::new_with_delay(Delay::new(at), duration) + } + + /// Creates new `Interval` that yields with interval of `duration`. + /// + /// The function is shortcut for `Interval::new(Instant::now() + duration, duration)`. + /// + /// The `duration` argument must be a non-zero duration. + /// + /// # Panics + /// + /// This function panics if `duration` is zero. + pub fn new_interval(duration: Duration) -> Interval { + Interval::new(clock::now() + duration, duration) + } + + pub(crate) fn new_with_delay(delay: Delay, duration: Duration) -> Interval { + Interval { + delay, + duration, + } + } +} + +impl Stream for Interval { + type Item = Instant; + type Error = ::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + // Wait for the delay to be done + let _ = try_ready!(self.delay.poll()); + + // Get the `now` by looking at the `delay` deadline + let now = self.delay.deadline(); + + // The next interval value is `duration` after the one that just + // yielded. + self.delay.reset(now + self.duration); + + // Return the current instant + Ok(Some(now).into()) + } +} diff --git a/third_party/rust/tokio-timer/src/lib.rs b/third_party/rust/tokio-timer/src/lib.rs new file mode 100644 index 000000000000..754f9aa88b45 --- /dev/null +++ b/third_party/rust/tokio-timer/src/lib.rs @@ -0,0 +1,49 @@ +//! Utilities for scheduling work to happen after a period of time. +//! +//! 
This crate provides a number of utilities for working with periods of time: +//! +//! * [`Delay`]: A future that completes at a specified instant in time. +//! +//! * [`Interval`] A stream that yields at fixed time intervals. +//! +//! * [`Deadline`]: Wraps a future, requiring it to complete before a specified +//! instant in time, erroring if the future takes too long. +//! +//! These three types are backed by a [`Timer`] instance. In order for +//! [`Delay`], [`Interval`], and [`Deadline`] to function, the associated +//! [`Timer`] instance must be running on some thread. +//! +//! [`Delay`]: struct.Delay.html +//! [`Deadline`]: struct.Deadline.html +//! [`Interval`]: struct.Interval.html +//! [`Timer`]: timer/struct.Timer.html + +#![doc(html_root_url = "https://docs.rs/tokio-timer/0.2.5")] +#![deny(missing_docs, warnings, missing_debug_implementations)] + +extern crate tokio_executor; + +#[macro_use] +extern crate futures; + +pub mod clock; +pub mod timer; + +mod atomic; +mod deadline; +mod delay; +mod error; +mod interval; + +use std::time::{Duration, Instant}; + +pub use self::deadline::{Deadline, DeadlineError}; +pub use self::delay::Delay; +pub use self::error::Error; +pub use self::interval::Interval; +pub use self::timer::{with_default, Timer}; + +/// Create a Future that completes in `duration` from now. 
+pub fn sleep(duration: Duration) -> Delay { + Delay::new(Instant::now() + duration) +} diff --git a/third_party/rust/tokio-timer/src/timer/entry.rs b/third_party/rust/tokio-timer/src/timer/entry.rs new file mode 100644 index 000000000000..3cea9fe61b02 --- /dev/null +++ b/third_party/rust/tokio-timer/src/timer/entry.rs @@ -0,0 +1,559 @@ +use Error; +use atomic::AtomicU64; +use timer::{Handle, Inner}; + +use futures::Poll; +use futures::task::AtomicTask; + +use std::cell::UnsafeCell; +use std::ptr; +use std::sync::{Arc, Weak}; +use std::sync::atomic::{AtomicBool, AtomicPtr}; +use std::sync::atomic::Ordering::SeqCst; +use std::time::Instant; +use std::u64; + +/// Internal state shared between a `Delay` instance and the timer. +/// +/// This struct is used as a node in two intrusive data structures: +/// +/// * An atomic stack used to signal to the timer thread that the entry state +/// has changed. The timer thread will observe the entry on this stack and +/// perform any actions as necessary. +/// +/// * A doubly linked list used **only** by the timer thread. Each slot in the +/// timer wheel is a head pointer to the list of entries that must be +/// processed during that timer tick. +#[derive(Debug)] +pub(crate) struct Entry { + /// Timer internals. Using a weak pointer allows the timer to shutdown + /// without all `Delay` instances having completed. + inner: Weak, + + /// Task to notify once the deadline is reached. + task: AtomicTask, + + /// Tracks the entry state. This value contains the following information: + /// + /// * The deadline at which the entry must be "fired". + /// * A flag indicating if the entry has already been fired. + /// * Whether or not the entry transitioned to the error state. + /// + /// When an `Entry` is created, `state` is initialized to the instant at + /// which the entry must be fired. When a timer is reset to a different + /// instant, this value is changed. 
+ state: AtomicU64, + + /// When true, the entry is counted by `Inner` towards the max outstanding + /// timeouts. The drop fn uses this to know if it should decrement the + /// counter. + /// + /// One might think that it would be easier to just not create the `Entry`. + /// The problem is that `Delay` expects creating a `Registration` to always + /// return a `Registration` instance. This simplifying factor allows it to + /// improve the struct layout. To do this, we must always allocate the node. + counted: bool, + + /// True when the entry is queued in the "process" stack. This value + /// is set before pushing the value and unset after popping the value. + queued: AtomicBool, + + /// Next entry in the "process" linked list. + /// + /// Represents a strong Arc ref. + next_atomic: UnsafeCell<*mut Entry>, + + /// When the entry expires, relative to the `start` of the timer + /// (Inner::start). This is only used by the timer. + /// + /// A `Delay` instance can be reset to a different deadline by the thread + /// that owns the `Delay` instance. In this case, the timer thread will not + /// immediately know that this has happened. The timer thread must know the + /// last deadline that it saw as it uses this value to locate the entry in + /// its wheel. + /// + /// Once the timer thread observes that the instant has changed, it updates + /// the wheel and sets this value. The idea is that this value eventually + /// converges to the value of `state` as the timer thread makes updates. + when: UnsafeCell>, + + /// Next entry in the State's linked list. + /// + /// This is only accessed by the timer + next_stack: UnsafeCell>>, + + /// Previous entry in the State's linked list. + /// + /// This is only accessed by the timer and is used to unlink a canceled + /// entry. + /// + /// This is a weak reference. 
+ prev_stack: UnsafeCell<*const Entry>, +} + +/// A doubly linked stack +pub(crate) struct Stack { + head: Option>, +} + +/// A stack of `Entry` nodes +#[derive(Debug)] +pub(crate) struct AtomicStack { + /// Stack head + head: AtomicPtr, +} + +/// Entries that were removed from the stack +#[derive(Debug)] +pub(crate) struct AtomicStackEntries { + ptr: *mut Entry, +} + +/// Flag indicating a timer entry has elapsed +const ELAPSED: u64 = 1 << 63; + +/// Flag indicating a timer entry has reached an error state +const ERROR: u64 = u64::MAX; + +/// Used to indicate that the timer has shutdown. +const SHUTDOWN: *mut Entry = 1 as *mut _; + +// ===== impl Entry ===== + +impl Entry { + pub fn new(when: u64, handle: Handle) -> Entry { + assert!(when > 0 && when < u64::MAX); + + Entry { + inner: handle.into_inner(), + task: AtomicTask::new(), + state: AtomicU64::new(when), + counted: true, + queued: AtomicBool::new(false), + next_atomic: UnsafeCell::new(ptr::null_mut()), + when: UnsafeCell::new(None), + next_stack: UnsafeCell::new(None), + prev_stack: UnsafeCell::new(ptr::null_mut()), + } + } + + pub fn new_elapsed(handle: Handle) -> Entry { + Entry { + inner: handle.into_inner(), + task: AtomicTask::new(), + state: AtomicU64::new(ELAPSED), + counted: true, + queued: AtomicBool::new(false), + next_atomic: UnsafeCell::new(ptr::null_mut()), + when: UnsafeCell::new(None), + next_stack: UnsafeCell::new(None), + prev_stack: UnsafeCell::new(ptr::null_mut()), + } + } + + /// Create a new `Entry` that is in the error state. Calling `poll_elapsed` on + /// this `Entry` will always result in `Err` being returned. 
+ pub fn new_error() -> Entry { + Entry { + inner: Weak::new(), + task: AtomicTask::new(), + state: AtomicU64::new(ERROR), + counted: false, + queued: AtomicBool::new(false), + next_atomic: UnsafeCell::new(ptr::null_mut()), + when: UnsafeCell::new(None), + next_stack: UnsafeCell::new(None), + prev_stack: UnsafeCell::new(ptr::null_mut()), + } + } + + /// The current entry state as known by the timer. This is not the value of + /// `state`, but lets the timer know how to converge its state to `state`. + pub fn when_internal(&self) -> Option { + unsafe { (*self.when.get()) } + } + + pub fn set_when_internal(&self, when: Option) { + unsafe { (*self.when.get()) = when; } + } + + /// Called by `Timer` to load the current value of `state` for processing + pub fn load_state(&self) -> Option { + let state = self.state.load(SeqCst); + + if is_elapsed(state) { + None + } else { + Some(state) + } + } + + pub fn is_elapsed(&self) -> bool { + let state = self.state.load(SeqCst); + is_elapsed(state) + } + + pub fn fire(&self, when: u64) { + let mut curr = self.state.load(SeqCst); + + loop { + if is_elapsed(curr) || curr > when { + return; + } + + let next = ELAPSED | curr; + let actual = self.state.compare_and_swap(curr, next, SeqCst); + + if curr == actual { + break; + } + + curr = actual; + } + + self.task.notify(); + } + + pub fn error(&self) { + // Only transition to the error state if not currently elapsed + let mut curr = self.state.load(SeqCst); + + loop { + if is_elapsed(curr) { + return; + } + + let next = ERROR; + + let actual = self.state.compare_and_swap(curr, next, SeqCst); + + if curr == actual { + break; + } + + curr = actual; + } + + self.task.notify(); + } + + pub fn cancel(entry: &Arc) { + let state = entry.state.fetch_or(ELAPSED, SeqCst); + + if is_elapsed(state) { + // Nothing more to do + return; + } + + let inner = match entry.inner.upgrade() { + Some(inner) => inner, + None => return, + }; + + let _ = inner.queue(entry); + } + + pub fn poll_elapsed(&self) 
-> Poll<(), Error> { + use futures::Async::NotReady; + + let mut curr = self.state.load(SeqCst); + + if is_elapsed(curr) { + if curr == ERROR { + return Err(Error::shutdown()); + } else { + return Ok(().into()); + } + } + + self.task.register(); + + curr = self.state.load(SeqCst).into(); + + if is_elapsed(curr) { + if curr == ERROR { + return Err(Error::shutdown()); + } else { + return Ok(().into()); + } + } + + Ok(NotReady) + } + + pub fn reset(entry: &Arc, deadline: Instant) { + let inner = match entry.inner.upgrade() { + Some(inner) => inner, + None => return, + }; + + let when = inner.normalize_deadline(deadline); + let elapsed = inner.elapsed(); + + let mut curr = entry.state.load(SeqCst); + let mut notify; + + loop { + // In these two cases, there is no work to do when resetting the + // timer. If the `Entry` is in an error state, then it cannot be + // used anymore. If resetting the entry to the current value, then + // the reset is a noop. + if curr == ERROR || curr == when { + return; + } + + let next; + + if when <= elapsed { + next = ELAPSED; + notify = !is_elapsed(curr); + } else { + next = when; + notify = true; + } + + let actual = entry.state.compare_and_swap( + curr, next, SeqCst); + + if curr == actual { + break; + } + + curr = actual; + } + + if notify { + let _ = inner.queue(entry); + } + } +} + +fn is_elapsed(state: u64) -> bool { + state & ELAPSED == ELAPSED +} + +impl Drop for Entry { + fn drop(&mut self) { + if !self.counted { + return; + } + + let inner = match self.inner.upgrade() { + Some(inner) => inner, + None => return, + }; + + inner.decrement(); + } +} + +unsafe impl Send for Entry {} +unsafe impl Sync for Entry {} + +// ===== impl Stack ===== + +impl Stack { + pub fn new() -> Stack { + Stack { head: None } + } + + pub fn is_empty(&self) -> bool { + self.head.is_none() + } + + /// Push an entry to the head of the linked list + pub fn push(&mut self, entry: Arc) { + // Get a pointer to the entry to for the prev link + let ptr: *const 
Entry = &*entry as *const _; + + // Remove the old head entry + let old = self.head.take(); + + unsafe { + // Ensure the entry is not already in a stack. + debug_assert!((*entry.next_stack.get()).is_none()); + debug_assert!((*entry.prev_stack.get()).is_null()); + + if let Some(ref entry) = old.as_ref() { + debug_assert!({ + // The head is not already set to the entry + ptr != &***entry as *const _ + }); + + // Set the previous link on the old head + *entry.prev_stack.get() = ptr; + } + + // Set this entry's next pointer + *entry.next_stack.get() = old; + + } + + // Update the head pointer + self.head = Some(entry); + } + + /// Pop the head of the linked list + pub fn pop(&mut self) -> Option> { + let entry = self.head.take(); + + unsafe { + if let Some(entry) = entry.as_ref() { + self.head = (*entry.next_stack.get()).take(); + + if let Some(entry) = self.head.as_ref() { + *entry.prev_stack.get() = ptr::null(); + } + + *entry.prev_stack.get() = ptr::null(); + } + } + + entry + } + + /// Remove the entry from the linked list + /// + /// The caller must ensure that the entry actually is contained by the list. + pub fn remove(&mut self, entry: &Entry) { + unsafe { + // Ensure that the entry is in fact contained by the stack + debug_assert!({ + // This walks the full linked list even if an entry is found. 
+ let mut next = self.head.as_ref(); + let mut contains = false; + + while let Some(n) = next { + if entry as *const _ == &**n as *const _ { + debug_assert!(!contains); + contains = true; + } + + next = (*n.next_stack.get()).as_ref(); + } + + contains + }); + + // Unlink `entry` from the next node + let next = (*entry.next_stack.get()).take(); + + if let Some(next) = next.as_ref() { + (*next.prev_stack.get()) = *entry.prev_stack.get(); + } + + // Unlink `entry` from the prev node + + if let Some(prev) = (*entry.prev_stack.get()).as_ref() { + *prev.next_stack.get() = next; + } else { + // It is the head + self.head = next; + } + + // Unset the prev pointer + *entry.prev_stack.get() = ptr::null(); + } + } +} + +// ===== impl AtomicStack ===== + +impl AtomicStack { + pub fn new() -> AtomicStack { + AtomicStack { head: AtomicPtr::new(ptr::null_mut()) } + } + + /// Push an entry onto the stack. + /// + /// Returns `true` if the entry was pushed, `false` if the entry is already + /// on the stack, `Err` if the timer is shutdown. + pub fn push(&self, entry: &Arc) -> Result { + // First, set the queued bit on the entry + let queued = entry.queued.fetch_or(true, SeqCst).into(); + + if queued { + // Already queued, nothing more to do + return Ok(false); + } + + let ptr = Arc::into_raw(entry.clone()) as *mut _; + + let mut curr = self.head.load(SeqCst); + + loop { + if curr == SHUTDOWN { + // Don't leak the entry node + let _ = unsafe { Arc::from_raw(ptr) }; + + return Err(Error::shutdown()); + } + + // Update the `next` pointer. This is safe because setting the queued + // bit is a "lock" on this field. 
+ unsafe { + *(entry.next_atomic.get()) = curr; + } + + let actual = self.head.compare_and_swap(curr, ptr, SeqCst); + + if actual == curr { + break; + } + + curr = actual; + } + + Ok(true) + } + + /// Take all entries from the stack + pub fn take(&self) -> AtomicStackEntries { + let ptr = self.head.swap(ptr::null_mut(), SeqCst); + AtomicStackEntries { ptr } + } + + /// Drain all remaining nodes in the stack and prevent any new nodes from + /// being pushed onto the stack. + pub fn shutdown(&self) { + // Shutdown the processing queue + let ptr = self.head.swap(SHUTDOWN, SeqCst); + + // Let the drop fn of `AtomicStackEntries` handle draining the stack + drop(AtomicStackEntries { ptr }); + } +} + +// ===== impl AtomicStackEntries ===== + +impl Iterator for AtomicStackEntries { + type Item = Arc; + + fn next(&mut self) -> Option { + if self.ptr.is_null() { + return None; + } + + // Convert the pointer to an `Arc` + let entry = unsafe { Arc::from_raw(self.ptr) }; + + // Update `self.ptr` to point to the next element of the stack + self.ptr = unsafe { (*entry.next_atomic.get()) }; + + // Unset the queued flag + let res = entry.queued.fetch_and(false, SeqCst); + debug_assert!(res); + + // Return the entry + Some(entry) + } +} + +impl Drop for AtomicStackEntries { + fn drop(&mut self) { + while let Some(entry) = self.next() { + // Flag the entry as errored + entry.error(); + } + } +} diff --git a/third_party/rust/tokio-timer/src/timer/handle.rs b/third_party/rust/tokio-timer/src/timer/handle.rs new file mode 100644 index 000000000000..d06ece1f2c10 --- /dev/null +++ b/third_party/rust/tokio-timer/src/timer/handle.rs @@ -0,0 +1,128 @@ +use {Error, Delay, Deadline, Interval}; +use timer::{Registration, Inner}; + +use tokio_executor::Enter; + +use std::cell::RefCell; +use std::sync::{Arc, Weak}; +use std::time::{Duration, Instant}; + +/// Handle to timer instance. +/// +/// The `Handle` allows creating `Delay` instances that are driven by the +/// associated timer. 
+/// +/// A `Handle` is obtained by calling [`Timer::handle`]. +/// +/// [`Timer::handle`]: struct.Timer.html#method.handle +#[derive(Debug, Clone)] +pub struct Handle { + inner: Weak, +} + +/// Tracks the timer for the current execution context. +thread_local!(static CURRENT_TIMER: RefCell> = RefCell::new(None)); + +/// Set the default timer for the duration of the closure. +/// +/// From within the closure, [`Delay`] instances that are created via +/// [`Delay::new`] can be used. +/// +/// # Panics +/// +/// This function panics if there already is a default timer set. +/// +/// [`Delay`]: ../struct.Delay.html +/// [`Delay::new`]: ../struct.Delay.html#method.new +pub fn with_default(handle: &Handle, enter: &mut Enter, f: F) -> R +where F: FnOnce(&mut Enter) -> R +{ + // Ensure that the timer is removed from the thread-local context + // when leaving the scope. This handles cases that involve panicking. + struct Reset; + + impl Drop for Reset { + fn drop(&mut self) { + CURRENT_TIMER.with(|current| { + let mut current = current.borrow_mut(); + *current = None; + }); + } + } + + // This ensures the value for the current timer gets reset even if there is + // a panic. + let _r = Reset; + + CURRENT_TIMER.with(|current| { + { + let mut current = current.borrow_mut(); + assert!(current.is_none(), "default Tokio timer already set \ + for execution context"); + *current = Some(handle.clone()); + } + + f(enter) + }) +} + +impl Handle { + pub(crate) fn new(inner: Weak) -> Handle { + Handle { inner } + } + + /// Returns a handle to the current timer. + /// + /// The current timer is the timer that is currently set as default using + /// [`with_default`]. + /// + /// This function should only be called from within the context of + /// [`with_default`]. Calling this function from outside of this context + /// will return a `Handle` that does not reference a timer. `Delay` + /// instances created with this handle will error. 
+ /// + /// [`with_default`]: ../fn.with_default.html + pub fn current() -> Handle { + Handle::try_current() + .unwrap_or(Handle { inner: Weak::new() }) + } + + /// Create a `Delay` driven by this handle's associated `Timer`. + pub fn delay(&self, deadline: Instant) -> Delay { + let registration = Registration::new_with_handle(deadline, self.clone()); + Delay::new_with_registration(deadline, registration) + } + + /// Create a `Deadline` driven by this handle's associated `Timer`. + pub fn deadline(&self, future: T, deadline: Instant) -> Deadline { + Deadline::new_with_delay(future, self.delay(deadline)) + } + + /// Create a new `Interval` that starts at `at` and yields every `duration` + /// interval after that. + pub fn interval(&self, at: Instant, duration: Duration) -> Interval { + Interval::new_with_delay(self.delay(at), duration) + } + + /// Try to get a handle to the current timer. + /// + /// Returns `Err` if no handle is found. + pub(crate) fn try_current() -> Result { + CURRENT_TIMER.with(|current| { + match *current.borrow() { + Some(ref handle) => Ok(handle.clone()), + None => Err(Error::shutdown()), + } + }) + } + + /// Try to return a strong ref to the inner + pub(crate) fn inner(&self) -> Option> { + self.inner.upgrade() + } + + /// Consume the handle, returning the weak Inner ref. + pub(crate) fn into_inner(self) -> Weak { + self.inner + } +} diff --git a/third_party/rust/tokio-timer/src/timer/level.rs b/third_party/rust/tokio-timer/src/timer/level.rs new file mode 100644 index 000000000000..8be0ba18095e --- /dev/null +++ b/third_party/rust/tokio-timer/src/timer/level.rs @@ -0,0 +1,201 @@ +use timer::{entry, Entry}; + +use std::fmt; +use std::sync::Arc; + +/// Wheel for a single level in the timer. This wheel contains 64 slots. +pub(crate) struct Level { + level: usize, + + /// Bit field tracking which slots currently contain entries. + /// + /// Using a bit field to track slots that contain entries allows avoiding a + /// scan to find entries. 
This field is updated when entries are added or + /// removed from a slot. + /// + /// The least-significant bit represents slot zero. + occupied: u64, + + /// Slots + slot: [entry::Stack; LEVEL_MULT], +} + +/// Indicates when a slot must be processed next. +#[derive(Debug)] +pub struct Expiration { + /// The level containing the slot. + pub level: usize, + + /// The slot index. + pub slot: usize, + + /// The instant at which the slot needs to be processed. + pub deadline: u64, +} + +/// Level multiplier. +/// +/// Being a power of 2 is very important. +const LEVEL_MULT: usize = 64; + +impl Level { + pub fn new(level: usize) -> Level { + // Rust's derived implementations for arrays require that the value + // contained by the array be `Copy`. So, here we have to manually + // initialize every single slot. + macro_rules! s { + () => { entry::Stack::new() }; + }; + + Level { + level, + occupied: 0, + slot: [ + // It does not look like the necessary traits are + // derived for [T; 64]. + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + s!(), s!(), s!(), s!(), s!(), s!(), s!(), s!(), + ], + } + } + + /// Finds the slot that needs to be processed next and returns the slot and + /// `Instant` at which this slot must be processed. + pub fn next_expiration(&self, now: u64) -> Option { + // Use the `occupied` bit field to get the index of the next slot that + // needs to be processed. + let slot = match self.next_occupied_slot(now) { + Some(slot) => slot, + None => return None, + }; + + // From the slot index, calculate the `Instant` at which it needs to be + // processed. This value *must* be in the future with respect to `now`. 
+ + let level_range = level_range(self.level); + let slot_range = slot_range(self.level); + + // TODO: This can probably be simplified w/ power of 2 math + let level_start = now - (now % level_range); + let deadline = level_start + slot as u64 * slot_range; + + debug_assert!(deadline >= now, "deadline={}; now={}; level={}; slot={}; occupied={:b}", + deadline, now, self.level, slot, self.occupied); + + Some(Expiration { + level: self.level, + slot, + deadline, + }) + } + + fn next_occupied_slot(&self, now: u64) -> Option { + if self.occupied == 0 { + return None; + } + + // Get the slot for now using Maths + let now_slot = (now / slot_range(self.level)) as usize; + let occupied = self.occupied.rotate_right(now_slot as u32); + let zeros = occupied.trailing_zeros() as usize; + let slot = (zeros + now_slot) % 64; + + Some(slot) + } + + pub fn add_entry(&mut self, entry: Arc, when: u64) { + let slot = slot_for(when, self.level); + + self.slot[slot].push(entry); + self.occupied |= occupied_bit(slot); + } + + pub fn remove_entry(&mut self, entry: &Entry, when: u64) { + let slot = slot_for(when, self.level); + + self.slot[slot].remove(entry); + + if self.slot[slot].is_empty() { + // The bit is currently set + debug_assert!(self.occupied & occupied_bit(slot) != 0); + + // Unset the bit + self.occupied ^= occupied_bit(slot); + } + } + + pub fn pop_entry_slot(&mut self, slot: usize) -> Option> { + let ret = self.slot[slot].pop(); + + if ret.is_some() && self.slot[slot].is_empty() { + // The bit is currently set + debug_assert!(self.occupied & occupied_bit(slot) != 0); + + self.occupied ^= occupied_bit(slot); + } + + ret + } +} + +impl Drop for Level { + fn drop(&mut self) { + while let Some(slot) = self.next_occupied_slot(0) { + // This should always have one + let entry = self.pop_entry_slot(slot) + .expect("occupied bit set invalid"); + + entry.error(); + } + } +} + +impl fmt::Debug for Level { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + 
fmt.debug_struct("Level") + .field("occupied", &self.occupied) + .finish() + } +} + +fn occupied_bit(slot: usize) -> u64 { + (1 << slot) +} + +fn slot_range(level: usize) -> u64 { + LEVEL_MULT.pow(level as u32) as u64 +} + +fn level_range(level: usize) -> u64 { + LEVEL_MULT as u64 * slot_range(level) +} + +/// Convert a duration (milliseconds) and a level to a slot position +fn slot_for(duration: u64, level: usize) -> usize { + ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_slot_for() { + for pos in 1..64 { + assert_eq!(pos as usize, slot_for(pos, 0)); + } + + for level in 1..5 { + for pos in level..64 { + let a = pos * 64_usize.pow(level as u32); + assert_eq!(pos as usize, slot_for(a as u64, level)); + } + } + } +} diff --git a/third_party/rust/tokio-timer/src/timer/mod.rs b/third_party/rust/tokio-timer/src/timer/mod.rs new file mode 100644 index 000000000000..d9501a7c5c59 --- /dev/null +++ b/third_party/rust/tokio-timer/src/timer/mod.rs @@ -0,0 +1,653 @@ +//! Timer implementation. +//! +//! This module contains the types needed to run a timer. +//! +//! The [`Timer`] type runs the timer logic. It holds all the necessary state +//! to track all associated [`Delay`] instances and delivering notifications +//! once the deadlines are reached. +//! +//! The [`Handle`] type is a reference to a [`Timer`] instance. This type is +//! `Clone`, `Send`, and `Sync`. This type is used to create instances of +//! [`Delay`]. +//! +//! The [`Now`] trait describes how to get an `Instance` representing the +//! current moment in time. [`SystemNow`] is the default implementation, where +//! [`Now::now`] is implemented by calling `Instant::now`. +//! +//! [`Timer`] is generic over [`Now`]. This allows the source of time to be +//! customized. This ability is especially useful in tests and any environment +//! where determinism is necessary. +//! +//! 
Note, when using the Tokio runtime, the `Timer` does not need to be manually +//! setup as the runtime comes pre-configured with a `Timer` instance. +//! +//! [`Timer`]: struct.Timer.html +//! [`Handle`]: struct.Handle.html +//! [`Delay`]: ../struct.Delay.html +//! [`Now`]: trait.Now.html +//! [`Now::now`]: trait.Now.html#method.now +//! [`SystemNow`]: struct.SystemNow.html + +// This allows the usage of the old `Now` trait. +#![allow(deprecated)] + +mod entry; +mod handle; +mod level; +mod now; +mod registration; + +use self::entry::Entry; +use self::level::{Level, Expiration}; + +pub use self::handle::{Handle, with_default}; +pub use self::now::{Now, SystemNow}; +pub(crate) use self::registration::Registration; + +use Error; +use atomic::AtomicU64; + +use tokio_executor::park::{Park, Unpark, ParkThread}; + +use std::{cmp, fmt}; +use std::time::{Duration, Instant}; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::usize; + +/// Timer implementation that drives [`Delay`], [`Interval`], and [`Deadline`]. +/// +/// A `Timer` instance tracks the state necessary for managing time and +/// notifying the [`Delay`] instances once their deadlines are reached. +/// +/// It is expected that a single `Timer` instance manages many individual +/// `Delay` instances. The `Timer` implementation is thread-safe and, as such, +/// is able to handle callers from across threads. +/// +/// Callers do not use `Timer` directly to create `Delay` instances. Instead, +/// [`Handle`] is used. A handle for the timer instance is obtained by calling +/// [`handle`]. [`Handle`] is the type that implements `Clone` and is `Send + +/// Sync`. +/// +/// After creating the `Timer` instance, the caller must repeatedly call +/// [`turn`]. The timer will perform no work unless [`turn`] is called +/// repeatedly. +/// +/// The `Timer` has a resolution of one millisecond. 
Any unit of time that falls +/// between milliseconds are rounded up to the next millisecond. +/// +/// When the `Timer` instance is dropped, any outstanding `Delay` instance that +/// has not elapsed will be notified with an error. At this point, calling +/// `poll` on the `Delay` instance will result in `Err` being returned. +/// +/// # Implementation +/// +/// `Timer` is based on the [paper by Varghese and Lauck][paper]. +/// +/// A hashed timing wheel is a vector of slots, where each slot handles a time +/// slice. As time progresses, the timer walks over the slot for the current +/// instant, and processes each entry for that slot. When the timer reaches the +/// end of the wheel, it starts again at the beginning. +/// +/// The `Timer` implementation maintains six wheels arranged in a set of levels. +/// As the levels go up, the slots of the associated wheel represent larger +/// intervals of time. At each level, the wheel has 64 slots. Each slot covers a +/// range of time equal to the wheel at the lower level. At level zero, each +/// slot represents one millisecond of time. +/// +/// The wheels are: +/// +/// * Level 0: 64 x 1 millisecond slots. +/// * Level 1: 64 x 64 millisecond slots. +/// * Level 2: 64 x ~4 second slots. +/// * Level 3: 64 x ~4 minute slots. +/// * Level 4: 64 x ~4 hour slots. +/// * Level 5: 64 x ~12 day slots. +/// +/// When the timer processes entries at level zero, it will notify all the +/// [`Delay`] instances as their deadlines have been reached. For all higher +/// levels, all entries will be redistributed across the wheel at the next level +/// down. Eventually, as time progresses, entries will `Delay` instances will +/// either be canceled (dropped) or their associated entries will reach level +/// zero and be notified. 
+/// +/// [`Delay`]: ../struct.Delay.html +/// [`Interval`]: ../struct.Interval.html +/// [`Deadline`]: ../struct.Deadline.html +/// [paper]: http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf +/// [`handle`]: #method.handle +/// [`turn`]: #method.turn +/// [`Handle`]: struct.Handle.html +#[derive(Debug)] +pub struct Timer { + /// Shared state + inner: Arc, + + /// The number of milliseconds elapsed since the timer started. + elapsed: u64, + + /// Timer wheel. + /// + /// Levels: + /// + /// * 1 ms slots / 64 ms range + /// * 64 ms slots / ~ 4 sec range + /// * ~ 4 sec slots / ~ 4 min range + /// * ~ 4 min slots / ~ 4 hr range + /// * ~ 4 hr slots / ~ 12 day range + /// * ~ 12 day slots / ~ 2 yr range + levels: Vec, + + /// Thread parker. The `Timer` park implementation delegates to this. + park: T, + + /// Source of "now" instances + now: N, +} + +/// Return value from the `turn` method on `Timer`. +/// +/// Currently this value doesn't actually provide any functionality, but it may +/// in the future give insight into what happened during `turn`. +#[derive(Debug)] +pub struct Turn(()); + +/// Timer state shared between `Timer`, `Handle`, and `Registration`. +pub(crate) struct Inner { + /// The instant at which the timer started running. + start: Instant, + + /// The last published timer `elapsed` value. + elapsed: AtomicU64, + + /// Number of active timeouts + num: AtomicUsize, + + /// Head of the "process" linked list. + process: entry::AtomicStack, + + /// Unparks the timer thread. + unpark: Box, +} + +/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots +/// each, the timer is able to track time up to 2 years into the future with a +/// precision of 1 millisecond. +const NUM_LEVELS: usize = 6; + +/// The maximum duration of a delay +const MAX_DURATION: u64 = 1 << (6 * NUM_LEVELS); + +/// Maximum number of timeouts the system can handle concurrently. 
+const MAX_TIMEOUTS: usize = usize::MAX >> 1; + +// ===== impl Timer ===== + +impl Timer +where T: Park +{ + /// Create a new `Timer` instance that uses `park` to block the current + /// thread. + /// + /// Once the timer has been created, a handle can be obtained using + /// [`handle`]. The handle is used to create `Delay` instances. + /// + /// Use `default` when constructing a `Timer` using the default `park` + /// instance. + /// + /// [`handle`]: #method.handle + pub fn new(park: T) -> Self { + Timer::new_with_now(park, SystemNow::new()) + } +} + +impl Timer { + /// Returns a reference to the underlying `Park` instance. + pub fn get_park(&self) -> &T { + &self.park + } + + /// Returns a mutable reference to the underlying `Park` instance. + pub fn get_park_mut(&mut self) -> &mut T { + &mut self.park + } +} + +impl Timer +where T: Park, + N: Now, +{ + /// Create a new `Timer` instance that uses `park` to block the current + /// thread and `now` to get the current `Instant`. + /// + /// Specifying the source of time is useful when testing. + pub fn new_with_now(park: T, mut now: N) -> Self { + let unpark = Box::new(park.unpark()); + + let levels = (0..NUM_LEVELS) + .map(Level::new) + .collect(); + + Timer { + inner: Arc::new(Inner::new(now.now(), unpark)), + elapsed: 0, + levels, + park, + now, + } + } + + /// Returns a handle to the timer. + /// + /// The `Handle` is how `Delay` instances are created. The `Delay` instances + /// can either be created directly or the `Handle` instance can be passed to + /// `with_default`, setting the timer as the default timer for the execution + /// context. + pub fn handle(&self) -> Handle { + Handle::new(Arc::downgrade(&self.inner)) + } + + /// Performs one iteration of the timer loop. + /// + /// This function must be called repeatedly in order for the `Timer` + /// instance to make progress. This is where the work happens. 
+ /// + /// The `Timer` will use the `Park` instance that was specified in [`new`] + /// to block the current thread until the next `Delay` instance elapses. One + /// call to `turn` results in at most one call to `park.park()`. + /// + /// # Return + /// + /// On success, `Ok(Turn)` is returned, where `Turn` is a placeholder type + /// that currently does nothing but may, in the future, have functions add + /// to provide information about the call to `turn`. + /// + /// If the call to `park.park()` fails, then `Err` is returned with the + /// error. + /// + /// [`new`]: #method.new + pub fn turn(&mut self, max_wait: Option) -> Result { + match max_wait { + Some(timeout) => self.park_timeout(timeout)?, + None => self.park()?, + } + + Ok(Turn(())) + } + + /// Returns the instant at which the next timeout expires. + fn next_expiration(&self) -> Option { + // Check all levels + for level in 0..NUM_LEVELS { + if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) { + // There cannot be any expirations at a higher level that happen + // before this one. + debug_assert!({ + let mut res = true; + + for l2 in (level+1)..NUM_LEVELS { + if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) { + if e2.deadline < expiration.deadline { + res = false; + } + } + } + + res + }); + + return Some(expiration); + } + } + + None + } + + /// Converts an `Expiration` to an `Instant`. 
+ fn expiration_instant(&self, expiration: &Expiration) -> Instant { + self.inner.start + Duration::from_millis(expiration.deadline) + } + + /// Run timer related logic + fn process(&mut self) { + let now = ms(self.now.now() - self.inner.start, Round::Down); + + loop { + let expiration = match self.next_expiration() { + Some(expiration) => expiration, + None => break, + }; + + if expiration.deadline > now { + // This expiration should not fire on this tick + break; + } + + // Process the slot, either moving it down a level or firing the + // timeout if currently at the final (boss) level. + self.process_expiration(&expiration); + + self.set_elapsed(expiration.deadline); + } + + self.set_elapsed(now); + } + + fn set_elapsed(&mut self, when: u64) { + assert!(self.elapsed <= when, "elapsed={:?}; when={:?}", self.elapsed, when); + + if when > self.elapsed { + self.elapsed = when; + self.inner.elapsed.store(when, SeqCst); + } else { + assert_eq!(self.elapsed, when); + } + } + + fn process_expiration(&mut self, expiration: &Expiration) { + while let Some(entry) = self.pop_entry(expiration) { + if expiration.level == 0 { + let when = entry.when_internal() + .expect("invalid internal entry state"); + + debug_assert_eq!(when, expiration.deadline); + + // Fire the entry + entry.fire(when); + + // Track that the entry has been fired + entry.set_when_internal(None); + } else { + let when = entry.when_internal() + .expect("entry not tracked"); + + let next_level = expiration.level - 1; + + self.levels[next_level] + .add_entry(entry, when); + } + } + } + + fn pop_entry(&mut self, expiration: &Expiration) -> Option> { + self.levels[expiration.level].pop_entry_slot(expiration.slot) + } + + /// Process the entry queue + /// + /// This handles adding and canceling timeouts. 
+ fn process_queue(&mut self) { + for entry in self.inner.process.take() { + match (entry.when_internal(), entry.load_state()) { + (None, None) => { + // Nothing to do + } + (Some(when), None) => { + // Remove the entry + self.clear_entry(&entry, when); + } + (None, Some(when)) => { + // Queue the entry + self.add_entry(entry, when); + } + (Some(curr), Some(next)) => { + self.clear_entry(&entry, curr); + self.add_entry(entry, next); + } + } + } + } + + fn clear_entry(&mut self, entry: &Arc, when: u64) { + // Get the level at which the entry should be stored + let level = self.level_for(when); + self.levels[level].remove_entry(entry, when); + + entry.set_when_internal(None); + } + + /// Fire the entry if it needs to, otherwise queue it to be processed later. + /// + /// Returns `None` if the entry was fired. + fn add_entry(&mut self, entry: Arc, when: u64) { + if when <= self.elapsed { + // The entry's deadline has elapsed, so fire it and update the + // internal state accordingly. + entry.set_when_internal(None); + entry.fire(when); + + return; + } else if when - self.elapsed > MAX_DURATION { + // The entry's deadline is invalid, so error it and update the + // internal state accordingly. 
+ entry.set_when_internal(None); + entry.error(); + + return; + } + + // Get the level at which the entry should be stored + let level = self.level_for(when); + + entry.set_when_internal(Some(when)); + self.levels[level].add_entry(entry, when); + + debug_assert!({ + self.levels[level].next_expiration(self.elapsed) + .map(|e| e.deadline >= self.elapsed) + .unwrap_or(true) + }); + } + + fn level_for(&self, when: u64) -> usize { + level_for(self.elapsed, when) + } +} + +fn level_for(elapsed: u64, when: u64) -> usize { + let masked = elapsed ^ when; + + assert!(masked != 0, "elapsed={}; when={}", elapsed, when); + + let leading_zeros = masked.leading_zeros() as usize; + let significant = 63 - leading_zeros; + significant / 6 +} + +impl Default for Timer { + fn default() -> Self { + Timer::new(ParkThread::new()) + } +} + +impl Park for Timer +where T: Park, + N: Now, +{ + type Unpark = T::Unpark; + type Error = T::Error; + + fn unpark(&self) -> Self::Unpark { + self.park.unpark() + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.process_queue(); + + match self.next_expiration() { + Some(expiration) => { + let now = self.now.now(); + let deadline = self.expiration_instant(&expiration); + + if deadline > now { + self.park.park_timeout(deadline - now)?; + } else { + self.park.park_timeout(Duration::from_secs(0))?; + } + } + None => { + self.park.park()?; + } + } + + self.process(); + + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.process_queue(); + + match self.next_expiration() { + Some(expiration) => { + let now = self.now.now(); + let deadline = self.expiration_instant(&expiration); + + if deadline > now { + self.park.park_timeout(cmp::min(deadline - now, duration))?; + } else { + self.park.park_timeout(Duration::from_secs(0))?; + } + } + None => { + self.park.park_timeout(duration)?; + } + } + + self.process(); + + Ok(()) + } +} + +impl Drop for Timer { + fn drop(&mut self) { + // Shutdown the stack of 
entries to process, preventing any new entries + // from being pushed. + self.inner.process.shutdown(); + } +} + +// ===== impl Inner ===== + +impl Inner { + fn new(start: Instant, unpark: Box) -> Inner { + Inner { + num: AtomicUsize::new(0), + elapsed: AtomicU64::new(0), + process: entry::AtomicStack::new(), + start, + unpark, + } + } + + fn elapsed(&self) -> u64 { + self.elapsed.load(SeqCst) + } + + /// Increment the number of active timeouts + fn increment(&self) -> Result<(), Error> { + let mut curr = self.num.load(SeqCst); + + loop { + if curr == MAX_TIMEOUTS { + return Err(Error::at_capacity()); + } + + let actual = self.num.compare_and_swap(curr, curr + 1, SeqCst); + + if curr == actual { + return Ok(()); + } + + curr = actual; + } + } + + /// Decrement the number of active timeouts + fn decrement(&self) { + let prev = self.num.fetch_sub(1, SeqCst); + debug_assert!(prev <= MAX_TIMEOUTS); + } + + fn queue(&self, entry: &Arc) -> Result<(), Error> { + if self.process.push(entry)? { + // The timer is notified so that it can process the timeout + self.unpark.unpark(); + } + + Ok(()) + } + + fn normalize_deadline(&self, deadline: Instant) -> u64 { + if deadline < self.start { + return 0; + } + + ms(deadline - self.start, Round::Up) + } +} + +impl fmt::Debug for Inner { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Inner") + .finish() + } +} + +enum Round { + Up, + Down, +} + +/// Convert a `Duration` to milliseconds, rounding up and saturating at +/// `u64::MAX`. +/// +/// The saturating is fine because `u64::MAX` milliseconds are still many +/// million years. +#[inline] +fn ms(duration: Duration, round: Round) -> u64 { + const NANOS_PER_MILLI: u32 = 1_000_000; + const MILLIS_PER_SEC: u64 = 1_000; + + // Round up. 
+ let millis = match round { + Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI, + Round::Down => duration.subsec_nanos() / NANOS_PER_MILLI, + }; + + duration.as_secs().saturating_mul(MILLIS_PER_SEC).saturating_add(millis as u64) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_level_for() { + for pos in 1..64 { + assert_eq!(0, level_for(0, pos), "level_for({}) -- binary = {:b}", pos, pos); + } + + for level in 1..5 { + for pos in level..64 { + let a = pos * 64_usize.pow(level as u32); + assert_eq!(level, level_for(0, a as u64), + "level_for({}) -- binary = {:b}", a, a); + + if pos > level { + let a = a - 1; + assert_eq!(level, level_for(0, a as u64), + "level_for({}) -- binary = {:b}", a, a); + } + + if pos < 64 { + let a = a + 1; + assert_eq!(level, level_for(0, a as u64), + "level_for({}) -- binary = {:b}", a, a); + } + } + } + } +} diff --git a/third_party/rust/tokio-timer/src/timer/now.rs b/third_party/rust/tokio-timer/src/timer/now.rs new file mode 100644 index 000000000000..bc8ca7808c1f --- /dev/null +++ b/third_party/rust/tokio-timer/src/timer/now.rs @@ -0,0 +1,10 @@ +use std::time::Instant; + +#[doc(hidden)] +#[deprecated(since = "0.2.4", note = "use clock::Now instead")] +pub trait Now { + /// Returns an instant corresponding to "now". + fn now(&mut self) -> Instant; +} + +pub use ::clock::Clock as SystemNow; diff --git a/third_party/rust/tokio-timer/src/timer/registration.rs b/third_party/rust/tokio-timer/src/timer/registration.rs new file mode 100644 index 000000000000..8df21abab9da --- /dev/null +++ b/third_party/rust/tokio-timer/src/timer/registration.rs @@ -0,0 +1,82 @@ +use Error; +use timer::{Handle, Entry}; + +use futures::Poll; + +use std::sync::Arc; +use std::time::Instant; + +/// Registration with a timer. 
+/// +/// The association between a `Delay` instance and a timer is done lazily in +/// `poll` +#[derive(Debug)] +pub(crate) struct Registration { + entry: Arc, +} + +impl Registration { + pub fn new(deadline: Instant) -> Registration { + fn is_send() {} + is_send::(); + + match Handle::try_current() { + Ok(handle) => Registration::new_with_handle(deadline, handle), + Err(_) => Registration::new_error(), + } + } + + pub fn new_with_handle(deadline: Instant, handle: Handle) -> Registration { + let inner = match handle.inner() { + Some(inner) => inner, + None => return Registration::new_error(), + }; + + // Increment the number of active timeouts + if inner.increment().is_err() { + return Registration::new_error(); + } + + let when = inner.normalize_deadline(deadline); + + if when <= inner.elapsed() { + // The deadline has already elapsed, there is no point creating the + // structures. + return Registration { + entry: Arc::new(Entry::new_elapsed(handle)), + }; + } + + let entry = Arc::new(Entry::new(when, handle)); + + if inner.queue(&entry).is_err() { + // The timer has shutdown, transition the entry to the error state. 
+ entry.error(); + } + + Registration { entry } + } + + pub fn reset(&self, deadline: Instant) { + Entry::reset(&self.entry, deadline); + } + + fn new_error() -> Registration { + let entry = Arc::new(Entry::new_error()); + Registration { entry } + } + + pub fn is_elapsed(&self) -> bool { + self.entry.is_elapsed() + } + + pub fn poll_elapsed(&self) -> Poll<(), Error> { + self.entry.poll_elapsed() + } +} + +impl Drop for Registration { + fn drop(&mut self) { + Entry::cancel(&self.entry); + } +} diff --git a/third_party/rust/tokio-timer/tests/clock.rs b/third_party/rust/tokio-timer/tests/clock.rs new file mode 100644 index 000000000000..d8bcaaebd725 --- /dev/null +++ b/third_party/rust/tokio-timer/tests/clock.rs @@ -0,0 +1,51 @@ +extern crate tokio_executor; +extern crate tokio_timer; + +use tokio_timer::clock; +use tokio_timer::clock::*; + +use std::time::Instant; + +struct ConstNow(Instant); + +impl Now for ConstNow { + fn now(&self) -> Instant { + self.0 + } +} + +#[test] +fn default_clock() { + let a = Instant::now(); + let b = clock::now(); + let c = Clock::new().now(); + + assert!(a <= b); + assert!(b <= c); +} + +#[test] +fn custom_clock() { + let now = ConstNow(Instant::now()); + let clock = Clock::new_with_now(now); + + let a = Instant::now(); + let b = clock.now(); + + assert!(b <= a); +} + +#[test] +fn execution_context() { + let now = ConstNow(Instant::now()); + let clock = Clock::new_with_now(now); + + let mut enter = tokio_executor::enter().unwrap(); + + with_default(&clock, &mut enter, |_| { + let a = Instant::now(); + let b = clock::now(); + + assert!(b <= a); + }); +} diff --git a/third_party/rust/tokio-timer/tests/deadline.rs b/third_party/rust/tokio-timer/tests/deadline.rs new file mode 100644 index 000000000000..229a23cbef17 --- /dev/null +++ b/third_party/rust/tokio-timer/tests/deadline.rs @@ -0,0 +1,105 @@ +extern crate futures; +extern crate tokio_executor; +extern crate tokio_timer; + +#[macro_use] +mod support; +use support::*; + +use 
tokio_timer::*; + +use futures::{future, Future}; +use futures::sync::oneshot; + +#[test] +fn simultaneous_deadline_future_completion() { + mocked(|_, time| { + // Create a future that is immediately ready + let fut = future::ok::<_, ()>(()); + + // Wrap it with a deadline + let mut fut = Deadline::new(fut, time.now()); + + // Ready! + assert_ready!(fut); + }); +} + +#[test] +fn completed_future_past_deadline() { + mocked(|_, time| { + // Create a future that is immediately ready + let fut = future::ok::<_, ()>(()); + + // Wrap it with a deadline + let mut fut = Deadline::new(fut, time.now() - ms(1000)); + + // Ready! + assert_ready!(fut); + }); +} + +#[test] +fn future_and_deadline_in_future() { + mocked(|timer, time| { + // Not yet complete + let (tx, rx) = oneshot::channel(); + + // Wrap it with a deadline + let mut fut = Deadline::new(rx, time.now() + ms(100)); + + // Ready! + assert_not_ready!(fut); + + // Turn the timer, it runs for the elapsed time + advance(timer, ms(90)); + + assert_not_ready!(fut); + + // Complete the future + tx.send(()).unwrap(); + + assert_ready!(fut); + }); +} + +#[test] +fn deadline_now_elapses() { + mocked(|_, time| { + let fut = future::empty::<(), ()>(); + + // Wrap it with a deadline + let mut fut = Deadline::new(fut, time.now()); + + assert_elapsed!(fut); + }); +} + +#[test] +fn deadline_future_elapses() { + mocked(|timer, time| { + let fut = future::empty::<(), ()>(); + + // Wrap it with a deadline + let mut fut = Deadline::new(fut, time.now() + ms(300)); + + assert_not_ready!(fut); + + advance(timer, ms(300)); + + assert_elapsed!(fut); + }); +} + +#[test] +fn future_errors_first() { + mocked(|_, time| { + let fut = future::err::<(), ()>(()); + + // Wrap it with a deadline + let mut fut = Deadline::new(fut, time.now() + ms(100)); + + // Ready! 
+ assert!(fut.poll().unwrap_err().is_inner()); + }); +} diff --git a/third_party/rust/tokio-timer/tests/delay.rs b/third_party/rust/tokio-timer/tests/delay.rs new file mode 100644 index 000000000000..02ac28a644e3 --- /dev/null +++ b/third_party/rust/tokio-timer/tests/delay.rs @@ -0,0 +1,488 @@ +extern crate futures; +extern crate tokio_executor; +extern crate tokio_timer; + +#[macro_use] +mod support; +use support::*; + +use tokio_timer::*; + +use futures::Future; + +use std::time::{Duration, Instant}; + +#[test] +fn immediate_delay() { + mocked(|timer, time| { + // Create `Delay` that elapsed immediately. + let mut delay = Delay::new(time.now()); + + // Ready! + assert_ready!(delay); + + // Turn the timer, it runs for the elapsed time + turn(timer, ms(1000)); + + // The time has not advanced. The `turn` completed immediately. + assert_eq!(time.advanced(), ms(1000)); + }); +} + +#[test] +fn delayed_delay_level_0() { + for &i in &[1, 10, 60] { + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(i)); + + // The delay has not elapsed. 
+ assert_not_ready!(delay); + + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(i)); + + assert_ready!(delay); + }); + } +} + +#[test] +fn sub_ms_delayed_delay() { + mocked(|timer, time| { + for _ in 0..5 { + let deadline = time.now() + + Duration::from_millis(1) + + Duration::new(0, 1); + + let mut delay = Delay::new(deadline); + + assert_not_ready!(delay); + + turn(timer, None); + assert_ready!(delay); + + assert!(time.now() >= deadline); + + time.advance(Duration::new(0, 1)); + } + }); +} + +#[test] +fn delayed_delay_wrapping_level_0() { + mocked(|timer, time| { + turn(timer, ms(5)); + assert_eq!(time.advanced(), ms(5)); + + let mut delay = Delay::new(time.now() + ms(60)); + + assert_not_ready!(delay); + + turn(timer, None); + assert_eq!(time.advanced(), ms(64)); + assert_not_ready!(delay); + + turn(timer, None); + assert_eq!(time.advanced(), ms(65)); + + assert_ready!(delay); + }); +} + +#[test] +fn timer_wrapping_with_higher_levels() { + mocked(|timer, time| { + // Set delay to hit level 1 + let mut s1 = Delay::new(time.now() + ms(64)); + assert_not_ready!(s1); + + // Turn a bit + turn(timer, ms(5)); + + // Set timeout such that it will hit level 0, but wrap + let mut s2 = Delay::new(time.now() + ms(60)); + assert_not_ready!(s2); + + // This should result in s1 firing + turn(timer, None); + assert_eq!(time.advanced(), ms(64)); + + assert_ready!(s1); + assert_not_ready!(s2); + + turn(timer, None); + assert_eq!(time.advanced(), ms(65)); + + assert_ready!(s2); + }); +} + +#[test] +fn delay_with_deadline_in_past() { + mocked(|timer, time| { + // Create `Delay` that elapsed immediately. + let mut delay = Delay::new(time.now() - ms(100)); + + // Even though the delay expires in the past, it is not ready yet + // because the timer must observe it. + assert_ready!(delay); + + // Turn the timer, it runs for the elapsed time + turn(timer, ms(1000)); + + // The time has not advanced. The `turn` completed immediately. 
+ assert_eq!(time.advanced(), ms(1000)); + }); +} + +#[test] +fn delayed_delay_level_1() { + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(234)); + + // The delay has not elapsed. + assert_not_ready!(delay); + + // Turn the timer, this will wake up to cascade the timer down. + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(192)); + + // The delay has not elapsed. + assert_not_ready!(delay); + + // Turn the timer again + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(234)); + + // The delay has elapsed. + assert_ready!(delay); + }); + + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(234)); + + // The delay has not elapsed. + assert_not_ready!(delay); + + // Turn the timer with a smaller timeout than the cascade. + turn(timer, ms(100)); + assert_eq!(time.advanced(), ms(100)); + + assert_not_ready!(delay); + + // Turn the timer, this will wake up to cascade the timer down. + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(192)); + + // The delay has not elapsed. + assert_not_ready!(delay); + + // Turn the timer again + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(234)); + + // The delay has elapsed. + assert_ready!(delay); + }); +} + +#[test] +fn creating_delay_outside_of_context() { + let now = Instant::now(); + + // This creates a delay outside of the context of a mock timer. This tests + // that it will still expire. + let mut delay = Delay::new(now + ms(500)); + + mocked_with_now(now, |timer, time| { + // This registers the delay with the timer + assert_not_ready!(delay); + + // Wait some time... 
the timer is cascading + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(448)); + + assert_not_ready!(delay); + + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(500)); + + // The delay has elapsed + assert_ready!(delay); + }); +} + +#[test] +fn concurrently_set_two_timers_second_one_shorter() { + mocked(|timer, time| { + let mut delay1 = Delay::new(time.now() + ms(500)); + let mut delay2 = Delay::new(time.now() + ms(200)); + + // The delay has not elapsed + assert_not_ready!(delay1); + assert_not_ready!(delay2); + + // Delay until a cascade + turn(timer, None); + assert_eq!(time.advanced(), ms(192)); + + // Delay until the second timer. + turn(timer, None); + assert_eq!(time.advanced(), ms(200)); + + // The shorter delay fires + assert_ready!(delay2); + assert_not_ready!(delay1); + + turn(timer, None); + assert_eq!(time.advanced(), ms(448)); + + assert_not_ready!(delay1); + + // Turn again, this time the time will advance to the second delay + turn(timer, None); + assert_eq!(time.advanced(), ms(500)); + + assert_ready!(delay1); + }) +} + +#[test] +fn short_delay() { + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(1)); + + // The delay has not elapsed. + assert_not_ready!(delay); + + // Turn the timer, but not enough time will go by. + turn(timer, None); + + // The delay has elapsed. + assert_ready!(delay); + + // The time has advanced to the point of the delay elapsing. + assert_eq!(time.advanced(), ms(1)); + }) +} + +#[test] +fn sorta_long_delay() { + const MIN_5: u64 = 5 * 60 * 1000; + + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(MIN_5)); + + // The delay has not elapsed. 
+ assert_not_ready!(delay); + + let cascades = &[ + 262_144, + 262_144 + 9 * 4096, + 262_144 + 9 * 4096 + 15 * 64, + ]; + + for &elapsed in cascades { + turn(timer, None); + assert_eq!(time.advanced(), ms(elapsed)); + + assert_not_ready!(delay); + } + + turn(timer, None); + assert_eq!(time.advanced(), ms(MIN_5)); + + // The delay has elapsed. + assert_ready!(delay); + }) +} + +#[test] +fn very_long_delay() { + const MO_5: u64 = 5 * 30 * 24 * 60 * 60 * 1000; + + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(MO_5)); + + // The delay has not elapsed. + assert_not_ready!(delay); + + let cascades = &[ + 12_884_901_888, + 12_952_010_752, + 12_959_875_072, + 12_959_997_952, + ]; + + for &elapsed in cascades { + turn(timer, None); + assert_eq!(time.advanced(), ms(elapsed)); + + assert_not_ready!(delay); + } + + // Turn the timer, but not enough time will go by. + turn(timer, None); + + // The time has advanced to the point of the delay elapsing. + assert_eq!(time.advanced(), ms(MO_5)); + + // The delay has elapsed. 
+ assert_ready!(delay); + }) +} + +#[test] +fn greater_than_max() { + const YR_5: u64 = 5 * 365 * 24 * 60 * 60 * 1000; + + mocked(|timer, time| { + // Create a `Delay` that elapses in the future + let mut delay = Delay::new(time.now() + ms(YR_5)); + + assert_not_ready!(delay); + + turn(timer, ms(0)); + + assert!(delay.poll().is_err()); + }) +} + +#[test] +fn unpark_is_delayed() { + mocked(|timer, time| { + let mut delay1 = Delay::new(time.now() + ms(100)); + let mut delay2 = Delay::new(time.now() + ms(101)); + let mut delay3 = Delay::new(time.now() + ms(200)); + + assert_not_ready!(delay1); + assert_not_ready!(delay2); + assert_not_ready!(delay3); + + time.park_for(ms(500)); + + turn(timer, None); + + assert_eq!(time.advanced(), ms(500)); + + assert_ready!(delay1); + assert_ready!(delay2); + assert_ready!(delay3); + }) +} + +#[test] +fn set_timeout_at_deadline_greater_than_max_timer() { + const YR_1: u64 = 365 * 24 * 60 * 60 * 1000; + const YR_5: u64 = 5 * YR_1; + + mocked(|timer, time| { + for _ in 0..5 { + turn(timer, ms(YR_1)); + } + + let mut delay = Delay::new(time.now() + ms(1)); + assert_not_ready!(delay); + + turn(timer, ms(1000)); + assert_eq!(time.advanced(), Duration::from_millis(YR_5) + ms(1)); + + assert_ready!(delay); + }); +} + +#[test] +fn reset_future_delay_before_fire() { + mocked(|timer, time| { + let mut delay = Delay::new(time.now() + ms(100)); + + assert_not_ready!(delay); + + delay.reset(time.now() + ms(200)); + + turn(timer, None); + assert_eq!(time.advanced(), ms(192)); + + assert_not_ready!(delay); + + turn(timer, None); + assert_eq!(time.advanced(), ms(200)); + + assert_ready!(delay); + }); +} + +#[test] +fn reset_past_delay_before_turn() { + mocked(|timer, time| { + let mut delay = Delay::new(time.now() + ms(100)); + + assert_not_ready!(delay); + + delay.reset(time.now() + ms(80)); + + turn(timer, None); + assert_eq!(time.advanced(), ms(64)); + + assert_not_ready!(delay); + + turn(timer, None); + assert_eq!(time.advanced(), ms(80)); + + 
assert_ready!(delay); + }); +} + +#[test] +fn reset_past_delay_before_fire() { + mocked(|timer, time| { + let mut delay = Delay::new(time.now() + ms(100)); + + assert_not_ready!(delay); + turn(timer, ms(10)); + + assert_not_ready!(delay); + delay.reset(time.now() + ms(80)); + + turn(timer, None); + assert_eq!(time.advanced(), ms(64)); + + assert_not_ready!(delay); + + turn(timer, None); + assert_eq!(time.advanced(), ms(90)); + + assert_ready!(delay); + }); +} + +#[test] +fn reset_future_delay_after_fire() { + mocked(|timer, time| { + let mut delay = Delay::new(time.now() + ms(100)); + + assert_not_ready!(delay); + + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(64)); + + turn(timer, None); + assert_eq!(time.advanced(), ms(100)); + + assert_ready!(delay); + + delay.reset(time.now() + ms(10)); + assert_not_ready!(delay); + + turn(timer, ms(1000)); + assert_eq!(time.advanced(), ms(110)); + + assert_ready!(delay); + }); +} diff --git a/third_party/rust/tokio-timer/tests/hammer.rs b/third_party/rust/tokio-timer/tests/hammer.rs new file mode 100644 index 000000000000..42b0d2982272 --- /dev/null +++ b/third_party/rust/tokio-timer/tests/hammer.rs @@ -0,0 +1,240 @@ +extern crate futures; +extern crate rand; +extern crate tokio_executor; +extern crate tokio_timer; + +use tokio_executor::park::{Park, Unpark, UnparkThread}; +use tokio_timer::*; + +use futures::{Future, Stream}; +use futures::stream::FuturesUnordered; +use rand::Rng; + +use std::cmp; +use std::sync::{Arc, Barrier}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::thread; +use std::time::{Duration, Instant}; + +struct Signal { + rem: AtomicUsize, + unpark: UnparkThread, +} + +#[test] +fn hammer_complete() { + const ITERS: usize = 5; + const THREADS: usize = 4; + const PER_THREAD: usize = 40; + const MIN_DELAY: u64 = 1; + const MAX_DELAY: u64 = 5_000; + + for _ in 0..ITERS { + let mut timer = Timer::default(); + let handle = timer.handle(); + let barrier = 
Arc::new(Barrier::new(THREADS)); + + let done = Arc::new(Signal { + rem: AtomicUsize::new(THREADS), + unpark: timer.get_park().unpark(), + }); + + for _ in 0..THREADS { + let handle = handle.clone(); + let barrier = barrier.clone(); + let done = done.clone(); + + thread::spawn(move || { + let mut exec = FuturesUnordered::new(); + let mut rng = rand::thread_rng(); + + barrier.wait(); + + for _ in 0..PER_THREAD { + let deadline = Instant::now() + Duration::from_millis( + rng.gen_range(MIN_DELAY, MAX_DELAY)); + + exec.push({ + handle.delay(deadline) + .and_then(move |_| { + let now = Instant::now(); + assert!(now >= deadline, "deadline greater by {:?}", deadline - now); + Ok(()) + }) + }); + } + + // Run the logic + exec.for_each(|_| Ok(())) + .wait() + .unwrap(); + + if 1 == done.rem.fetch_sub(1, SeqCst) { + done.unpark.unpark(); + } + }); + } + + while done.rem.load(SeqCst) > 0 { + timer.turn(None).unwrap(); + } + } +} + +#[test] +fn hammer_cancel() { + const ITERS: usize = 5; + const THREADS: usize = 4; + const PER_THREAD: usize = 40; + const MIN_DELAY: u64 = 1; + const MAX_DELAY: u64 = 5_000; + + for _ in 0..ITERS { + let mut timer = Timer::default(); + let handle = timer.handle(); + let barrier = Arc::new(Barrier::new(THREADS)); + + let done = Arc::new(Signal { + rem: AtomicUsize::new(THREADS), + unpark: timer.get_park().unpark(), + }); + + for _ in 0..THREADS { + let handle = handle.clone(); + let barrier = barrier.clone(); + let done = done.clone(); + + thread::spawn(move || { + let mut exec = FuturesUnordered::new(); + let mut rng = rand::thread_rng(); + + barrier.wait(); + + for _ in 0..PER_THREAD { + let deadline1 = Instant::now() + Duration::from_millis( + rng.gen_range(MIN_DELAY, MAX_DELAY)); + + let deadline2 = Instant::now() + Duration::from_millis( + rng.gen_range(MIN_DELAY, MAX_DELAY)); + + let deadline = cmp::min(deadline1, deadline2); + + let delay = handle.delay(deadline1); + let join = handle.deadline(delay, deadline2); + + exec.push({ + join + 
.and_then(move |_| { + let now = Instant::now(); + assert!(now >= deadline, "deadline greater by {:?}", deadline - now); + Ok(()) + }) + }); + } + + // Run the logic + exec + .or_else(|e| { + assert!(e.is_elapsed()); + Ok::<_, ()>(()) + }) + .for_each(|_| Ok(())) + .wait() + .unwrap(); + + if 1 == done.rem.fetch_sub(1, SeqCst) { + done.unpark.unpark(); + } + }); + } + + while done.rem.load(SeqCst) > 0 { + timer.turn(None).unwrap(); + } + } +} + +#[test] +fn hammer_reset() { + const ITERS: usize = 5; + const THREADS: usize = 4; + const PER_THREAD: usize = 40; + const MIN_DELAY: u64 = 1; + const MAX_DELAY: u64 = 250; + + for _ in 0..ITERS { + let mut timer = Timer::default(); + let handle = timer.handle(); + let barrier = Arc::new(Barrier::new(THREADS)); + + let done = Arc::new(Signal { + rem: AtomicUsize::new(THREADS), + unpark: timer.get_park().unpark(), + }); + + for _ in 0..THREADS { + let handle = handle.clone(); + let barrier = barrier.clone(); + let done = done.clone(); + + thread::spawn(move || { + let mut exec = FuturesUnordered::new(); + let mut rng = rand::thread_rng(); + + barrier.wait(); + + for _ in 0..PER_THREAD { + let deadline1 = Instant::now() + Duration::from_millis( + rng.gen_range(MIN_DELAY, MAX_DELAY)); + + let deadline2 = deadline1 + Duration::from_millis( + rng.gen_range(MIN_DELAY, MAX_DELAY)); + + let deadline3 = deadline2 + Duration::from_millis( + rng.gen_range(MIN_DELAY, MAX_DELAY)); + + exec.push({ + handle.delay(deadline1) + // Select over a second delay + .select2(handle.delay(deadline2)) + .map_err(|e| panic!("boom; err={:?}", e)) + .and_then(move |res| { + use futures::future::Either::*; + + let now = Instant::now(); + assert!(now >= deadline1, "deadline greater by {:?}", deadline1 - now); + + let mut other = match res { + A((_, other)) => other, + B((_, other)) => other, + }; + + other.reset(deadline3); + other + }) + .and_then(move |_| { + let now = Instant::now(); + assert!(now >= deadline3, "deadline greater by {:?}", deadline3 - 
now); + Ok(()) + }) + }); + } + + // Run the logic + exec + .for_each(|_| Ok(())) + .wait() + .unwrap(); + + if 1 == done.rem.fetch_sub(1, SeqCst) { + done.unpark.unpark(); + } + }); + } + + while done.rem.load(SeqCst) > 0 { + timer.turn(None).unwrap(); + } + } +} diff --git a/third_party/rust/tokio-timer/tests/interval.rs b/third_party/rust/tokio-timer/tests/interval.rs new file mode 100644 index 000000000000..60d6d8d62a39 --- /dev/null +++ b/third_party/rust/tokio-timer/tests/interval.rs @@ -0,0 +1,46 @@ +extern crate futures; +extern crate tokio_executor; +extern crate tokio_timer; + +#[macro_use] +mod support; +use support::*; + +use tokio_timer::*; + +use futures::{Stream}; + +#[test] +#[should_panic] +fn interval_zero_duration() { + mocked(|_, time| { + let _ = Interval::new(time.now(), ms(0)); + }); +} + +#[test] +fn usage() { + mocked(|timer, time| { + let start = time.now(); + let mut int = Interval::new(start, ms(300)); + + assert_ready!(int, Some(start)); + assert_not_ready!(int); + + advance(timer, ms(100)); + assert_not_ready!(int); + + advance(timer, ms(200)); + assert_ready!(int, Some(start + ms(300))); + assert_not_ready!(int); + + advance(timer, ms(400)); + assert_ready!(int, Some(start + ms(600))); + assert_not_ready!(int); + + advance(timer, ms(500)); + assert_ready!(int, Some(start + ms(900))); + assert_ready!(int, Some(start + ms(1200))); + assert_not_ready!(int); + }); +} diff --git a/third_party/rust/tokio-timer/tests/support/mod.rs b/third_party/rust/tokio-timer/tests/support/mod.rs new file mode 100644 index 000000000000..a79b0dab0611 --- /dev/null +++ b/third_party/rust/tokio-timer/tests/support/mod.rs @@ -0,0 +1,234 @@ +#![allow(unused_macros, unused_imports, dead_code, deprecated)] + +use tokio_executor::park::{Park, Unpark}; +use tokio_timer::timer::{Timer, Now}; + +use futures::future::{lazy, Future}; + +use std::marker::PhantomData; +use std::rc::Rc; +use std::sync::{Arc, Mutex}; +use std::time::{Instant, Duration}; + +macro_rules! 
assert_ready { + ($f:expr) => { + assert!($f.poll().unwrap().is_ready()); + }; + ($f:expr, $expect:expr) => { + assert_eq!($f.poll().unwrap(), ::futures::Async::Ready($expect)); + }; +} + +macro_rules! assert_not_ready { + ($f:expr) => { + assert!(!$f.poll().unwrap().is_ready()); + } +} + +macro_rules! assert_elapsed { + ($f:expr) => { + assert!($f.poll().unwrap_err().is_elapsed()); + } +} + +#[derive(Debug)] +pub struct MockTime { + inner: Inner, + _p: PhantomData>, +} + +#[derive(Debug)] +pub struct MockNow { + inner: Inner, + _p: PhantomData>, +} + +#[derive(Debug)] +pub struct MockPark { + inner: Inner, + _p: PhantomData>, +} + +#[derive(Debug)] +pub struct MockUnpark { + inner: Inner, +} + +type Inner = Arc>; + +#[derive(Debug)] +struct State { + base: Instant, + advance: Duration, + unparked: bool, + park_for: Option, +} + +pub fn ms(num: u64) -> Duration { + Duration::from_millis(num) +} + +pub trait IntoTimeout { + fn into_timeout(self) -> Option; +} + +impl IntoTimeout for Option { + fn into_timeout(self) -> Self { + self + } +} + +impl IntoTimeout for Duration { + fn into_timeout(self) -> Option { + Some(self) + } +} + +/// Turn the timer state once +pub fn turn(timer: &mut Timer, duration: T) { + timer.turn(duration.into_timeout()).unwrap(); +} + +/// Advance the timer the specified amount +pub fn advance(timer: &mut Timer, duration: Duration) { + let inner = timer.get_park().inner.clone(); + let deadline = inner.lock().unwrap().now() + duration; + + while inner.lock().unwrap().now() < deadline { + let dur = deadline - inner.lock().unwrap().now(); + turn(timer, dur); + } +} + +pub fn mocked(f: F) -> R +where F: FnOnce(&mut Timer, &mut MockTime) -> R +{ + mocked_with_now(Instant::now(), f) +} + +pub fn mocked_with_now(now: Instant, f: F) -> R +where F: FnOnce(&mut Timer, &mut MockTime) -> R +{ + let mut time = MockTime::new(now); + let park = time.mock_park(); + let now = time.mock_now(); + + let mut timer = Timer::new_with_now(park, now); + let handle = 
timer.handle(); + + let mut enter = ::tokio_executor::enter().unwrap(); + + ::tokio_timer::with_default(&handle, &mut enter, |_| { + lazy(|| { + Ok::<_, ()>(f(&mut timer, &mut time)) + }).wait().unwrap() + }) +} + +impl MockTime { + pub fn new(now: Instant) -> MockTime { + let state = State { + base: now, + advance: Duration::default(), + unparked: false, + park_for: None, + }; + + MockTime { + inner: Arc::new(Mutex::new(state)), + _p: PhantomData, + } + } + + pub fn mock_now(&self) -> MockNow { + let inner = self.inner.clone(); + MockNow { + inner, + _p: PhantomData, + } + } + + pub fn mock_park(&self) -> MockPark { + let inner = self.inner.clone(); + MockPark { + inner, + _p: PhantomData, + } + } + + pub fn now(&self) -> Instant { + self.inner.lock().unwrap().now() + } + + /// Returns the total amount of time the time has been advanced. + pub fn advanced(&self) -> Duration { + self.inner.lock().unwrap().advance + } + + pub fn advance(&self, duration: Duration) { + let mut inner = self.inner.lock().unwrap(); + inner.advance(duration); + } + + /// The next call to park_timeout will be for this duration, regardless of + /// the timeout passed to `park_timeout`. 
+ pub fn park_for(&self, duration: Duration) { + self.inner.lock().unwrap().park_for = Some(duration); + } +} + +impl Park for MockPark { + type Unpark = MockUnpark; + type Error = (); + + fn unpark(&self) -> Self::Unpark { + let inner = self.inner.clone(); + MockUnpark { inner } + } + + fn park(&mut self) -> Result<(), Self::Error> { + let mut inner = self.inner.lock().map_err(|_| ())?; + + let duration = inner.park_for.take() + .expect("call park_for first"); + + inner.advance(duration); + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + let mut inner = self.inner.lock().unwrap(); + + if let Some(duration) = inner.park_for.take() { + inner.advance(duration); + } else { + inner.advance(duration); + } + + Ok(()) + } +} + +impl Unpark for MockUnpark { + fn unpark(&self) { + if let Ok(mut inner) = self.inner.lock() { + inner.unparked = true; + } + } +} + +impl Now for MockNow { + fn now(&mut self) -> Instant { + self.inner.lock().unwrap().now() + } +} + +impl State { + fn now(&self) -> Instant { + self.base + self.advance + } + + fn advance(&mut self, duration: Duration) { + self.advance += duration; + } +} diff --git a/third_party/rust/tokio-udp/.cargo-checksum.json b/third_party/rust/tokio-udp/.cargo-checksum.json new file mode 100644 index 000000000000..facc0c3fb8f3 --- /dev/null +++ b/third_party/rust/tokio-udp/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"12c90f0adbca12bb4093c9ff49e5582b94ceba32cfb1883125cea2d86bc49229","Cargo.toml":"48d6aa4b9cf001ef34a4f2e6b831e7b8a607bc48c86ea4f3df8732111e177a9a","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"6e14fb99ef1acf472ca5c3b501066df97de615c961b32fa19d1d4a335950a0a5","src/frame.rs":"739e2625b5b6a61024e61c8f7be02fdfcb7560e1547125c4f1fa4be7ff72e4d6","src/lib.rs":"c1d32d0874a6abbcaaabb40386e8366b1c0beb13738b2f3184b83b817f86dceb","src/recv_dgram.rs":"88a898ccbcfce5bb5c23f0f5485ad87b46fabd5c84e87b6876efbd964caba548","src/send_dgram.rs":"1ba6b5ba70c05e4b36fb209d3c4b46f3ffe7b075792ce209913824ecb04aaef0","src/socket.rs":"fdc5faef8075afa83072d09fa03eb615464926698bdbbf77d27d66c22047391e","tests/udp.rs":"120f35f20b1b134e3ac3c1fb5091cfa82d51e36dcd227905620b036741c2e313"},"package":"43eb534af6e8f37d43ab1b612660df14755c42bd003c5f8d2475ee78cc4600c0"} \ No newline at end of file diff --git a/third_party/rust/tokio-udp/CHANGELOG.md b/third_party/rust/tokio-udp/CHANGELOG.md new file mode 100644 index 000000000000..dd8dad17a3b9 --- /dev/null +++ b/third_party/rust/tokio-udp/CHANGELOG.md @@ -0,0 +1,7 @@ +# 0.1.1 (June 13, 2018) + +* Switch to tokio-codec (#360) + +# 0.1.0 (Mar 23, 2018) + +* Initial release diff --git a/third_party/rust/tokio-udp/Cargo.toml b/third_party/rust/tokio-udp/Cargo.toml new file mode 100644 index 000000000000..2292109131ee --- /dev/null +++ b/third_party/rust/tokio-udp/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio-udp" +version = "0.1.1" +authors = ["Carl Lerche "] +description = "UDP bindings for tokio.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio-udp/0.1" +categories = ["asynchronous"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.bytes] +version = "0.4" + +[dependencies.futures] +version = "0.1.19" + +[dependencies.log] +version = "0.4" + +[dependencies.mio] +version = "0.6.14" + +[dependencies.tokio-codec] +version = "0.1.0" + +[dependencies.tokio-io] +version = "0.1.7" + +[dependencies.tokio-reactor] +version = "0.1.1" +[dev-dependencies.env_logger] +version = "0.4" +default-features = false diff --git a/third_party/rust/tokio-udp/LICENSE b/third_party/rust/tokio-udp/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio-udp/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-udp/README.md b/third_party/rust/tokio-udp/README.md new file mode 100644 index 000000000000..e677c0749f34 --- /dev/null +++ b/third_party/rust/tokio-udp/README.md @@ -0,0 +1,15 @@ +# tokio-udp + +UDP bindings for `tokio`. + +[Documentation](https://tokio-rs.github.io/tokio/tokio_udp/) + +## License + +This project is licensed under the [MIT license](./LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio-udp/src/frame.rs b/third_party/rust/tokio-udp/src/frame.rs new file mode 100644 index 000000000000..37097ca37786 --- /dev/null +++ b/third_party/rust/tokio-udp/src/frame.rs @@ -0,0 +1,156 @@ +use std::io; +use std::net::{SocketAddr, Ipv4Addr, SocketAddrV4}; + +use futures::{Async, Poll, Stream, Sink, StartSend, AsyncSink}; + +use super::UdpSocket; + +use tokio_codec::{Decoder, Encoder}; +use bytes::{BytesMut, BufMut}; + +/// A unified `Stream` and `Sink` interface to an underlying `UdpSocket`, using +/// the `Encoder` and `Decoder` traits to encode and decode frames. +/// +/// Raw UDP sockets work with datagrams, but higher-level code usually wants to +/// batch these into meaningful chunks, called "frames". This method layers +/// framing on top of this socket by using the `Encoder` and `Decoder` traits to +/// handle encoding and decoding of messages frames. Note that the incoming and +/// outgoing frame types may be distinct. 
+/// +/// This function returns a *single* object that is both `Stream` and `Sink`; +/// grouping this into a single object is often useful for layering things which +/// require both read and write access to the underlying object. +/// +/// If you want to work more directly with the streams and sink, consider +/// calling `split` on the `UdpFramed` returned by this method, which will break +/// them into separate objects, allowing them to interact more easily. +#[must_use = "sinks do nothing unless polled"] +#[derive(Debug)] +pub struct UdpFramed { + socket: UdpSocket, + codec: C, + rd: BytesMut, + wr: BytesMut, + out_addr: SocketAddr, + flushed: bool, +} + +impl Stream for UdpFramed { + type Item = (C::Item, SocketAddr); + type Error = C::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + self.rd.reserve(INITIAL_RD_CAPACITY); + + let (n, addr) = unsafe { + // Read into the buffer without having to initialize the memory. + let (n, addr) = try_ready!(self.socket.poll_recv_from(self.rd.bytes_mut())); + self.rd.advance_mut(n); + (n, addr) + }; + trace!("received {} bytes, decoding", n); + let frame_res = self.codec.decode(&mut self.rd); + self.rd.clear(); + let frame = frame_res?; + let result = frame.map(|frame| (frame, addr)); // frame -> (frame, addr) + trace!("frame decoded from buffer"); + Ok(Async::Ready(result)) + } +} + +impl Sink for UdpFramed { + type SinkItem = (C::Item, SocketAddr); + type SinkError = C::Error; + + fn start_send(&mut self, item: Self::SinkItem) -> StartSend { + trace!("sending frame"); + + if !self.flushed { + match try!(self.poll_complete()) { + Async::Ready(()) => {}, + Async::NotReady => return Ok(AsyncSink::NotReady(item)), + } + } + + let (frame, out_addr) = item; + self.codec.encode(frame, &mut self.wr)?; + self.out_addr = out_addr; + self.flushed = false; + trace!("frame encoded; length={}", self.wr.len()); + + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), C::Error> { + if self.flushed { + return 
Ok(Async::Ready(())) + } + + trace!("flushing frame; length={}", self.wr.len()); + let n = try_ready!(self.socket.poll_send_to(&self.wr, &self.out_addr)); + trace!("written {}", n); + + let wrote_all = n == self.wr.len(); + self.wr.clear(); + self.flushed = true; + + if wrote_all { + Ok(Async::Ready(())) + } else { + Err(io::Error::new(io::ErrorKind::Other, + "failed to write entire datagram to socket").into()) + } + } + + fn close(&mut self) -> Poll<(), C::Error> { + try_ready!(self.poll_complete()); + Ok(().into()) + } +} + +const INITIAL_RD_CAPACITY: usize = 64 * 1024; +const INITIAL_WR_CAPACITY: usize = 8 * 1024; + +impl UdpFramed { + /// Create a new `UdpFramed` backed by the given socket and codec. + /// + /// See struct level documentation for more details. + pub fn new(socket: UdpSocket, codec: C) -> UdpFramed { + UdpFramed { + socket: socket, + codec: codec, + out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)), + rd: BytesMut::with_capacity(INITIAL_RD_CAPACITY), + wr: BytesMut::with_capacity(INITIAL_WR_CAPACITY), + flushed: true, + } + } + + /// Returns a reference to the underlying I/O stream wrapped by `Framed`. + /// + /// # Note + /// + /// Care should be taken to not tamper with the underlying stream of data + /// coming in as it may corrupt the stream of frames otherwise being worked + /// with. + pub fn get_ref(&self) -> &UdpSocket { + &self.socket + } + + /// Returns a mutable reference to the underlying I/O stream wrapped by + /// `Framed`. + /// + /// # Note + /// + /// Care should be taken to not tamper with the underlying stream of data + /// coming in as it may corrupt the stream of frames otherwise being worked + /// with. + pub fn get_mut(&mut self) -> &mut UdpSocket { + &mut self.socket + } + + /// Consumes the `Framed`, returning its underlying I/O stream. 
+ pub fn into_inner(self) -> UdpSocket { + self.socket + } +} diff --git a/third_party/rust/tokio-udp/src/lib.rs b/third_party/rust/tokio-udp/src/lib.rs new file mode 100644 index 000000000000..4a37b697eff3 --- /dev/null +++ b/third_party/rust/tokio-udp/src/lib.rs @@ -0,0 +1,43 @@ +//! UDP bindings for `tokio`. +//! +//! This module contains the UDP networking types, similar to the standard +//! library, which can be used to implement networking protocols. +//! +//! The main struct for UDP is the [`UdpSocket`], which represents a UDP socket. +//! Reading and writing to it can be done using futures, which return the +//! [`RecvDgram`] and [`SendDgram`] structs respectively. +//! +//! For convenience it's also possible to convert raw datagrams into higher-level +//! frames. +//! +//! [`UdpSocket`]: struct.UdpSocket.html +//! [`RecvDgram`]: struct.RecvDgram.html +//! [`SendDgram`]: struct.SendDgram.html +//! [`UdpFramed`]: struct.UdpFramed.html +//! [`framed`]: struct.UdpSocket.html#method.framed + +#![doc(html_root_url = "https://docs.rs/tokio-tcp/0.1.1")] +#![deny(missing_docs, warnings, missing_debug_implementations)] + +extern crate bytes; +#[macro_use] +extern crate futures; +extern crate mio; +#[macro_use] +extern crate log; +extern crate tokio_codec; +extern crate tokio_io; +extern crate tokio_reactor; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +mod frame; +mod socket; +mod send_dgram; +mod recv_dgram; + +pub use self::frame::UdpFramed; +pub use self::socket::UdpSocket; +pub use self::send_dgram::SendDgram; +pub use self::recv_dgram::RecvDgram; diff --git a/third_party/rust/tokio-udp/src/recv_dgram.rs b/third_party/rust/tokio-udp/src/recv_dgram.rs new file mode 100644 index 000000000000..2a9e296064d2 --- /dev/null +++ b/third_party/rust/tokio-udp/src/recv_dgram.rs @@ -0,0 +1,52 @@ +use super::socket::UdpSocket; + +use std::io; +use std::net::SocketAddr; + +use futures::{Async, Future, Poll}; + +/// A future used to receive a datagram from 
a UDP socket. +/// +/// This is created by the `UdpSocket::recv_dgram` method. +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct RecvDgram { + /// None means future was completed + state: Option> +} + +/// A struct is used to represent the full info of RecvDgram. +#[derive(Debug)] +struct RecvDgramInner { + /// Rx socket + socket: UdpSocket, + /// The received data will be put in the buffer + buffer: T +} + +impl RecvDgram { + /// Create a new future to receive UDP Datagram + pub(crate) fn new(socket: UdpSocket, buffer: T) -> RecvDgram { + let inner = RecvDgramInner { socket: socket, buffer: buffer }; + RecvDgram { state: Some(inner) } + } +} + +impl Future for RecvDgram + where T: AsMut<[u8]>, +{ + type Item = (UdpSocket, T, usize, SocketAddr); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (n, addr) = { + let ref mut inner = + self.state.as_mut().expect("RecvDgram polled after completion"); + + try_ready!(inner.socket.poll_recv_from(inner.buffer.as_mut())) + }; + + let inner = self.state.take().unwrap(); + Ok(Async::Ready((inner.socket, inner.buffer, n, addr))) + } +} diff --git a/third_party/rust/tokio-udp/src/send_dgram.rs b/third_party/rust/tokio-udp/src/send_dgram.rs new file mode 100644 index 000000000000..50d650389926 --- /dev/null +++ b/third_party/rust/tokio-udp/src/send_dgram.rs @@ -0,0 +1,61 @@ +use super::socket::UdpSocket; + +use std::io; +use std::net::SocketAddr; + +use futures::{Async, Future, Poll}; + +/// A future used to write the entire contents of some data to a UDP socket. +/// +/// This is created by the `UdpSocket::send_dgram` method. +#[must_use = "futures do nothing unless polled"] +#[derive(Debug)] +pub struct SendDgram { + /// None means future was completed + state: Option> +} + +/// A struct is used to represent the full info of SendDgram. 
+#[derive(Debug)] +struct SendDgramInner { + /// Tx socket + socket: UdpSocket, + /// The whole buffer will be sent + buffer: T, + /// Destination addr + addr: SocketAddr, +} + +impl SendDgram { + /// Create a new future to send UDP Datagram + pub(crate) fn new(socket: UdpSocket, buffer: T, addr: SocketAddr) -> SendDgram { + let inner = SendDgramInner { socket: socket, buffer: buffer, addr: addr }; + SendDgram { state: Some(inner) } + } +} + +fn incomplete_write(reason: &str) -> io::Error { + io::Error::new(io::ErrorKind::Other, reason) +} + +impl Future for SendDgram + where T: AsRef<[u8]>, +{ + type Item = (UdpSocket, T); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(UdpSocket, T), io::Error> { + { + let ref mut inner = + self.state.as_mut().expect("SendDgram polled after completion"); + let n = try_ready!(inner.socket.poll_send_to(inner.buffer.as_ref(), &inner.addr)); + if n != inner.buffer.as_ref().len() { + return Err(incomplete_write("failed to send entire message \ + in datagram")) + } + } + + let inner = self.state.take().unwrap(); + Ok(Async::Ready((inner.socket, inner.buffer))) + } +} diff --git a/third_party/rust/tokio-udp/src/socket.rs b/third_party/rust/tokio-udp/src/socket.rs new file mode 100644 index 000000000000..aaf6b8f72618 --- /dev/null +++ b/third_party/rust/tokio-udp/src/socket.rs @@ -0,0 +1,425 @@ +use super::{SendDgram, RecvDgram}; + +use std::io; +use std::net::{self, SocketAddr, Ipv4Addr, Ipv6Addr}; +use std::fmt; + +use futures::{Async, Poll}; +use mio; + +use tokio_reactor::{Handle, PollEvented}; + +/// An I/O object representing a UDP socket. +pub struct UdpSocket { + io: PollEvented, +} + +impl UdpSocket { + /// This function will create a new UDP socket and attempt to bind it to + /// the `addr` provided. 
+ pub fn bind(addr: &SocketAddr) -> io::Result { + mio::net::UdpSocket::bind(addr) + .map(UdpSocket::new) + } + + fn new(socket: mio::net::UdpSocket) -> UdpSocket { + let io = PollEvented::new(socket); + UdpSocket { io: io } + } + + /// Creates a new `UdpSocket` from the previously bound socket provided. + /// + /// The socket given will be registered with the event loop that `handle` + /// is associated with. This function requires that `socket` has previously + /// been bound to an address to work correctly. + /// + /// This can be used in conjunction with net2's `UdpBuilder` interface to + /// configure a socket before it's handed off, such as setting options like + /// `reuse_address` or binding to multiple addresses. + /// + /// Use `Handle::default()` to lazily bind to an event loop, just like `bind` does. + pub fn from_std(socket: net::UdpSocket, + handle: &Handle) -> io::Result { + let io = mio::net::UdpSocket::from_socket(socket)?; + let io = PollEvented::new_with_handle(io, handle)?; + Ok(UdpSocket { io }) + } + + /// Returns the local address that this socket is bound to. + pub fn local_addr(&self) -> io::Result { + self.io.get_ref().local_addr() + } + + /// Connects the UDP socket setting the default destination for send() and + /// limiting packets that are read via recv from the address specified in + /// `addr`. + pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> { + self.io.get_ref().connect(*addr) + } + + #[deprecated(since = "0.1.2", note = "use poll_send instead")] + #[doc(hidden)] + pub fn send(&mut self, buf: &[u8]) -> io::Result { + match self.poll_send(buf)? { + Async::Ready(n) => Ok(n), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Sends data on the socket to the remote address to which it is connected. + /// + /// The [`connect`] method will connect this socket to a remote address. This + /// method will fail if the socket is not connected. 
+ /// + /// [`connect`]: #method.connect + /// + /// # Return + /// + /// On success, returns `Ok(Async::Ready(num_bytes_written))`. + /// + /// If the socket is not ready for writing, the method returns + /// `Ok(Async::NotReady)` and arranges for the current task to receive a + /// notification when the socket becomes writable. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_send(&mut self, buf: &[u8]) -> Poll { + try_ready!(self.io.poll_write_ready()); + + match self.io.get_ref().send(buf) { + Ok(n) => Ok(n.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready()?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } + + #[deprecated(since = "0.1.2", note = "use poll_recv instead")] + #[doc(hidden)] + pub fn recv(&mut self, buf: &mut [u8]) -> io::Result { + match self.poll_recv(buf)? { + Async::Ready(n) => Ok(n), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Receives a single datagram message on the socket from the remote address to + /// which it is connected. On success, returns the number of bytes read. + /// + /// The function must be called with valid byte array `buf` of sufficient size to + /// hold the message bytes. If a message is too long to fit in the supplied buffer, + /// excess bytes may be discarded. + /// + /// The [`connect`] method will connect this socket to a remote address. This + /// method will fail if the socket is not connected. + /// + /// [`connect`]: #method.connect + /// + /// # Return + /// + /// On success, returns `Ok(Async::Ready(num_bytes_read))`. + /// + /// If no data is available for reading, the method returns + /// `Ok(Async::NotReady)` and arranges for the current task to receive a + /// notification when the socket becomes receivable or is closed. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. 
+ pub fn poll_recv(&mut self, buf: &mut [u8]) -> Poll { + try_ready!(self.io.poll_read_ready(mio::Ready::readable())); + + match self.io.get_ref().recv(buf) { + Ok(n) => Ok(n.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(mio::Ready::readable())?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } + + #[deprecated(since = "0.1.2", note = "use poll_send_to instead")] + #[doc(hidden)] + pub fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result { + match self.poll_send_to(buf, target)? { + Async::Ready(n) => Ok(n), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Sends data on the socket to the given address. On success, returns the + /// number of bytes written. + /// + /// This will return an error when the IP version of the local socket + /// does not match that of `target`. + /// + /// # Return + /// + /// On success, returns `Ok(Async::Ready(num_bytes_written))`. + /// + /// If the socket is not ready for writing, the method returns + /// `Ok(Async::NotReady)` and arranges for the current task to receive a + /// notification when the socket becomes writable. + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_send_to(&mut self, buf: &[u8], target: &SocketAddr) -> Poll { + try_ready!(self.io.poll_write_ready()); + + match self.io.get_ref().send_to(buf, target) { + Ok(n) => Ok(n.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready()?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } + + /// Creates a future that will write the entire contents of the buffer + /// `buf` provided as a datagram to this socket. + /// + /// The returned future will return after data has been written to the + /// outbound socket. The future will resolve to the stream as well as the + /// buffer (for reuse if needed). 
+ /// + /// Any error which happens during writing will cause both the stream and + /// the buffer to get destroyed. Note that failure to write the entire + /// buffer is considered an error for the purposes of sending a datagram. + /// + /// The `buf` parameter here only requires the `AsRef<[u8]>` trait, which + /// should be broadly applicable to accepting data which can be converted + /// to a slice. + pub fn send_dgram(self, buf: T, addr: &SocketAddr) -> SendDgram + where T: AsRef<[u8]>, + { + SendDgram::new(self, buf, *addr) + } + + #[deprecated(since = "0.1.2", note = "use poll_recv_from instead")] + #[doc(hidden)] + pub fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.poll_recv_from(buf)? { + Async::Ready(ret) => Ok(ret), + Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), + } + } + + /// Receives data from the socket. On success, returns the number of bytes + /// read and the address from whence the data came. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. + pub fn poll_recv_from(&mut self, buf: &mut [u8]) -> Poll<(usize, SocketAddr), io::Error> { + try_ready!(self.io.poll_read_ready(mio::Ready::readable())); + + match self.io.get_ref().recv_from(buf) { + Ok(n) => Ok(n.into()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(mio::Ready::readable())?; + Ok(Async::NotReady) + } + Err(e) => Err(e), + } + } + + /// Creates a future that receive a datagram to be written to the buffer + /// provided. + /// + /// The returned future will return after a datagram has been received on + /// this socket. The future will resolve to the socket, the buffer, the + /// amount of data read, and the address the data was received from. + /// + /// An error during reading will cause the socket and buffer to get + /// destroyed. 
+ /// + /// The `buf` parameter here only requires the `AsMut<[u8]>` trait, which + /// should be broadly applicable to accepting data which can be converted + /// to a slice. + pub fn recv_dgram(self, buf: T) -> RecvDgram + where T: AsMut<[u8]>, + { + RecvDgram::new(self, buf) + } + + /// Gets the value of the `SO_BROADCAST` option for this socket. + /// + /// For more information about this option, see [`set_broadcast`]. + /// + /// [`set_broadcast`]: #method.set_broadcast + pub fn broadcast(&self) -> io::Result { + self.io.get_ref().broadcast() + } + + /// Sets the value of the `SO_BROADCAST` option for this socket. + /// + /// When enabled, this socket is allowed to send packets to a broadcast + /// address. + pub fn set_broadcast(&self, on: bool) -> io::Result<()> { + self.io.get_ref().set_broadcast(on) + } + + /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see [`set_multicast_loop_v4`]. + /// + /// [`set_multicast_loop_v4`]: #method.set_multicast_loop_v4 + pub fn multicast_loop_v4(&self) -> io::Result { + self.io.get_ref().multicast_loop_v4() + } + + /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// If enabled, multicast packets will be looped back to the local socket. + /// + /// # Note + /// + /// This may not have any affect on IPv6 sockets. + pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { + self.io.get_ref().set_multicast_loop_v4(on) + } + + /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// For more information about this option, see [`set_multicast_ttl_v4`]. + /// + /// [`set_multicast_ttl_v4`]: #method.set_multicast_ttl_v4 + pub fn multicast_ttl_v4(&self) -> io::Result { + self.io.get_ref().multicast_ttl_v4() + } + + /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// Indicates the time-to-live value of outgoing multicast packets for + /// this socket. 
The default value is 1 which means that multicast packets + /// don't leave the local network unless explicitly requested. + /// + /// # Note + /// + /// This may not have any affect on IPv6 sockets. + pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_multicast_ttl_v4(ttl) + } + + /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see [`set_multicast_loop_v6`]. + /// + /// [`set_multicast_loop_v6`]: #method.set_multicast_loop_v6 + pub fn multicast_loop_v6(&self) -> io::Result { + self.io.get_ref().multicast_loop_v6() + } + + /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// Controls whether this socket sees the multicast packets it sends itself. + /// + /// # Note + /// + /// This may not have any affect on IPv4 sockets. + pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { + self.io.get_ref().set_multicast_loop_v6(on) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. + /// + /// [`set_ttl`]: #method.set_ttl + pub fn ttl(&self) -> io::Result { + self.io.get_ref().ttl() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_ttl(ttl) + } + + /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// address of the local interface with which the system should join the + /// multicast group. If it's equal to `INADDR_ANY` then an appropriate + /// interface is chosen by the system. 
+ pub fn join_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + self.io.get_ref().join_multicast_v4(multiaddr, interface) + } + + /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// index of the interface to join/leave (or 0 to indicate any interface). + pub fn join_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + self.io.get_ref().join_multicast_v6(multiaddr, interface) + } + + /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. + /// + /// For more information about this option, see [`join_multicast_v4`]. + /// + /// [`join_multicast_v4`]: #method.join_multicast_v4 + pub fn leave_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + self.io.get_ref().leave_multicast_v4(multiaddr, interface) + } + + /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. + /// + /// For more information about this option, see [`join_multicast_v6`]. + /// + /// [`join_multicast_v6`]: #method.join_multicast_v6 + pub fn leave_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + self.io.get_ref().leave_multicast_v6(multiaddr, interface) + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +#[cfg(all(unix))] +mod sys { + use std::os::unix::prelude::*; + use super::UdpSocket; + + impl AsRawFd for UdpSocket { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + // TODO: let's land these upstream with mio and then we can add them here. 
+ // + // use std::os::windows::prelude::*; + // use super::UdpSocket; + // + // impl AsRawHandle for UdpSocket { + // fn as_raw_handle(&self) -> RawHandle { + // self.io.get_ref().as_raw_handle() + // } + // } +} diff --git a/third_party/rust/tokio-udp/tests/udp.rs b/third_party/rust/tokio-udp/tests/udp.rs new file mode 100644 index 000000000000..ef70ed23f79e --- /dev/null +++ b/third_party/rust/tokio-udp/tests/udp.rs @@ -0,0 +1,260 @@ +extern crate futures; +extern crate tokio_udp; +extern crate tokio_codec; +#[macro_use] +extern crate tokio_io; +extern crate bytes; +extern crate env_logger; + +use std::io; +use std::net::SocketAddr; + +use futures::{Future, Poll, Stream, Sink}; + +use tokio_udp::{UdpSocket, UdpFramed}; +use tokio_codec::{Encoder, Decoder}; +use bytes::{BytesMut, BufMut}; + +macro_rules! t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +fn send_messages(send: S, recv: R) { + let mut a = t!(UdpSocket::bind(&([127, 0, 0, 1], 0).into())); + let mut b = t!(UdpSocket::bind(&([127, 0, 0, 1], 0).into())); + let a_addr = t!(a.local_addr()); + let b_addr = t!(b.local_addr()); + + { + let send = SendMessage::new(a, send.clone(), b_addr, b"1234"); + let recv = RecvMessage::new(b, recv.clone(), a_addr, b"1234"); + let (sendt, received) = t!(send.join(recv).wait()); + a = sendt; + b = received; + } + + { + let send = SendMessage::new(a, send, b_addr, b""); + let recv = RecvMessage::new(b, recv, a_addr, b""); + t!(send.join(recv).wait()); + } +} + +#[test] +fn send_to_and_recv_from() { + send_messages(SendTo {}, RecvFrom {}); +} + +#[test] +fn send_and_recv() { + send_messages(Send {}, Recv {}); +} + +trait SendFn { + fn send(&self, &mut UdpSocket, &[u8], &SocketAddr) -> Result; +} + +#[derive(Debug, Clone)] +struct SendTo {} + +impl SendFn for SendTo { + fn send(&self, socket: &mut UdpSocket, buf: &[u8], addr: &SocketAddr) -> Result { + socket.send_to(buf, addr) + } +} + +#[derive(Debug, 
Clone)] +struct Send {} + +impl SendFn for Send { + fn send(&self, socket: &mut UdpSocket, buf: &[u8], addr: &SocketAddr) -> Result { + socket.connect(addr).expect("could not connect"); + socket.send(buf) + } +} + +struct SendMessage { + socket: Option, + send: S, + addr: SocketAddr, + data: &'static [u8], +} + +impl SendMessage { + fn new(socket: UdpSocket, send: S, addr: SocketAddr, data: &'static [u8]) -> SendMessage { + SendMessage { + socket: Some(socket), + send: send, + addr: addr, + data: data, + } + } +} + +impl Future for SendMessage { + type Item = UdpSocket; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let n = try_nb!(self.send.send(self.socket.as_mut().unwrap(), &self.data[..], &self.addr)); + + assert_eq!(n, self.data.len()); + + Ok(self.socket.take().unwrap().into()) + } +} + +trait RecvFn { + fn recv(&self, &mut UdpSocket, &mut [u8], &SocketAddr) -> Result; +} + +#[derive(Debug, Clone)] +struct RecvFrom {} + +impl RecvFn for RecvFrom { + fn recv(&self, socket: &mut UdpSocket, buf: &mut [u8], + expected_addr: &SocketAddr) -> Result { + socket.recv_from(buf).map(|(s, addr)| { + assert_eq!(addr, *expected_addr); + s + }) + } +} + +#[derive(Debug, Clone)] +struct Recv {} + +impl RecvFn for Recv { + fn recv(&self, socket: &mut UdpSocket, buf: &mut [u8], _: &SocketAddr) -> Result { + socket.recv(buf) + } +} + +struct RecvMessage { + socket: Option, + recv: R, + expected_addr: SocketAddr, + expected_data: &'static [u8], +} + +impl RecvMessage { + fn new(socket: UdpSocket, recv: R, expected_addr: SocketAddr, + expected_data: &'static [u8]) -> RecvMessage { + RecvMessage { + socket: Some(socket), + recv: recv, + expected_addr: expected_addr, + expected_data: expected_data, + } + } +} + +impl Future for RecvMessage { + type Item = UdpSocket; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let mut buf = vec![0u8; 10 + self.expected_data.len() * 10]; + let n = try_nb!(self.recv.recv(&mut self.socket.as_mut().unwrap(), &mut buf[..], 
+ &self.expected_addr)); + + assert_eq!(n, self.expected_data.len()); + assert_eq!(&buf[..self.expected_data.len()], &self.expected_data[..]); + + Ok(self.socket.take().unwrap().into()) + } +} + +#[test] +fn send_dgrams() { + let mut a = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()))); + let mut b = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()))); + let mut buf = [0u8; 50]; + let b_addr = t!(b.local_addr()); + + { + let send = a.send_dgram(&b"4321"[..], &b_addr); + let recv = b.recv_dgram(&mut buf[..]); + let (sendt, received) = t!(send.join(recv).wait()); + assert_eq!(received.2, 4); + assert_eq!(&received.1[..4], b"4321"); + a = sendt.0; + b = received.0; + } + + { + let send = a.send_dgram(&b""[..], &b_addr); + let recv = b.recv_dgram(&mut buf[..]); + let received = t!(send.join(recv).wait()).1; + assert_eq!(received.2, 0); + } +} + +pub struct ByteCodec; + +impl Decoder for ByteCodec { + type Item = Vec; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> Result>, io::Error> { + let len = buf.len(); + Ok(Some(buf.split_to(len).to_vec())) + } +} + +impl Encoder for ByteCodec { + type Item = Vec; + type Error = io::Error; + + fn encode(&mut self, data: Vec, buf: &mut BytesMut) -> Result<(), io::Error> { + buf.reserve(data.len()); + buf.put(data); + Ok(()) + } +} + +#[test] +fn send_framed() { + drop(env_logger::init()); + + let mut a_soc = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()))); + let mut b_soc = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()))); + let a_addr = t!(a_soc.local_addr()); + let b_addr = t!(b_soc.local_addr()); + + { + let a = UdpFramed::new(a_soc, ByteCodec); + let b = UdpFramed::new(b_soc, ByteCodec); + + let msg = b"4567".to_vec(); + + let send = a.send((msg.clone(), b_addr)); + let recv = b.into_future().map_err(|e| e.0); + let (sendt, received) = t!(send.join(recv).wait()); + + let (data, addr) = received.0.unwrap(); + assert_eq!(msg, data); + assert_eq!(a_addr, addr); + + a_soc = sendt.into_inner(); + b_soc = 
received.1.into_inner(); + } + + { + let a = UdpFramed::new(a_soc, ByteCodec); + let b = UdpFramed::new(b_soc, ByteCodec); + + let msg = b"".to_vec(); + + let send = a.send((msg.clone(), b_addr)); + let recv = b.into_future().map_err(|e| e.0); + let received = t!(send.join(recv).wait()).1; + + let (data, addr) = received.0.unwrap(); + assert_eq!(msg, data); + assert_eq!(a_addr, addr); + } +} diff --git a/third_party/rust/tokio/.appveyor.yml b/third_party/rust/tokio/.appveyor.yml new file mode 100644 index 000000000000..ebb36df5bad6 --- /dev/null +++ b/third_party/rust/tokio/.appveyor.yml @@ -0,0 +1,19 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + platform: x64 + - TARGET: i686-pc-windows-msvc + platform: x86 + +install: + - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host %TARGET% + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --all --target %TARGET% diff --git a/third_party/rust/tokio/.cargo-checksum.json b/third_party/rust/tokio/.cargo-checksum.json new file mode 100644 index 000000000000..d07f7958be3e --- /dev/null +++ b/third_party/rust/tokio/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".appveyor.yml":"dcedb252adb4de5c07ea049244e1552146c4800e6c705a06e9acc6e6374a5d09",".travis.yml":"ac7913637497a90896bbe6c442426a8b854e23d657f35066168d427ec513bdea","CHANGELOG.md":"2fd73b7668da97c769a93a535d9eda78e10bc3b15def3262a40a468845cd075e","Cargo.toml":"05ee84f6aa9a48a4359782041ac4ea0cbf1937350d6e831f4b205440236021d3","LICENSE":"4899c290472c872cf8a1904a60e73ec58a1bc1db2e20bc143aa3d1498be49c96","README.md":"3f5b65915ac668865bac7b0b5e77ab62f984628524d2e7b9ce96afef7c6b70d0","benches/latency.rs":"b8f62578cf5784efa201549535e2a0d3bda78d78cf24d2be662851c35a1339df","benches/mio-ops.rs":"0df1a47f9bb3c8ed4291244da8c39d956b7a23cad9be7b2a5009fa58d39f8330","benches/tcp.rs":"85220e1b755ca3803d2a45db4a20f97a773a903b357ede7e696bc551803ff68f","ci/tsan":"f3e69d92a43136d9ddd627dd3b5ce548aff972c6cf2a19724395a1a7abb282d3","examples/README.md":"c976ccc5c8b44caf31ef3a1c5ac13605898e087bc9b5f7e5a7ac9e8c44157a99","examples/chat-combinator.rs":"a0a621dfbc0ec63fe78c5ef49403787fcf44f9ecc83e0cae36dd2e6f0b3480aa","examples/chat.rs":"261804834dab85451bcdbd7240db9c188a310d01152d7039ea0051ced29f12ba","examples/connect.rs":"738d3c280644ea865fa5bb14d4450145317be090bd041f7317d4acea088263cd","examples/echo-udp.rs":"a1f4105a80d82b8ea84cf2deaafafb654357bde451cec57621ff89fd168a0723","examples/echo.rs":"0819864913f0e390b48c620bb67a2b88d6c5d6e11229ec1f5eeb935092353dce","examples/hello_world.rs":"766db4454412ff30dad9655c80a5fcd43e15327ea4703c20771a4e7ac4112dac","examples/manual-runtime.rs":"38b38e88cf9c1db5edd54eb6ef1a41e227799376d982a3f5e5b1dbb203af1d6e","examples/print_each_packet.rs":"4e549135bb47e140b61e290f9e2e676dc79902ee923a0a50701666411b4d2f6e","examples/proxy.rs":"3f28f1a0bac9ae57c3bf6770679717d55220460fcfab05fb62e356f5f07586ee","examples/tinydb.rs":"0b8bc38310b0a619eb46db040cab8257d7357d2c3e86ee5297a640b255fabb55","examples/tinyhttp.rs":"4e4c14ac00ab1167edef586e76b78d8ed21a9053c260e4326e72303ba263ef9b","examples/udp-client.rs":"2df32c59e5dda17d7c18c6c7faf28a3f9ce2e07c97a908c5ee4ca97
c3d775874","examples/udp-codec.rs":"ff1cfd19a7c0788de52a27857488a4ce117fe72b72ba340d836cb5d184e53696","src/clock.rs":"51ad21211d283ca2d4367668a95ab38b8a8ab02a3dd5a019d45675a3f7b202f8","src/executor/current_thread/mod.rs":"e7122984fdd25b23ac344bf2d95991f1f0b02b73ad67acd1f738de665c522d61","src/executor/current_thread/scheduler.rs":"e9fe0c7dc8186b57f453860e82f8af6d58d36fa3f6472abb90cbede508832a67","src/executor/mod.rs":"a84f767b5e8ac6ae412f29e7a42931b6c90df8291f3bd4349a3b9c76dab89f5d","src/fs.rs":"6bbc0dcc0b54589ddd924a6b66bb72e6ce784ddcbce92eaa92f8fc9bc91ebea8","src/lib.rs":"0afa9b0552a71cf2f4e2106668b3f6370f0e3d407a10b7d699760a5043a7af51","src/net.rs":"3d93450c3bde5efc9aa3ed4406a2ea54998be3022dfd31ba3b971f4d7a00e285","src/reactor/mod.rs":"97515d9d5fbb2a150125c63b1414eec6ce954d62df48f455b299a1f4d6c221b4","src/reactor/poll_evented.rs":"6d1233e82ebdefe42c63c3bbca839d99aab5873aeca2bb11787660ecd39ad2f9","src/runtime/builder.rs":"6c414b6b0d8ce281bc8fe5a3ce2093f4f22410e2a1dd948de5819f89865c3ca6","src/runtime/current_thread/builder.rs":"30f1dae794e0db2b4263eb1e96b71c12c91fc08d2845cbb7cb540cd31a3b1612","src/runtime/current_thread/mod.rs":"ebd7fbf6df2a37d3762ab674eac9165ec93df872add47d907325781a56ce8202","src/runtime/current_thread/runtime.rs":"5fb70407f1b2e41bb542d03e1ea844e1f275ea60adfd095e3d0a32a20c83055f","src/runtime/mod.rs":"53368474d095521a9ea1442ab8136d33c692a4416c5427aa454e105d13f91c4e","src/runtime/shutdown.rs":"a3f23cbff014a0ac6dc8a75839f636bc71fce4d5e2e2d34fa76f4ba51ca3ff95","src/runtime/task_executor.rs":"4db614068a5064f1fc7866402d7ad75dd328aa320da461a1d16fff7129bf4a5b","src/timer.rs":"bcff7eb187cf6406f34a6590deeafb168f2c265b377b5d6b98be63f44fd7e10a","src/util/future.rs":"98252223fe282de280791ae9586fee36f477b35ac1143b622141e798dc53b98b","src/util/mod.rs":"374cf5c1cff49362d91e44a7ec6c4abb89573e4a21c6fc2e5e47a55b6ea6c49c","tests/buffered.rs":"26fff50fbf68de6374bb9608903f6f2ddafc89486f391aeb00305fe1fbbbefa1","tests/clock.rs":"8241dd77817aa039cf16e285c71b75fc06c0b0fc2
6d5edca496370057780cb69","tests/current_thread.rs":"a3d7bb3f867b8c0287ae441c2125f5a61047373899429553b7a53d42fd7f5622","tests/drop-core.rs":"9a074dd521840d28e5c740a767aaae4c957614901a7f4c6b330cc1560ef50fd9","tests/echo2.rs":"ea69e1fef4a29db0b653fd86cf00487df624202009318386819c8e77cc044dc5","tests/global.rs":"c85292f691febce0730c628edf318099bc44681ff4abe7fb700a37aaa27b6d07","tests/global2.rs":"2f2d74d66c0756d69efc24f92e6d4329e30da54f91bd07a883c4b704d9f9092f","tests/line-frames.rs":"5156eb6937d1bb63efab75b811a3be331a4044d2b81892358ef2731d2479f437","tests/pipe-hup.rs":"a43861363bfdd749db1298972aad89a128411242d366a11bd11a5cb7e1d8bff2","tests/runtime.rs":"47fe4a9d9dcadfe6d4e9ad257dd8fa64204e4018f8311956efd36b251c0deec4","tests/tcp2.rs":"7571f378c1a9945b8d0767d105b63729bdd1542de9efdb32d7efb30c69736208","tests/timer.rs":"2d79fe7fbc685e163911418693ee58c393f4e2f0fb259a6d5d62779d5852edbf"},"package":"8ee337e5f4e501fc32966fec6fe0ca0cc1c237b0b1b14a335f8bfe3c5f06e286"} \ No newline at end of file diff --git a/third_party/rust/tokio/.travis.yml b/third_party/rust/tokio/.travis.yml new file mode 100644 index 000000000000..3d4cae6f0d13 --- /dev/null +++ b/third_party/rust/tokio/.travis.yml @@ -0,0 +1,95 @@ +--- +language: rust +sudo: false +cache: + - apt + - cargo +addons: + apt: + packages: + # to x-compile miniz-sys from sources + - gcc-multilib + +matrix: + include: + # This represents the minimum Rust version supported by Tokio. Updating this + # should be done in a dedicated PR and cannot be greater than two 0.x + # releases prior to the current stable. 
+ - rust: 1.21.0 + - rust: stable + - rust: beta + - rust: nightly + - os: osx + - env: TARGET=x86_64-unknown-freebsd + - env: TARGET=i686-unknown-freebsd + - env: TARGET=i686-unknown-linux-gnu + +script: + - | + set -e + if [[ "$TRAVIS_RUST_VERSION" == nightly ]] + then + # Make sure the benchmarks compile + cargo build --benches --all + + export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" + export TSAN_OPTIONS="suppressions=`pwd`/ci/tsan" + + # === tokio-timer ==== + + # Run address sanitizer + RUSTFLAGS="-Z sanitizer=address" \ + cargo test -p tokio-timer --test hammer --target x86_64-unknown-linux-gnu + + # Run thread sanitizer + RUSTFLAGS="-Z sanitizer=thread" \ + cargo test -p tokio-timer --test hammer --target x86_64-unknown-linux-gnu + + # === tokio-threadpool ==== + + # Run address sanitizer + RUSTFLAGS="-Z sanitizer=address" \ + cargo test -p tokio-threadpool --tests + + # Run thread sanitizer + RUSTFLAGS="-Z sanitizer=thread" \ + cargo test -p tokio-threadpool --tests + fi + - | + set -e + if [[ "$TARGET" ]] + then + rustup target add $TARGET + cargo check --all --target $TARGET + cargo check --tests --all --target $TARGET + else + cargo test --all + # Disable these tests for now as they are buggy + # + # cargo test --features unstable-futures + # cargo test --manifest-path tokio-threadpool/Cargo.toml --features unstable-futures + # cargo test --manifest-path tokio-reactor/Cargo.toml --features unstable-futures + fi + +before_deploy: + - cargo doc --all --no-deps + +deploy: + provider: pages + skip_cleanup: true + github_token: $GH_TOKEN + target_branch: gh-pages + local_dir: target/doc + on: + branch: master + repo: tokio-rs/tokio + rust: stable + condition: $TRAVIS_OS_NAME = linux + +env: + global: + - secure: 
iwlN1zfUCp/5BAAheqIRSFIqiM9zSwfIGcVDw/V7jHveqXyNzmCs7H58/cd90WLqonqpPX0t5GF66oTjms4v0DFjgXr/k4358qeSZaV082V3baNrVpCDHeCQV0SvKsfiYxDDJGSUL1WIUP+tqqDm4+ksZQP3LnwZojkABjWz5CBNt4kX+Wz5ZbYqtQoxyuZba5UyPY2CXJtubvCVPGMJULuUpklYxXZ4dWM2olzGgVJ8rE8udhSZ4ER4JgxB0KUx3/5TwHHzgyPEsWR4bKN6JzBjIczQofXUcUXXdoZBs23H/VhCpzKcn3/oJ8btVYPzwtdj5FmVB1aVR/gjPo2bSGi/sofq+LwL/1HJXkM+kjl8m2dLLcDBKqNYNERtVA1++LhkMWAFRgGYe8v8Ryxjiue1NF5LgAIA/fjK0uI1DELTzTf/TKrM+AtPDNTvhOft4/YD+hoImjwk6nv6PBb2TiTYnc79Qf4AZ65tv1qtsAUPuw4plLaccHQAO4ldYVXn4u9c+iisJwvovs6jo06bF3U3qtdI5gXsrI9+T25TrXvYb+IREo0MHzYEM0KlPFnscEArzC3eajuSd36ARFP3lDc+gp2RPs89iJjowms0eRyepp7Cu6XO3Cd2pfAX8AqvnmttZf4Nm51ONeiBPXPXItUkJm49MCpMJywU1IZcWZg= + +notifications: + email: + on_success: never diff --git a/third_party/rust/tokio/CHANGELOG.md b/third_party/rust/tokio/CHANGELOG.md new file mode 100644 index 000000000000..227237fe5e94 --- /dev/null +++ b/third_party/rust/tokio/CHANGELOG.md @@ -0,0 +1,48 @@ +# 0.1.7 (June 6, 2018) + +* Add `Runtime::block_on` for concurrent runtime (#391). +* Provide handle to `current_thread::Runtime` that allows spawning tasks from + other threads (#340). +* Provide `clock::now()`, a configurable source of time (#381). + +# 0.1.6 (May 2, 2018) + +* Add asynchronous filesystem APIs (#323). +* Add "current thread" runtime variant (#308). +* `CurrentThread`: Expose inner `Park` instance. +* Improve fairness of `CurrentThread` executor (#313). + +# 0.1.5 (March 30, 2018) + +* Provide timer API (#266) + +# 0.1.4 (March 22, 2018) + +* Fix build on FreeBSD (#218) +* Shutdown the Runtime when the handle is dropped (#214) +* Set Runtime thread name prefix for worker threads (#232) +* Add builder for Runtime (#234) +* Extract TCP and UDP types into separate crates (#224) +* Optionally support futures 0.2. + +# 0.1.3 (March 09, 2018) + +* Fix `CurrentThread::turn` to block on idle (#212). 
+ +# 0.1.2 (March 09, 2018) + +* Introduce Tokio Runtime (#141) +* Provide `CurrentThread` for more flexible usage of current thread executor (#141). +* Add Lio for platforms that support it (#142). +* I/O resources now lazily bind to the reactor (#160). +* Extract Reactor to dedicated crate (#169) +* Add facade to sub crates and add prelude (#166). +* Switch TCP/UDP fns to poll_ -> Poll<...> style (#175) + +# 0.1.1 (February 09, 2018) + +* Doc fixes + +# 0.1.0 (February 07, 2018) + +* Initial crate released based on [RFC](https://github.com/tokio-rs/tokio-rfcs/pull/3). diff --git a/third_party/rust/tokio/Cargo.toml b/third_party/rust/tokio/Cargo.toml new file mode 100644 index 000000000000..184b500c103a --- /dev/null +++ b/third_party/rust/tokio/Cargo.toml @@ -0,0 +1,96 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "tokio" +version = "0.1.7" +authors = ["Carl Lerche "] +description = "An event-driven, non-blocking I/O platform for writing asynchronous I/O\nbacked applications.\n" +homepage = "https://tokio.rs" +documentation = "https://docs.rs/tokio/0.1" +readme = "README.md" +keywords = ["io", "async", "non-blocking", "futures"] +categories = ["asynchronous", "network-programming"] +license = "MIT" +repository = "https://github.com/tokio-rs/tokio" +[dependencies.futures] +version = "0.1.20" + +[dependencies.mio] +version = "0.6.14" + +[dependencies.tokio-executor] +version = "0.1.2" + +[dependencies.tokio-fs] +version = "0.1.0" + +[dependencies.tokio-io] +version = "0.1.6" + +[dependencies.tokio-reactor] +version = "0.1.1" + +[dependencies.tokio-tcp] +version = "0.1.0" + +[dependencies.tokio-threadpool] +version = "0.1.4" + +[dependencies.tokio-timer] +version = "0.2.4" + +[dependencies.tokio-udp] +version = "0.1.0" +[dev-dependencies.bytes] +version = "0.4" + +[dev-dependencies.env_logger] +version = "0.4" +default-features = false + +[dev-dependencies.flate2] +version = "1" +features = ["tokio"] + +[dev-dependencies.futures-cpupool] +version = "0.1" + +[dev-dependencies.http] +version = "0.1" + +[dev-dependencies.httparse] +version = "1.0" + +[dev-dependencies.libc] +version = "0.2" + +[dev-dependencies.num_cpus] +version = "1.0" + +[dev-dependencies.serde] +version = "1.0" + +[dev-dependencies.serde_derive] +version = "1.0" + +[dev-dependencies.serde_json] +version = "1.0" + +[dev-dependencies.time] +version = "0.1" +[badges.appveyor] +id = "s83yxhy9qeb58va7" +repository = "carllerche/tokio" + +[badges.travis-ci] +repository = "tokio-rs/tokio" diff --git a/third_party/rust/tokio/LICENSE b/third_party/rust/tokio/LICENSE new file mode 100644 index 000000000000..38c1e27b8e2e --- /dev/null +++ b/third_party/rust/tokio/LICENSE 
@@ -0,0 +1,25 @@ +Copyright (c) 2018 Tokio Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio/README.md b/third_party/rust/tokio/README.md new file mode 100644 index 000000000000..44ed340cd8b4 --- /dev/null +++ b/third_party/rust/tokio/README.md @@ -0,0 +1,153 @@ +# Tokio + +A runtime for writing reliable, asynchronous, and slim applications with +the Rust programming language. It is: + +* **Fast**: Tokio's zero-cost abstractions give you bare-metal + performance. + +* **Reliable**: Tokio leverages Rust's ownership, type system, and + concurrency model to reduce bugs and ensure thread safety. + +* **Scalable**: Tokio has a minimal footprint, and handles backpressure + and cancellation naturally. 
+ +[![Crates.io][crates-badge]][crates-url] +[![MIT licensed][mit-badge]][mit-url] +[![Travis Build Status][travis-badge]][travis-url] +[![Appveyor Build Status][appveyor-badge]][appveyor-url] +[![Gitter chat][gitter-badge]][gitter-url] + +[crates-badge]: https://img.shields.io/crates/v/tokio.svg +[crates-url]: https://crates.io/crates/tokio +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE-MIT +[travis-badge]: https://travis-ci.org/tokio-rs/tokio.svg?branch=master +[travis-url]: https://travis-ci.org/tokio-rs/tokio +[appveyor-badge]: https://ci.appveyor.com/api/projects/status/s83yxhy9qeb58va7/branch/master?svg=true +[appveyor-url]: https://ci.appveyor.com/project/carllerche/tokio/branch/master +[gitter-badge]: https://img.shields.io/gitter/room/tokio-rs/tokio.svg +[gitter-url]: https://gitter.im/tokio-rs/tokio + +[Website](https://tokio.rs) | +[Guides](https://tokio.rs/docs/getting-started/hello-world/) | +[API Docs](https://docs.rs/tokio) | +[Chat](https://gitter.im/tokio-rs/tokio) + +The API docs for the master branch are published [here][master-dox]. + +[master-dox]: https://tokio-rs.github.io/tokio/tokio/ + +## Overview + +Tokio is an event-driven, non-blocking I/O platform for writing +asynchronous applications with the Rust programming language. At a high +level, it provides a few major components: + +* A multithreaded, work-stealing based task [scheduler]. +* A [reactor] backed by the operating system's event queue (epoll, kqueue, + IOCP, etc...). +* Asynchronous [TCP and UDP][net] sockets. + +These components provide the runtime components necessary for building +an asynchronous application. 
+ +[net]: https://docs.rs/tokio/0.1/tokio/net/index.html +[reactor]: https://docs.rs/tokio/0.1.1/tokio/reactor/index.html +[scheduler]: https://tokio-rs.github.io/tokio/tokio/runtime/index.html + +## Example + +A basic TCP echo server with Tokio: + +```rust +extern crate tokio; + +use tokio::prelude::*; +use tokio::io::copy; +use tokio::net::TcpListener; + +fn main() { + // Bind the server's socket. + let addr = "127.0.0.1:12345".parse().unwrap(); + let listener = TcpListener::bind(&addr) + .expect("unable to bind TCP listener"); + + // Pull out a stream of sockets for incoming connections + let server = listener.incoming() + .map_err(|e| eprintln!("accept failed = {:?}", e)) + .for_each(|sock| { + // Split up the reading and writing parts of the + // socket. + let (reader, writer) = sock.split(); + + // A future that echos the data and returns how + // many bytes were copied... + let bytes_copied = copy(reader, writer); + + // ... after which we'll print what happened. + let handle_conn = bytes_copied.map(|amt| { + println!("wrote {:?} bytes", amt) + }).map_err(|err| { + eprintln!("IO error {:?}", err) + }); + + // Spawn the future as a concurrent task. + tokio::spawn(handle_conn) + }); + + // Start the Tokio runtime + tokio::run(server); +} +``` + +More examples can be found [here](examples). + +## Project layout + +The `tokio` crate, found at the root, is primarily intended for use by +application developers. Library authors should depend on the sub crates, which +have greater guarantees of stability. + +The crates included as part of Tokio are: + +* [`tokio-executor`]: Task execution related traits and utilities. + +* [`tokio-fs`]: Filesystem (and standard in / out) APIs. + +* [`tokio-io`]: Asynchronous I/O related traits and utilities. + +* [`tokio-reactor`]: Event loop that drives I/O resources (like TCP and UDP + sockets). + +* [`tokio-tcp`]: TCP bindings for use with `tokio-io` and `tokio-reactor`. 
+ +* [`tokio-threadpool`]: Schedules the execution of futures across a pool of + threads. + +* [ `tokio-timer`]: Time related APIs. + +* [`tokio-udp`]: UDP bindings for use with `tokio-io` and `tokio-reactor`. + +* [`tokio-uds`]: Unix Domain Socket bindings for use with `tokio-io` and + `tokio-reactor`. + +[`tokio-executor`]: tokio-executor +[`tokio-fs`]: tokio-fs +[`tokio-io`]: tokio-io +[`tokio-reactor`]: tokio-reactor +[`tokio-tcp`]: tokio-tcp +[`tokio-threadpool`]: tokio-threadpool +[`tokio-timer`]: tokio-timer +[`tokio-udp`]: tokio-udp +[`tokio-uds`]: tokio-uds + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. diff --git a/third_party/rust/tokio/benches/latency.rs b/third_party/rust/tokio/benches/latency.rs new file mode 100644 index 000000000000..c2619b711545 --- /dev/null +++ b/third_party/rust/tokio/benches/latency.rs @@ -0,0 +1,117 @@ +#![feature(test)] +#![deny(warnings)] + +extern crate test; +#[macro_use] +extern crate futures; +extern crate tokio; + +use std::io; +use std::net::SocketAddr; +use std::thread; + +use futures::sync::oneshot; +use futures::sync::mpsc; +use futures::{Future, Poll, Sink, Stream}; +use test::Bencher; +use tokio::net::UdpSocket; + +/// UDP echo server +struct EchoServer { + socket: UdpSocket, + buf: Vec, + to_send: Option<(usize, SocketAddr)>, +} + +impl EchoServer { + fn new(s: UdpSocket) -> Self { + EchoServer { + socket: s, + to_send: None, + buf: vec![0u8; 1600], + } + } +} + +impl Future for EchoServer { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + loop { + if let Some(&(size, peer)) = self.to_send.as_ref() { + try_ready!(self.socket.poll_send_to(&self.buf[..size], &peer)); + self.to_send = None; + } + self.to_send = 
Some(try_ready!(self.socket.poll_recv_from(&mut self.buf))); + } + } +} + +#[bench] +fn udp_echo_latency(b: &mut Bencher) { + let any_addr = "127.0.0.1:0".to_string(); + let any_addr = any_addr.parse::().unwrap(); + + let (stop_c, stop_p) = oneshot::channel::<()>(); + let (tx, rx) = oneshot::channel(); + + let child = thread::spawn(move || { + + let socket = tokio::net::UdpSocket::bind(&any_addr).unwrap(); + tx.send(socket.local_addr().unwrap()).unwrap(); + + let server = EchoServer::new(socket); + let server = server.select(stop_p.map_err(|_| panic!())); + let server = server.map_err(|_| ()); + server.wait().unwrap(); + }); + + + let client = std::net::UdpSocket::bind(&any_addr).unwrap(); + + let server_addr = rx.wait().unwrap(); + let mut buf = [0u8; 1000]; + + // warmup phase; for some reason initial couple of + // runs are much slower + // + // TODO: Describe the exact reasons; caching? branch predictor? lazy closures? + for _ in 0..8 { + client.send_to(&buf, &server_addr).unwrap(); + let _ = client.recv_from(&mut buf).unwrap(); + } + + b.iter(|| { + client.send_to(&buf, &server_addr).unwrap(); + let _ = client.recv_from(&mut buf).unwrap(); + }); + + stop_c.send(()).unwrap(); + child.join().unwrap(); +} + +#[bench] +fn futures_channel_latency(b: &mut Bencher) { + let (mut in_tx, in_rx) = mpsc::channel(32); + let (out_tx, out_rx) = mpsc::channel::<_>(32); + + let child = thread::spawn(|| out_tx.send_all(in_rx.then(|r| r.unwrap())).wait()); + let mut rx_iter = out_rx.wait(); + + // warmup phase; for some reason initial couple of runs are much slower + // + // TODO: Describe the exact reasons; caching? branch predictor? lazy closures? 
+ for _ in 0..8 { + in_tx.start_send(Ok(1usize)).unwrap(); + let _ = rx_iter.next(); + } + + b.iter(|| { + in_tx.start_send(Ok(1usize)).unwrap(); + let _ = rx_iter.next(); + }); + + drop(in_tx); + child.join().unwrap().unwrap(); +} diff --git a/third_party/rust/tokio/benches/mio-ops.rs b/third_party/rust/tokio/benches/mio-ops.rs new file mode 100644 index 000000000000..6a71bebfe02c --- /dev/null +++ b/third_party/rust/tokio/benches/mio-ops.rs @@ -0,0 +1,58 @@ +// Measure cost of different operations +// to get a sense of performance tradeoffs +#![feature(test)] +#![deny(warnings)] + +extern crate test; +extern crate mio; + +use test::Bencher; + +use mio::tcp::TcpListener; +use mio::{Token, Ready, PollOpt}; + + +#[bench] +fn mio_register_deregister(b: &mut Bencher) { + let addr = "127.0.0.1:0".parse().unwrap(); + // Setup the server socket + let sock = TcpListener::bind(&addr).unwrap(); + let poll = mio::Poll::new().unwrap(); + + const CLIENT: Token = Token(1); + + b.iter(|| { + poll.register(&sock, CLIENT, Ready::readable(), + PollOpt::edge()).unwrap(); + poll.deregister(&sock).unwrap(); + }); +} + +#[bench] +fn mio_reregister(b: &mut Bencher) { + let addr = "127.0.0.1:0".parse().unwrap(); + // Setup the server socket + let sock = TcpListener::bind(&addr).unwrap(); + let poll = mio::Poll::new().unwrap(); + + const CLIENT: Token = Token(1); + poll.register(&sock, CLIENT, Ready::readable(), + PollOpt::edge()).unwrap(); + + b.iter(|| { + poll.reregister(&sock, CLIENT, Ready::readable(), + PollOpt::edge()).unwrap(); + }); + poll.deregister(&sock).unwrap(); +} + +#[bench] +fn mio_poll(b: &mut Bencher) { + let poll = mio::Poll::new().unwrap(); + let timeout = std::time::Duration::new(0, 0); + let mut events = mio::Events::with_capacity(1024); + + b.iter(|| { + poll.poll(&mut events, Some(timeout)).unwrap(); + }); +} diff --git a/third_party/rust/tokio/benches/tcp.rs b/third_party/rust/tokio/benches/tcp.rs new file mode 100644 index 000000000000..45ff371126cc --- 
/dev/null +++ b/third_party/rust/tokio/benches/tcp.rs @@ -0,0 +1,249 @@ +#![feature(test)] +#![deny(warnings)] + +extern crate futures; +extern crate tokio; + +#[macro_use] +extern crate tokio_io; + +pub extern crate test; + +mod prelude { + pub use futures::*; + pub use tokio::reactor::Reactor; + pub use tokio::net::{TcpListener, TcpStream}; + pub use tokio::executor::current_thread; + pub use tokio_io::io::read_to_end; + + pub use test::{self, Bencher}; + pub use std::thread; + pub use std::time::Duration; + pub use std::io::{self, Read, Write}; +} + +mod connect_churn { + use ::prelude::*; + + const NUM: usize = 300; + const CONCURRENT: usize = 8; + + #[bench] + fn one_thread(b: &mut Bencher) { + let addr = "127.0.0.1:0".parse().unwrap(); + + b.iter(move || { + let listener = TcpListener::bind(&addr).unwrap(); + let addr = listener.local_addr().unwrap(); + + // Spawn a single future that accepts & drops connections + let serve_incomings = listener.incoming() + .map_err(|e| panic!("server err: {:?}", e)) + .for_each(|_| Ok(())); + + let connects = stream::iter_result((0..NUM).map(|_| { + Ok(TcpStream::connect(&addr) + .and_then(|sock| { + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + read_to_end(sock, vec![]) + })) + })); + + let connects_concurrent = connects.buffer_unordered(CONCURRENT) + .map_err(|e| panic!("client err: {:?}", e)) + .for_each(|_| Ok(())); + + serve_incomings.select(connects_concurrent) + .map(|_| ()).map_err(|_| ()) + .wait().unwrap(); + }); + } + + fn n_workers(n: usize, b: &mut Bencher) { + let (shutdown_tx, shutdown_rx) = sync::oneshot::channel(); + let (addr_tx, addr_rx) = sync::oneshot::channel(); + + // Spawn reactor thread + let server_thread = thread::spawn(move || { + // Bind the TCP listener + let listener = TcpListener::bind( + &"127.0.0.1:0".parse().unwrap()).unwrap(); + + // Get the address being listened on. 
+ let addr = listener.local_addr().unwrap(); + + // Send the remote & address back to the main thread + addr_tx.send(addr).unwrap(); + + // Spawn a single future that accepts & drops connections + let serve_incomings = listener.incoming() + .map_err(|e| panic!("server err: {:?}", e)) + .for_each(|_| Ok(())); + + // Run server + serve_incomings.select(shutdown_rx) + .map(|_| ()).map_err(|_| ()) + .wait().unwrap(); + }); + + // Get the bind addr of the server + let addr = addr_rx.wait().unwrap(); + + b.iter(move || { + use std::sync::{Barrier, Arc}; + + // Create a barrier to coordinate threads + let barrier = Arc::new(Barrier::new(n + 1)); + + // Spawn worker threads + let threads: Vec<_> = (0..n).map(|_| { + let barrier = barrier.clone(); + let addr = addr.clone(); + + thread::spawn(move || { + let connects = stream::iter_result((0..(NUM / n)).map(|_| { + Ok(TcpStream::connect(&addr) + .map_err(|e| panic!("connect err: {:?}", e)) + .and_then(|sock| { + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + read_to_end(sock, vec![]) + })) + })); + + barrier.wait(); + + connects.buffer_unordered(CONCURRENT) + .map_err(|e| panic!("client err: {:?}", e)) + .for_each(|_| Ok(())).wait().unwrap(); + }) + }).collect(); + + barrier.wait(); + + for th in threads { + th.join().unwrap(); + } + }); + + // Shutdown the server + shutdown_tx.send(()).unwrap(); + server_thread.join().unwrap(); + } + + #[bench] + fn two_threads(b: &mut Bencher) { + n_workers(1, b); + } + + #[bench] + fn multi_threads(b: &mut Bencher) { + n_workers(4, b); + } +} + +mod transfer { + use ::prelude::*; + use std::{cmp, mem}; + + const MB: usize = 3 * 1024 * 1024; + + struct Drain { + sock: TcpStream, + chunk: usize, + } + + impl Future for Drain { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + let mut buf: [u8; 1024] = unsafe { mem::uninitialized() }; + + loop { + match try_nb!(self.sock.read(&mut buf[..self.chunk])) { + 0 => return Ok(Async::Ready(())), 
+ _ => {} + } + } + } + } + + struct Transfer { + sock: TcpStream, + rem: usize, + chunk: usize, + } + + impl Future for Transfer { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + while self.rem > 0 { + let len = cmp::min(self.rem, self.chunk); + let buf = &DATA[..len]; + + let n = try_nb!(self.sock.write(&buf)); + self.rem -= n; + } + + Ok(Async::Ready(())) + } + } + + static DATA: [u8; 1024] = [0; 1024]; + + fn one_thread(b: &mut Bencher, read_size: usize, write_size: usize) { + let addr = "127.0.0.1:0".parse().unwrap(); + + b.iter(move || { + let listener = TcpListener::bind(&addr).unwrap(); + let addr = listener.local_addr().unwrap(); + + // Spawn a single future that accepts 1 connection, Drain it and drops + let server = listener.incoming() + .into_future() // take the first connection + .map_err(|(e, _other_incomings)| e) + .map(|(connection, _other_incomings)| connection.unwrap()) + .and_then(|sock| { + sock.set_linger(Some(Duration::from_secs(0))).unwrap(); + let drain = Drain { + sock: sock, + chunk: read_size, + }; + drain.map(|_| ()).map_err(|e| panic!("server error: {:?}", e)) + }) + .map_err(|e| panic!("server err: {:?}", e)); + + let client = TcpStream::connect(&addr) + .and_then(move |sock| { + Transfer { + sock: sock, + rem: MB, + chunk: write_size, + } + }) + .map_err(|e| panic!("client err: {:?}", e)); + + server.join(client).wait().unwrap(); + }); + } + + mod small_chunks { + use ::prelude::*; + + #[bench] + fn one_thread(b: &mut Bencher) { + super::one_thread(b, 32, 32); + } + } + + mod big_chunks { + use ::prelude::*; + + #[bench] + fn one_thread(b: &mut Bencher) { + super::one_thread(b, 1_024, 1_024); + } + } +} diff --git a/third_party/rust/tokio/ci/tsan b/third_party/rust/tokio/ci/tsan new file mode 100644 index 000000000000..22a358abfec3 --- /dev/null +++ b/third_party/rust/tokio/ci/tsan @@ -0,0 +1,33 @@ +# TSAN suppressions file for Tokio + +# TSAN does not understand fences and `Arc::drop` is 
implemented using a fence. +# This causes many false positives. +race:Arc*drop +race:arc*Weak*drop + +# `std` mpsc is not used in any Tokio code base. This race is triggered by some +# rust runtime logic. +race:std*mpsc_queue + +# Probably more fences in std. +race:__call_tls_dtors + +# The crossbeam deque uses fences. +race:crossbeam_deque + +# This is excluded as this race shows up due to using the stealing features of +# the deque. Unfortunately, the implementation uses a fence, which makes tsan +# unhappy. +# +# TODO: It would be nice to not have to filter this out. +race:try_steal_task + +# This filters out expected data race in the treiber stack implementations. +# Treiber stacks are inherently racy. The pop operation will attempt to access +# the "next" pointer on the node it is attempting to pop. However, at this +# point it has not gained ownership of the node and another thread might beat +# it and take ownership of the node first (touching the next pointer). The +# original pop operation will fail due to the ABA guard, but tsan still picks +# up the access on the next pointer. +race:Backup::next_sleeper +race:WorkerEntry::set_next_sleeper diff --git a/third_party/rust/tokio/examples/README.md b/third_party/rust/tokio/examples/README.md new file mode 100644 index 000000000000..63634c82b617 --- /dev/null +++ b/third_party/rust/tokio/examples/README.md @@ -0,0 +1,60 @@ +## Examples of how to use Tokio + +This directory contains a number of examples showcasing various capabilities of +the `tokio` crate. + +All examples can be executed with: + +``` +cargo run --example $name +``` + +A high level description of each example is: + +* [`hello_world`](hello_world.rs) - a tiny server that writes "hello world" to + all connected clients and then terminates the connection, should help see how + to create and initialize `tokio`. 
+ +* [`echo`](echo.rs) - this is your standard TCP "echo server" which accepts + connections and then echos back any contents that are read from each connected + client. + +* [`print_each_packet`](print_each_packet.rs) - this server will create a TCP + listener, accept connections in a loop, and put down in the stdout everything + that's read off of each TCP connection. + +* [`echo-udp`](echo-udp.rs) - again your standard "echo server", except for UDP + instead of TCP. This will echo back any packets received to the original + sender. + +* [`connect`](connect.rs) - this is a `nc`-like clone which can be used to + interact with most other examples. The program creates a TCP connection or UDP + socket to sends all information read on stdin to the remote peer, displaying + any data received on stdout. Often quite useful when interacting with the + various other servers here! + +* [`chat`](chat.rs) - this spins up a local TCP server which will broadcast from + any connected client to all other connected clients. You can connect to this + in multiple terminals and use it to chat between the terminals. + +* [`chat-combinator`](chat-combinator.rs) - Similar to `chat`, but this uses a + much more functional programming approach using combinators. + +* [`proxy`](proxy.rs) - an example proxy server that will forward all connected + TCP clients to the remote address specified when starting the program. + +* [`tinyhttp`](tinyhttp.rs) - a tiny HTTP/1.1 server which doesn't support HTTP + request bodies showcasing running on multiple cores, working with futures and + spawning tasks, and finally framing a TCP connection to discrete + request/response objects. + +* [`tinydb`](tinydb.rs) - an in-memory database which shows sharing state + between all connected clients, notably the key/value store of this database. + +* [`udp-client`](udp-client.rs) - a simple `send_dgram`/`recv_dgram` example. + +* [`manual-runtime`](manual-runtime.rs) - manually composing a runtime. 
+ +If you've got an example you'd like to see here, please feel free to open an +issue. Otherwise if you've got an example you'd like to add, please feel free +to make a PR! diff --git a/third_party/rust/tokio/examples/chat-combinator.rs b/third_party/rust/tokio/examples/chat-combinator.rs new file mode 100644 index 000000000000..117541837001 --- /dev/null +++ b/third_party/rust/tokio/examples/chat-combinator.rs @@ -0,0 +1,150 @@ +//! A chat server that broadcasts a message to all connections. +//! +//! This is a line-based server which accepts connections, reads lines from +//! those connections, and broadcasts the lines to all other connected clients. +//! +//! This example is similar to chat.rs, but uses combinators and a much more +//! functional style. +//! +//! You can test this out by running: +//! +//! cargo run --example chat +//! +//! And then in another window run: +//! +//! cargo run --example connect 127.0.0.1:8080 +//! +//! You can run the second command in multiple windows and then chat between the +//! two, seeing the messages from the other client as they're received. For all +//! connected clients they'll all join the same room and see everyone else's +//! messages. + +#![deny(warnings)] + +extern crate tokio; +extern crate futures; + +use tokio::io; +use tokio::net::TcpListener; +use tokio::prelude::*; + +use std::collections::HashMap; +use std::iter; +use std::env; +use std::io::{BufReader}; +use std::sync::{Arc, Mutex}; + +fn main() { + // Create the TCP listener we'll accept connections on. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse().unwrap(); + + let socket = TcpListener::bind(&addr).unwrap(); + println!("Listening on: {}", addr); + + // This is running on the Tokio runtime, so it will be multi-threaded. The + // `Arc>` allows state to be shared across the threads. 
+ let connections = Arc::new(Mutex::new(HashMap::new())); + + // The server task asynchronously iterates over and processes each incoming + // connection. + let srv = socket.incoming() + .map_err(|e| println!("failed to accept socket; error = {:?}", e)) + .for_each(move |stream| { + // The client's socket address + let addr = stream.peer_addr().unwrap(); + + println!("New Connection: {}", addr); + + // Split the TcpStream into two separate handles. One handle for reading + // and one handle for writing. This lets us use separate tasks for + // reading and writing. + let (reader, writer) = stream.split(); + + // Create a channel for our stream, which other sockets will use to + // send us messages. Then register our address with the stream to send + // data to us. + let (tx, rx) = futures::sync::mpsc::unbounded(); + connections.lock().unwrap().insert(addr, tx); + + // Define here what we do for the actual I/O. That is, read a bunch of + // lines from the socket and dispatch them while we also write any lines + // from other sockets. + let connections_inner = connections.clone(); + let reader = BufReader::new(reader); + + // Model the read portion of this socket by mapping an infinite + // iterator to each line off the socket. This "loop" is then + // terminated with an error once we hit EOF on the socket. + let iter = stream::iter_ok::<_, io::Error>(iter::repeat(())); + + let socket_reader = iter.fold(reader, move |reader, _| { + // Read a line off the socket, failing if we're at EOF + let line = io::read_until(reader, b'\n', Vec::new()); + let line = line.and_then(|(reader, vec)| { + if vec.len() == 0 { + Err(io::Error::new(io::ErrorKind::BrokenPipe, "broken pipe")) + } else { + Ok((reader, vec)) + } + }); + + // Convert the bytes we read into a string, and then send that + // string to all other connected clients. + let line = line.map(|(reader, vec)| { + (reader, String::from_utf8(vec)) + }); + + // Move the connection state into the closure below. 
+ let connections = connections_inner.clone(); + + line.map(move |(reader, message)| { + println!("{}: {:?}", addr, message); + let mut conns = connections.lock().unwrap(); + + if let Ok(msg) = message { + // For each open connection except the sender, send the + // string via the channel. + let iter = conns.iter_mut() + .filter(|&(&k, _)| k != addr) + .map(|(_, v)| v); + for tx in iter { + tx.unbounded_send(format!("{}: {}", addr, msg)).unwrap(); + } + } else { + let tx = conns.get_mut(&addr).unwrap(); + tx.unbounded_send("You didn't send valid UTF-8.".to_string()).unwrap(); + } + + reader + }) + }); + + // Whenever we receive a string on the Receiver, we write it to + // `WriteHalf`. + let socket_writer = rx.fold(writer, |writer, msg| { + let amt = io::write_all(writer, msg.into_bytes()); + let amt = amt.map(|(writer, _)| writer); + amt.map_err(|_| ()) + }); + + // Now that we've got futures representing each half of the socket, we + // use the `select` combinator to wait for either half to be done to + // tear down the other. Then we spawn off the result. + let connections = connections.clone(); + let socket_reader = socket_reader.map_err(|_| ()); + let connection = socket_reader.map(|_| ()).select(socket_writer.map(|_| ())); + + // Spawn a task to process the connection + tokio::spawn(connection.then(move |_| { + connections.lock().unwrap().remove(&addr); + println!("Connection {} closed.", addr); + Ok(()) + })); + + Ok(()) + }); + + // execute server + tokio::run(srv); +} diff --git a/third_party/rust/tokio/examples/chat.rs b/third_party/rust/tokio/examples/chat.rs new file mode 100644 index 000000000000..8c347f40a2d0 --- /dev/null +++ b/third_party/rust/tokio/examples/chat.rs @@ -0,0 +1,474 @@ +//! A chat server that broadcasts a message to all connections. +//! +//! This example is explicitly more verbose than it has to be. This is to +//! illustrate more concepts. +//! +//! A chat server for telnet clients. After a telnet client connects, the first +//! 
line should contain the client's name. After that, all lines sent by a +//! client are broadcasted to all other connected clients. +//! +//! Because the client is telnet, lines are delimited by "\r\n". +//! +//! You can test this out by running: +//! +//! cargo run --example chat +//! +//! And then in another terminal run: +//! +//! telnet localhost 6142 +//! +//! You can run the `telnet` command in any number of additional windows. +//! +//! You can run the second command in multiple windows and then chat between the +//! two, seeing the messages from the other client as they're received. For all +//! connected clients they'll all join the same room and see everyone else's +//! messages. + +#![deny(warnings)] + +extern crate tokio; +#[macro_use] +extern crate futures; +extern crate bytes; + +use tokio::io; +use tokio::net::{TcpListener, TcpStream}; +use tokio::prelude::*; +use futures::sync::mpsc; +use futures::future::{self, Either}; +use bytes::{BytesMut, Bytes, BufMut}; + +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::{Arc, Mutex}; + +/// Shorthand for the transmit half of the message channel. +type Tx = mpsc::UnboundedSender; + +/// Shorthand for the receive half of the message channel. +type Rx = mpsc::UnboundedReceiver; + +/// Data that is shared between all peers in the chat server. +/// +/// This is the set of `Tx` handles for all connected clients. Whenever a +/// message is received from a client, it is broadcasted to all peers by +/// iterating over the `peers` entries and sending a copy of the message on each +/// `Tx`. +struct Shared { + peers: HashMap, +} + +/// The state for each connected client. +struct Peer { + /// Name of the peer. + /// + /// When a client connects, the first line sent is treated as the client's + /// name (like alice or bob). The name is used to preface all messages that + /// arrive from the client so that we can simulate a real chat server: + /// + /// ```text + /// alice: Hello everyone. 
+ /// bob: Welcome to telnet chat! + /// ``` + name: BytesMut, + + /// The TCP socket wrapped with the `Lines` codec, defined below. + /// + /// This handles sending and receiving data on the socket. When using + /// `Lines`, we can work at the line level instead of having to manage the + /// raw byte operations. + lines: Lines, + + /// Handle to the shared chat state. + /// + /// This is used to broadcast messages read off the socket to all connected + /// peers. + state: Arc>, + + /// Receive half of the message channel. + /// + /// This is used to receive messages from peers. When a message is received + /// off of this `Rx`, it will be written to the socket. + rx: Rx, + + /// Client socket address. + /// + /// The socket address is used as the key in the `peers` HashMap. The + /// address is saved so that the `Peer` drop implementation can clean up its + /// entry. + addr: SocketAddr, +} + +/// Line based codec +/// +/// This decorates a socket and presents a line based read / write interface. +/// +/// As a user of `Lines`, we can focus on working at the line level. So, we send +/// and receive values that represent entire lines. The `Lines` codec will +/// handle the encoding and decoding as well as reading from and writing to the +/// socket. +#[derive(Debug)] +struct Lines { + /// The TCP socket. + socket: TcpStream, + + /// Buffer used when reading from the socket. Data is not returned from this + /// buffer until an entire line has been read. + rd: BytesMut, + + /// Buffer used to stage data before writing it to the socket. + wr: BytesMut, +} + +impl Shared { + /// Create a new, empty, instance of `Shared`. + fn new() -> Self { + Shared { + peers: HashMap::new(), + } + } +} + +impl Peer { + /// Create a new instance of `Peer`. 
+ fn new(name: BytesMut, + state: Arc>, + lines: Lines) -> Peer + { + // Get the client socket address + let addr = lines.socket.peer_addr().unwrap(); + + // Create a channel for this peer + let (tx, rx) = mpsc::unbounded(); + + // Add an entry for this `Peer` in the shared state map. + state.lock().unwrap() + .peers.insert(addr, tx); + + Peer { + name, + lines, + state, + rx, + addr, + } + } +} + +/// This is where a connected client is managed. +/// +/// A `Peer` is also a future representing completely processing the client. +/// +/// When a `Peer` is created, the first line (representing the client's name) +/// has already been read. When the socket closes, the `Peer` future completes. +/// +/// While processing, the peer future implementation will: +/// +/// 1) Receive messages on its message channel and write them to the socket. +/// 2) Receive messages from the socket and broadcast them to all peers. +/// +impl Future for Peer { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + // Tokio (and futures) use cooperative scheduling without any + // preemption. If a task never yields execution back to the executor, + // then other tasks may be starved. + // + // To deal with this, robust applications should not have any unbounded + // loops. In this example, we will read at most `LINES_PER_TICK` lines + // from the client on each tick. + // + // If the limit is hit, the current task is notified, informing the + // executor to schedule the task again asap. + const LINES_PER_TICK: usize = 10; + + // Receive all messages from peers. + for i in 0..LINES_PER_TICK { + // Polling an `UnboundedReceiver` cannot fail, so `unwrap` here is + // safe. + match self.rx.poll().unwrap() { + Async::Ready(Some(v)) => { + // Buffer the line. Once all lines are buffered, they will + // be flushed to the socket (right below). 
+ self.lines.buffer(&v); + + // If this is the last iteration, the loop will break even + // though there could still be lines to read. Because we did + // not reach `Async::NotReady`, we have to notify ourselves + // in order to tell the executor to schedule the task again. + if i+1 == LINES_PER_TICK { + task::current().notify(); + } + } + _ => break, + } + } + + // Flush the write buffer to the socket + let _ = self.lines.poll_flush()?; + + // Read new lines from the socket + while let Async::Ready(line) = self.lines.poll()? { + println!("Received line ({:?}) : {:?}", self.name, line); + + if let Some(message) = line { + // Append the peer's name to the front of the line: + let mut line = self.name.clone(); + line.extend_from_slice(b": "); + line.extend_from_slice(&message); + line.extend_from_slice(b"\r\n"); + + // We're using `Bytes`, which allows zero-copy clones (by + // storing the data in an Arc internally). + // + // However, before cloning, we must freeze the data. This + // converts it from mutable -> immutable, allowing zero copy + // cloning. + let line = line.freeze(); + + // Now, send the line to all other peers + for (addr, tx) in &self.state.lock().unwrap().peers { + // Don't send the message to ourselves + if *addr != self.addr { + // The send only fails if the rx half has been dropped, + // however this is impossible as the `tx` half will be + // removed from the map before the `rx` is dropped. + tx.unbounded_send(line.clone()).unwrap(); + } + } + } else { + // EOF was reached. The remote client has disconnected. There is + // nothing more to do. + return Ok(Async::Ready(())); + } + } + + // As always, it is important to not just return `NotReady` without + // ensuring an inner future also returned `NotReady`. + // + // We know we got a `NotReady` from either `self.rx` or `self.lines`, so + // the contract is respected. 
+ Ok(Async::NotReady) + } +} + +impl Drop for Peer { + fn drop(&mut self) { + self.state.lock().unwrap().peers + .remove(&self.addr); + } +} + +impl Lines { + /// Create a new `Lines` codec backed by the socket + fn new(socket: TcpStream) -> Self { + Lines { + socket, + rd: BytesMut::new(), + wr: BytesMut::new(), + } + } + + /// Buffer a line. + /// + /// This writes the line to an internal buffer. Calls to `poll_flush` will + /// attempt to flush this buffer to the socket. + fn buffer(&mut self, line: &[u8]) { + // Ensure the buffer has capacity. Ideally this would not be unbounded, + // but to keep the example simple, we will not limit this. + self.wr.reserve(line.len()); + + // Push the line onto the end of the write buffer. + // + // The `put` function is from the `BufMut` trait. + self.wr.put(line); + } + + /// Flush the write buffer to the socket + fn poll_flush(&mut self) -> Poll<(), io::Error> { + // As long as there is buffered data to write, try to write it. + while !self.wr.is_empty() { + // Try to read some bytes from the socket + let n = try_ready!(self.socket.poll_write(&self.wr)); + + // As long as the wr is not empty, a successful write should + // never write 0 bytes. + assert!(n > 0); + + // This discards the first `n` bytes of the buffer. + let _ = self.wr.split_to(n); + } + + Ok(Async::Ready(())) + } + + /// Read data from the socket. + /// + /// This only returns `Ready` when the socket has closed. + fn fill_read_buf(&mut self) -> Poll<(), io::Error> { + loop { + // Ensure the read buffer has capacity. + // + // This might result in an internal allocation. + self.rd.reserve(1024); + + // Read data into the buffer. 
+ let n = try_ready!(self.socket.read_buf(&mut self.rd)); + + if n == 0 { + return Ok(Async::Ready(())); + } + } + } +} + +impl Stream for Lines { + type Item = BytesMut; + type Error = io::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + // First, read any new data that might have been received off the socket + let sock_closed = self.fill_read_buf()?.is_ready(); + + // Now, try finding lines + let pos = self.rd.windows(2).enumerate() + .find(|&(_, bytes)| bytes == b"\r\n") + .map(|(i, _)| i); + + if let Some(pos) = pos { + // Remove the line from the read buffer and set it to `line`. + let mut line = self.rd.split_to(pos + 2); + + // Drop the trailing \r\n + line.split_off(pos); + + // Return the line + return Ok(Async::Ready(Some(line))); + } + + if sock_closed { + Ok(Async::Ready(None)) + } else { + Ok(Async::NotReady) + } + } +} + +/// Spawn a task to manage the socket. +/// +/// This will read the first line from the socket to identify the client, then +/// add the client to the set of connected peers in the chat service. +fn process(socket: TcpStream, state: Arc>) { + // Wrap the socket with the `Lines` codec that we wrote above. + // + // By doing this, we can operate at the line level instead of doing raw byte + // manipulation. + let lines = Lines::new(socket); + + // The first line is treated as the client's name. The client is not added + // to the set of connected peers until this line is received. + // + // We use the `into_future` combinator to extract the first item from the + // lines stream. `into_future` takes a `Stream` and converts it to a future + // of `(first, rest)` where `rest` is the original stream instance. + let connection = lines.into_future() + // `into_future` doesn't have the right error type, so map the error to + // make it work. + .map_err(|(e, _)| e) + // Process the first received line as the client's name. 
+ .and_then(|(name, lines)| { + // If `name` is `None`, then the client disconnected without + // actually sending a line of data. + // + // Since the connection is closed, there is no further work that we + // need to do. So, we just terminate processing by returning + // `future::ok()`. + // + // The problem is that only a single future type can be returned + // from a combinator closure, but we want to return both + // `future::ok()` and `Peer` (below). + // + // This is a common problem, so the `futures` crate solves this by + // providing the `Either` helper enum that allows creating a single + // return type that covers two concrete future types. + let name = match name { + Some(name) => name, + None => { + // The remote client closed the connection without sending + // any data. + return Either::A(future::ok(())); + } + }; + + println!("`{:?}` is joining the chat", name); + + // Create the peer. + // + // This is also a future that processes the connection, only + // completing when the socket closes. + let peer = Peer::new( + name, + state, + lines); + + // Wrap `peer` with `Either::B` to make the return type fit. + Either::B(peer) + }) + // Task futures have an error of type `()`, this ensures we handle the + // error. We do this by printing the error to STDOUT. + .map_err(|e| { + println!("connection error = {:?}", e); + }); + + // Spawn the task. Internally, this submits the task to a thread pool. + tokio::spawn(connection); +} + +pub fn main() { + // Create the shared state. This is how all the peers communicate. + // + // The server task will hold a handle to this. For every new client, the + // `state` handle is cloned and passed into the task that processes the + // client connection. + let state = Arc::new(Mutex::new(Shared::new())); + + let addr = "127.0.0.1:6142".parse().unwrap(); + + // Bind a TCP listener to the socket address. + // + // Note that this is the Tokio TcpListener, which is fully async. 
+ let listener = TcpListener::bind(&addr).unwrap(); + + // The server task asynchronously iterates over and processes each + // incoming connection. + let server = listener.incoming().for_each(move |socket| { + // Spawn a task to process the connection + process(socket, state.clone()); + Ok(()) + }) + .map_err(|err| { + // All tasks must have an `Error` type of `()`. This forces error + // handling and helps avoid silencing failures. + // + // In our example, we are only going to log the error to STDOUT. + println!("accept error = {:?}", err); + }); + + println!("server running on localhost:6142"); + + // Start the Tokio runtime. + // + // The Tokio is a pre-configured "out of the box" runtime for building + // asynchronous applications. It includes both a reactor and a task + // scheduler. This means applications are multithreaded by default. + // + // This function blocks until the runtime reaches an idle state. Idle is + // defined as all spawned tasks have completed and all I/O resources (TCP + // sockets in our case) have been dropped. + // + // In our example, we have not defined a shutdown strategy, so this will + // block until `ctrl-c` is pressed at the terminal. + tokio::run(server); +} diff --git a/third_party/rust/tokio/examples/connect.rs b/third_party/rust/tokio/examples/connect.rs new file mode 100644 index 000000000000..5a6b515145ee --- /dev/null +++ b/third_party/rust/tokio/examples/connect.rs @@ -0,0 +1,246 @@ +//! An example of hooking up stdin/stdout to either a TCP or UDP stream. +//! +//! This example will connect to a socket address specified in the argument list +//! and then forward all data read on stdin to the server, printing out all data +//! received on stdout. An optional `--udp` argument can be passed to specify +//! that the connection should be made over UDP instead of TCP, translating each +//! line entered on stdin to a UDP packet to be sent to the remote address. +//! +//! 
Note that this is not currently optimized for performance, especially +//! around buffer management. Rather it's intended to show an example of +//! working with a client. +//! +//! This example can be quite useful when interacting with the other examples in +//! this repository! Many of them recommend running this as a simple "hook up +//! stdin/stdout to a server" to get up and running. + +#![deny(warnings)] + +extern crate tokio; +extern crate tokio_codec; +extern crate tokio_io; +extern crate futures; +extern crate bytes; + +use std::env; +use std::io::{self, Read, Write}; +use std::net::SocketAddr; +use std::thread; + +use tokio::prelude::*; +use futures::sync::mpsc; + +fn main() { + // Determine if we're going to run in TCP or UDP mode + let mut args = env::args().skip(1).collect::>(); + let tcp = match args.iter().position(|a| a == "--udp") { + Some(i) => { + args.remove(i); + false + } + None => true, + }; + + // Parse what address we're going to connect to + let addr = args.first().unwrap_or_else(|| { + panic!("this program requires at least one argument") + }); + let addr = addr.parse::().unwrap(); + + // Right now Tokio doesn't support a handle to stdin running on the event + // loop, so we farm out that work to a separate thread. This thread will + // read data (with blocking I/O) from stdin and then send it to the event + // loop over a standard futures channel. + let (stdin_tx, stdin_rx) = mpsc::channel(0); + thread::spawn(|| read_stdin(stdin_tx)); + let stdin_rx = stdin_rx.map_err(|_| panic!()); // errors not possible on rx + + // Now that we've got our stdin read we either set up our TCP connection or + // our UDP connection to get a stream of bytes we're going to emit to + // stdout. + let stdout = if tcp { + tcp::connect(&addr, Box::new(stdin_rx)) + } else { + udp::connect(&addr, Box::new(stdin_rx)) + }; + + // And now with our stream of bytes to write to stdout, we execute that in + // the event loop! 
Note that this is doing blocking I/O to emit data to + // stdout, and in general it's a no-no to do that sort of work on the event + // loop. In this case, though, we know it's ok as the event loop isn't + // otherwise running anything useful. + let mut out = io::stdout(); + + tokio::run({ + stdout + .for_each(move |chunk| { + out.write_all(&chunk) + }) + .map_err(|e| println!("error reading stdout; error = {:?}", e)) + }); +} + +mod codec { + use std::io; + use bytes::{BufMut, BytesMut}; + use tokio_codec::{Encoder, Decoder}; + + /// A simple `Codec` implementation that just ships bytes around. + /// + /// This type is used for "framing" a TCP/UDP stream of bytes but it's really + /// just a convenient method for us to work with streams/sinks for now. + /// This'll just take any data read and interpret it as a "frame" and + /// conversely just shove data into the output location without looking at + /// it. + pub struct Bytes; + + impl Decoder for Bytes { + type Item = BytesMut; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> io::Result> { + if buf.len() > 0 { + let len = buf.len(); + Ok(Some(buf.split_to(len))) + } else { + Ok(None) + } + } + } + + impl Encoder for Bytes { + type Item = Vec; + type Error = io::Error; + + fn encode(&mut self, data: Vec, buf: &mut BytesMut) -> io::Result<()> { + buf.put(&data[..]); + Ok(()) + } + } +} + +mod tcp { + use tokio; + use tokio_codec::Decoder; + use tokio::net::TcpStream; + use tokio::prelude::*; + + use bytes::BytesMut; + use codec::Bytes; + + use std::io; + use std::net::SocketAddr; + + pub fn connect(addr: &SocketAddr, + stdin: Box, Error = io::Error> + Send>) + -> Box + Send> + { + let tcp = TcpStream::connect(addr); + + // After the TCP connection has been established, we set up our client + // to start forwarding data. + // + // First we use the `Io::framed` method with a simple implementation of + // a `Codec` (listed below) that just ships bytes around. 
We then split + // that in two to work with the stream and sink separately. + // + // Half of the work we're going to do is to take all data we receive on + // `stdin` and send that along the TCP stream (`sink`). The second half + // is to take all the data we receive (`stream`) and then write that to + // stdout. We'll be passing this handle back out from this method. + // + // You'll also note that we *spawn* the work to read stdin and write it + // to the TCP stream. This is done to ensure that happens concurrently + // with us reading data from the stream. + Box::new(tcp.map(move |stream| { + let (sink, stream) = Bytes.framed(stream).split(); + + tokio::spawn(stdin.forward(sink).then(|result| { + if let Err(e) = result { + panic!("failed to write to socket: {}", e) + } + Ok(()) + })); + + stream + }).flatten_stream()) + } +} + +mod udp { + use std::io; + use std::net::SocketAddr; + + use tokio; + use tokio::net::{UdpSocket, UdpFramed}; + use tokio::prelude::*; + use bytes::BytesMut; + + use codec::Bytes; + + pub fn connect(&addr: &SocketAddr, + stdin: Box, Error = io::Error> + Send>) + -> Box + Send> + { + // We'll bind our UDP socket to a local IP/port, but for now we + // basically let the OS pick both of those. + let addr_to_bind = if addr.ip().is_ipv4() { + "0.0.0.0:0".parse().unwrap() + } else { + "[::]:0".parse().unwrap() + }; + let udp = UdpSocket::bind(&addr_to_bind) + .expect("failed to bind socket"); + + // Like above with TCP we use an instance of `Bytes` codec to transform + // this UDP socket into a framed sink/stream which operates over + // discrete values. In this case we're working with *pairs* of socket + // addresses and byte buffers. + let (sink, stream) = UdpFramed::new(udp, Bytes).split(); + + // All bytes from `stdin` will go to the `addr` specified in our + // argument list. 
Like with TCP this is spawned concurrently + let forward_stdin = stdin.map(move |chunk| { + (chunk, addr) + }).forward(sink).then(|result| { + if let Err(e) = result { + panic!("failed to write to socket: {}", e) + } + Ok(()) + }); + + // With UDP we could receive data from any source, so filter out + // anything coming from a different address + let receive = stream.filter_map(move |(chunk, src)| { + if src == addr { + Some(chunk.into()) + } else { + None + } + }); + + Box::new(future::lazy(|| { + tokio::spawn(forward_stdin); + future::ok(receive) + }).flatten_stream()) + } +} + +// Our helper method which will read data from stdin and send it along the +// sender provided. +fn read_stdin(mut tx: mpsc::Sender>) { + let mut stdin = io::stdin(); + loop { + let mut buf = vec![0; 1024]; + let n = match stdin.read(&mut buf) { + Err(_) | + Ok(0) => break, + Ok(n) => n, + }; + buf.truncate(n); + tx = match tx.send(buf).wait() { + Ok(tx) => tx, + Err(_) => break, + }; + } +} diff --git a/third_party/rust/tokio/examples/echo-udp.rs b/third_party/rust/tokio/examples/echo-udp.rs new file mode 100644 index 000000000000..9a1b9684e12c --- /dev/null +++ b/third_party/rust/tokio/examples/echo-udp.rs @@ -0,0 +1,73 @@ +//! An UDP echo server that just sends back everything that it receives. +//! +//! If you're on unix you can test this out by in one terminal executing: +//! +//! cargo run --example echo-udp +//! +//! and in another terminal you can run: +//! +//! cargo run --example connect -- --udp 127.0.0.1:8080 +//! +//! Each line you type in to the `nc` terminal should be echo'd back to you! 
+ +#![deny(warnings)] + +#[macro_use] +extern crate futures; +extern crate tokio; + +use std::{env, io}; +use std::net::SocketAddr; + +use tokio::prelude::*; +use tokio::net::UdpSocket; + +struct Server { + socket: UdpSocket, + buf: Vec, + to_send: Option<(usize, SocketAddr)>, +} + +impl Future for Server { + type Item = (); + type Error = io::Error; + + fn poll(&mut self) -> Poll<(), io::Error> { + loop { + // First we check to see if there's a message we need to echo back. + // If so then we try to send it back to the original source, waiting + // until it's writable and we're able to do so. + if let Some((size, peer)) = self.to_send { + let amt = try_ready!(self.socket.poll_send_to(&self.buf[..size], &peer)); + println!("Echoed {}/{} bytes to {}", amt, size, peer); + self.to_send = None; + } + + // If we're here then `to_send` is `None`, so we take a look for the + // next message we're going to echo back. + self.to_send = Some(try_ready!(self.socket.poll_recv_from(&mut self.buf))); + } + } +} + +fn main() { + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + + let socket = UdpSocket::bind(&addr).unwrap(); + println!("Listening on: {}", socket.local_addr().unwrap()); + + let server = Server { + socket: socket, + buf: vec![0; 1024], + to_send: None, + }; + + // This starts the server task. + // + // `map_err` handles the error by logging it and maps the future to a type + // that can be spawned. + // + // `tokio::run` spawns the task on the Tokio runtime and starts running. + tokio::run(server.map_err(|e| println!("server error = {:?}", e))); +} diff --git a/third_party/rust/tokio/examples/echo.rs b/third_party/rust/tokio/examples/echo.rs new file mode 100644 index 000000000000..92d65a90ff61 --- /dev/null +++ b/third_party/rust/tokio/examples/echo.rs @@ -0,0 +1,114 @@ +//! A "hello world" echo server with Tokio +//! +//! This server will create a TCP listener, accept connections in a loop, and +//! 
write back everything that's read off of each TCP connection. +//! +//! Because the Tokio runtime uses a thread pool, each TCP connection is +//! processed concurrently with all other TCP connections across multiple +//! threads. +//! +//! To see this server in action, you can run this in one terminal: +//! +//! cargo run --example echo +//! +//! and in another terminal you can run: +//! +//! cargo run --example connect 127.0.0.1:8080 +//! +//! Each line you type in to the `connect` terminal should be echo'd back to +//! you! If you open up multiple terminals running the `connect` example you +//! should be able to see them all make progress simultaneously. + +#![deny(warnings)] + +extern crate tokio; + +use tokio::io; +use tokio::net::TcpListener; +use tokio::prelude::*; + +use std::env; +use std::net::SocketAddr; + +fn main() { + // Allow passing an address to listen on as the first argument of this + // program, but otherwise we'll just set up our TCP listener on + // 127.0.0.1:8080 for connections. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + + // Next up we create a TCP listener which will listen for incoming + // connections. This TCP listener is bound to the address we determined + // above and must be associated with an event loop, so we pass in a handle + // to our event loop. After the socket's created we inform that we're ready + // to go and start accepting connections. + let socket = TcpListener::bind(&addr).unwrap(); + println!("Listening on: {}", addr); + + // Here we convert the `TcpListener` to a stream of incoming connections + // with the `incoming` method. We then define how to process each element in + // the stream with the `for_each` method. + // + // This combinator, defined on the `Stream` trait, will allow us to define a + // computation to happen for all items on the stream (in this case TCP + // connections made to the server). 
The return value of the `for_each` + // method is itself a future representing processing the entire stream of + // connections, and ends up being our server. + let done = socket.incoming() + .map_err(|e| println!("failed to accept socket; error = {:?}", e)) + .for_each(move |socket| { + // Once we're inside this closure this represents an accepted client + // from our server. The `socket` is the client connection (similar to + // how the standard library operates). + // + // We just want to copy all data read from the socket back onto the + // socket itself (e.g. "echo"). We can use the standard `io::copy` + // combinator in the `tokio-core` crate to do precisely this! + // + // The `copy` function takes two arguments, where to read from and where + // to write to. We only have one argument, though, with `socket`. + // Luckily there's a method, `Io::split`, which will split an Read/Write + // stream into its two halves. This operation allows us to work with + // each stream independently, such as pass them as two arguments to the + // `copy` function. + // + // The `copy` function then returns a future, and this future will be + // resolved when the copying operation is complete, resolving to the + // amount of data that was copied. + let (reader, writer) = socket.split(); + let amt = io::copy(reader, writer); + + // After our copy operation is complete we just print out some helpful + // information. + let msg = amt.then(move |result| { + match result { + Ok((amt, _, _)) => println!("wrote {} bytes", amt), + Err(e) => println!("error: {}", e), + } + + Ok(()) + }); + + + // And this is where much of the magic of this server happens. We + // crucially want all clients to make progress concurrently, rather than + // blocking one on completion of another. To achieve this we use the + // `tokio::spawn` function to execute the work in the background. 
+ // + // This function will transfer ownership of the future (`msg` in this + // case) to the Tokio runtime thread pool that. The thread pool will + // drive the future to completion. + // + // Essentially here we're executing a new task to run concurrently, + // which will allow all of our clients to be processed concurrently. + tokio::spawn(msg) + }); + + // And finally now that we've define what our server is, we run it! + // + // This starts the Tokio runtime, spawns the server task, and blocks the + // current thread until all tasks complete execution. Since the `done` task + // never completes (it just keeps accepting sockets), `tokio::run` blocks + // forever (until ctrl-c is pressed). + tokio::run(done); +} diff --git a/third_party/rust/tokio/examples/hello_world.rs b/third_party/rust/tokio/examples/hello_world.rs new file mode 100644 index 000000000000..398ec11aac70 --- /dev/null +++ b/third_party/rust/tokio/examples/hello_world.rs @@ -0,0 +1,70 @@ +//! Hello world server. +//! +//! A simple server that accepts connections, writes "hello world\n", and closes +//! the connection. +//! +//! You can test this out by running: +//! +//! cargo run --example hello_world +//! +//! And then in another terminal run: +//! +//! telnet localhost 6142 +//! + +#![deny(warnings)] + +extern crate tokio; + +use tokio::io; +use tokio::net::TcpListener; +use tokio::prelude::*; + +pub fn main() { + let addr = "127.0.0.1:6142".parse().unwrap(); + + // Bind a TCP listener to the socket address. + // + // Note that this is the Tokio TcpListener, which is fully async. + let listener = TcpListener::bind(&addr).unwrap(); + + // The server task asynchronously iterates over and processes each + // incoming connection. 
+ let server = listener.incoming().for_each(|socket| { + println!("accepted socket; addr={:?}", socket.peer_addr().unwrap()); + + let connection = io::write_all(socket, "hello world\n") + .then(|res| { + println!("wrote message; success={:?}", res.is_ok()); + Ok(()) + }); + + // Spawn a new task that processes the socket: + tokio::spawn(connection); + + Ok(()) + }) + .map_err(|err| { + // All tasks must have an `Error` type of `()`. This forces error + // handling and helps avoid silencing failures. + // + // In our example, we are only going to log the error to STDOUT. + println!("accept error = {:?}", err); + }); + + println!("server running on localhost:6142"); + + // Start the Tokio runtime. + // + // The Tokio is a pre-configured "out of the box" runtime for building + // asynchronous applications. It includes both a reactor and a task + // scheduler. This means applications are multithreaded by default. + // + // This function blocks until the runtime reaches an idle state. Idle is + // defined as all spawned tasks have completed and all I/O resources (TCP + // sockets in our case) have been dropped. + // + // In our example, we have not defined a shutdown strategy, so this will + // block until `ctrl-c` is pressed at the terminal. + tokio::run(server); +} diff --git a/third_party/rust/tokio/examples/manual-runtime.rs b/third_party/rust/tokio/examples/manual-runtime.rs new file mode 100644 index 000000000000..bad74851af68 --- /dev/null +++ b/third_party/rust/tokio/examples/manual-runtime.rs @@ -0,0 +1,85 @@ +//! An example how to manually assemble a runtime and run some tasks on it. +//! +//! This is closer to the single-threaded runtime than the default tokio one, as it is simpler to +//! grasp. There are conceptually similar, but the multi-threaded one would be more code. If you +//! just want to *use* a single-threaded runtime, use the one provided by tokio directly +//! (`tokio::runtime::current_thread::Runtime::new()`. This is a demonstration only. +//! 
+//! Note that the error handling is a bit left out. Also, the `run` could be modified to return the +//! result of the provided future. + +extern crate futures; +extern crate tokio; +extern crate tokio_executor; +extern crate tokio_reactor; +extern crate tokio_timer; + +use std::io::Error as IoError; +use std::time::{Duration, Instant}; + +use futures::{future, Future}; +use tokio::executor::current_thread::{self, CurrentThread}; +use tokio_reactor::Reactor; +use tokio_timer::timer::{self, Timer}; + +/// Creates a „runtime“. +/// +/// This is similar to running `tokio::runtime::current_thread::Runtime::new()`. +fn run>(f: F) -> Result<(), IoError> { + // We need a reactor to receive events about IO objects from kernel + let reactor = Reactor::new()?; + let reactor_handle = reactor.handle(); + // Place a timer wheel on top of the reactor. If there are no timeouts to fire, it'll let the + // reactor pick up some new external events. + let timer = Timer::new(reactor); + let timer_handle = timer.handle(); + // And now put a single-threaded executor on top of the timer. When there are no futures ready + // to do something, it'll let the timer or the reactor generate some new stimuli for the + // futures to continue in their life. + let mut executor = CurrentThread::new_with_park(timer); + // Binds an executor to this thread + let mut enter = tokio_executor::enter().expect("Multiple executors at once"); + // This will set the default handle and timer to use inside the closure and run the future. + tokio_reactor::with_default(&reactor_handle, &mut enter, |enter| { + timer::with_default(&timer_handle, enter, |enter| { + // The TaskExecutor is a fake executor that looks into the current single-threaded + // executor when used. This is a trick, because we need two mutable references to the + // executor (one to run the provided future, another to install as the default one). We + // use the fake one here as the default one. 
+ let mut default_executor = current_thread::TaskExecutor::current(); + tokio_executor::with_default(&mut default_executor, enter, |enter| { + let mut executor = executor.enter(enter); + // Run the provided future + executor.block_on(f).unwrap(); + // Run all the other futures that are still left in the executor + executor.run().unwrap(); + }); + }); + }); + Ok(()) +} + +fn main() { + run(future::lazy(|| { + // Here comes the application logic. It can spawn further tasks by current_thread::spawn(). + // It also can use the default reactor and create timeouts. + + // Connect somewhere. And then do nothing with it. Yes, useless. + // + // This will use the default reactor which runs in the current thread. + let connect = tokio::net::TcpStream::connect(&"127.0.0.1:53".parse().unwrap()) + .map(|_| println!("Connected")) + .map_err(|e| println!("Failed to connect: {}", e)); + // We can spawn it without requiring Send. This would panic if we run it outside of the + // `run` (or outside of anything else) + current_thread::spawn(connect); + + // We can also create timeouts. + let deadline = tokio::timer::Delay::new(Instant::now() + Duration::from_secs(5)) + .map(|()| println!("5 seconds are over")) + .map_err(|e| println!("Failed to wait: {}", e)); + // We can spawn on the default executor, which is also the local one. + tokio::executor::spawn(deadline); + Ok(()) + })).unwrap(); +} diff --git a/third_party/rust/tokio/examples/print_each_packet.rs b/third_party/rust/tokio/examples/print_each_packet.rs new file mode 100644 index 000000000000..5dc5332404df --- /dev/null +++ b/third_party/rust/tokio/examples/print_each_packet.rs @@ -0,0 +1,149 @@ +//! A "print-each-packet" server with Tokio +//! +//! This server will create a TCP listener, accept connections in a loop, and +//! put down in the stdout everything that's read off of each TCP connection. +//! +//! Because the Tokio runtime uses a thread pool, each TCP connection is +//! 
processed concurrently with all other TCP connections across multiple +//! threads. +//! +//! To see this server in action, you can run this in one terminal: +//! +//! cargo run --example print\_each\_packet +//! +//! and in another terminal you can run: +//! +//! cargo run --example connect 127.0.0.1:8080 +//! +//! Each line you type in to the `connect` terminal should be written to terminal! +//! +//! Minimal js example: +//! +//! ```js +//! var net = require("net"); +//! +//! var listenPort = 8080; +//! +//! var server = net.createServer(function (socket) { +//! socket.on("data", function (bytes) { +//! console.log("bytes", bytes); +//! }); +//! +//! socket.on("end", function() { +//! console.log("Socket received FIN packet and closed connection"); +//! }); +//! socket.on("error", function (error) { +//! console.log("Socket closed with error", error); +//! }); +//! +//! socket.on("close", function (with_error) { +//! if (with_error) { +//! console.log("Socket closed with result: Err(SomeError)"); +//! } else { +//! console.log("Socket closed with result: Ok(())"); +//! } +//! }); +//! +//! }); +//! +//! server.listen(listenPort); +//! +//! console.log("Listening on:", listenPort); +//! ``` +//! + +#![deny(warnings)] + +extern crate tokio; +extern crate tokio_codec; +extern crate tokio_io; + +use tokio_codec::{Decoder, BytesCodec}; +use tokio::net::TcpListener; +use tokio::prelude::*; + +use std::env; +use std::net::SocketAddr; + +fn main() { + // Allow passing an address to listen on as the first argument of this + // program, but otherwise we'll just set up our TCP listener on + // 127.0.0.1:8080 for connections. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + + // Next up we create a TCP listener which will listen for incoming + // connections. 
This TCP listener is bound to the address we determined + // above and must be associated with an event loop, so we pass in a handle + // to our event loop. After the socket's created we inform that we're ready + // to go and start accepting connections. + let socket = TcpListener::bind(&addr).unwrap(); + println!("Listening on: {}", addr); + + // Here we convert the `TcpListener` to a stream of incoming connections + // with the `incoming` method. We then define how to process each element in + // the stream with the `for_each` method. + // + // This combinator, defined on the `Stream` trait, will allow us to define a + // computation to happen for all items on the stream (in this case TCP + // connections made to the server). The return value of the `for_each` + // method is itself a future representing processing the entire stream of + // connections, and ends up being our server. + let done = socket + .incoming() + .map_err(|e| println!("failed to accept socket; error = {:?}", e)) + .for_each(move |socket| { + // Once we're inside this closure this represents an accepted client + // from our server. The `socket` is the client connection (similar to + // how the standard library operates). + // + // We're parsing each socket with the `BytesCodec` included in `tokio_io`, + // and then we `split` each codec into the reader/writer halves. + // + // See https://docs.rs/tokio-codec/0.1/src/tokio_codec/bytes_codec.rs.html + let framed = BytesCodec::new().framed(socket); + let (_writer, reader) = framed.split(); + + let processor = reader + .for_each(|bytes| { + println!("bytes: {:?}", bytes); + Ok(()) + }) + // After our copy operation is complete we just print out some helpful + // information. 
+ .and_then(|()| { + println!("Socket received FIN packet and closed connection"); + Ok(()) + }) + .or_else(|err| { + println!("Socket closed with error: {:?}", err); + // We have to return the error to catch it in the next ``.then` call + Err(err) + }) + .then(|result| { + println!("Socket closed with result: {:?}", result); + Ok(()) + }); + + // And this is where much of the magic of this server happens. We + // crucially want all clients to make progress concurrently, rather than + // blocking one on completion of another. To achieve this we use the + // `tokio::spawn` function to execute the work in the background. + // + // This function will transfer ownership of the future (`msg` in this + // case) to the Tokio runtime thread pool that. The thread pool will + // drive the future to completion. + // + // Essentially here we're executing a new task to run concurrently, + // which will allow all of our clients to be processed concurrently. + tokio::spawn(processor) + }); + + // And finally now that we've define what our server is, we run it! + // + // This starts the Tokio runtime, spawns the server task, and blocks the + // current thread until all tasks complete execution. Since the `done` task + // never completes (it just keeps accepting sockets), `tokio::run` blocks + // forever (until ctrl-c is pressed). + tokio::run(done); +} diff --git a/third_party/rust/tokio/examples/proxy.rs b/third_party/rust/tokio/examples/proxy.rs new file mode 100644 index 000000000000..bed8314a3119 --- /dev/null +++ b/third_party/rust/tokio/examples/proxy.rs @@ -0,0 +1,128 @@ +//! A proxy that forwards data to another server and forwards that server's +//! responses back to clients. +//! +//! Because the Tokio runtime uses a thread pool, each TCP connection is +//! processed concurrently with all other TCP connections across multiple +//! threads. +//! +//! You can showcase this by running this in one terminal: +//! +//! cargo run --example proxy +//! +//! 
This in another terminal +//! +//! cargo run --example echo +//! +//! And finally this in another terminal +//! +//! cargo run --example connect 127.0.0.1:8081 +//! +//! This final terminal will connect to our proxy, which will in turn connect to +//! the echo server, and you'll be able to see data flowing between them. + +#![deny(warnings)] + +extern crate tokio; + +use std::sync::{Arc, Mutex}; +use std::env; +use std::net::{Shutdown, SocketAddr}; +use std::io::{self, Read, Write}; + +use tokio::io::{copy, shutdown}; +use tokio::net::{TcpListener, TcpStream}; +use tokio::prelude::*; + +fn main() { + let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string()); + let listen_addr = listen_addr.parse::().unwrap(); + + let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string()); + let server_addr = server_addr.parse::().unwrap(); + + // Create a TCP listener which will listen for incoming connections. + let socket = TcpListener::bind(&listen_addr).unwrap(); + println!("Listening on: {}", listen_addr); + println!("Proxying to: {}", server_addr); + + let done = socket.incoming() + .map_err(|e| println!("error accepting socket; error = {:?}", e)) + .for_each(move |client| { + let server = TcpStream::connect(&server_addr); + let amounts = server.and_then(move |server| { + // Create separate read/write handles for the TCP clients that we're + // proxying data between. Note that typically you'd use + // `AsyncRead::split` for this operation, but we want our writer + // handles to have a custom implementation of `shutdown` which + // actually calls `TcpStream::shutdown` to ensure that EOF is + // transmitted properly across the proxied connection. + // + // As a result, we wrap up our client/server manually in arcs and + // use the impls below on our custom `MyTcpStream` type. 
+ let client_reader = MyTcpStream(Arc::new(Mutex::new(client))); + let client_writer = client_reader.clone(); + let server_reader = MyTcpStream(Arc::new(Mutex::new(server))); + let server_writer = server_reader.clone(); + + // Copy the data (in parallel) between the client and the server. + // After the copy is done we indicate to the remote side that we've + // finished by shutting down the connection. + let client_to_server = copy(client_reader, server_writer) + .and_then(|(n, _, server_writer)| { + shutdown(server_writer).map(move |_| n) + }); + + let server_to_client = copy(server_reader, client_writer) + .and_then(|(n, _, client_writer)| { + shutdown(client_writer).map(move |_| n) + }); + + client_to_server.join(server_to_client) + }); + + let msg = amounts.map(move |(from_client, from_server)| { + println!("client wrote {} bytes and received {} bytes", + from_client, from_server); + }).map_err(|e| { + // Don't panic. Maybe the client just disconnected too soon. + println!("error: {}", e); + }); + + tokio::spawn(msg); + + Ok(()) + }); + + tokio::run(done); +} + +// This is a custom type used to have a custom implementation of the +// `AsyncWrite::shutdown` method which actually calls `TcpStream::shutdown` to +// notify the remote end that we're done writing. 
+#[derive(Clone)] +struct MyTcpStream(Arc>); + +impl Read for MyTcpStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.0.lock().unwrap().read(buf) + } +} + +impl Write for MyTcpStream { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.lock().unwrap().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl AsyncRead for MyTcpStream {} + +impl AsyncWrite for MyTcpStream { + fn shutdown(&mut self) -> Poll<(), io::Error> { + try!(self.0.lock().unwrap().shutdown(Shutdown::Write)); + Ok(().into()) + } +} diff --git a/third_party/rust/tokio/examples/tinydb.rs b/third_party/rust/tokio/examples/tinydb.rs new file mode 100644 index 000000000000..6901a1209662 --- /dev/null +++ b/third_party/rust/tokio/examples/tinydb.rs @@ -0,0 +1,206 @@ +//! A "tiny database" and accompanying protocol +//! +//! This example shows the usage of shared state amongst all connected clients, +//! namely a database of key/value pairs. Each connected client can send a +//! series of GET/SET commands to query the current value of a key or set the +//! value of a key. +//! +//! This example has a simple protocol you can use to interact with the server. +//! To run, first run this in one terminal window: +//! +//! cargo run --example tinydb +//! +//! and next in another windows run: +//! +//! cargo run --example connect 127.0.0.1:8080 +//! +//! In the `connect` window you can type in commands where when you hit enter +//! you'll get a response from the server for that command. An example session +//! is: +//! +//! +//! $ cargo run --example connect 127.0.0.1:8080 +//! GET foo +//! foo = bar +//! GET FOOBAR +//! error: no key FOOBAR +//! SET FOOBAR my awesome string +//! set FOOBAR = `my awesome string`, previous: None +//! SET foo tokio +//! set foo = `tokio`, previous: Some("bar") +//! GET foo +//! foo = tokio +//! +//! Namely you can issue two forms of commands: +//! +//! 
* `GET $key` - this will fetch the value of `$key` from the database and +//! return it. The server's database is initially populated with the key `foo` +//! set to the value `bar` +//! * `SET $key $value` - this will set the value of `$key` to `$value`, +//! returning the previous value, if any. + +#![deny(warnings)] + +extern crate tokio; + +use std::collections::HashMap; +use std::io::BufReader; +use std::env; +use std::net::SocketAddr; +use std::sync::{Arc, Mutex}; + +use tokio::io::{lines, write_all}; +use tokio::net::TcpListener; +use tokio::prelude::*; + +/// The in-memory database shared amongst all clients. +/// +/// This database will be shared via `Arc`, so to mutate the internal map we're +/// also going to use a `RefCell` for interior mutability. +struct Database { + map: Mutex>, +} + +/// Possible requests our clients can send us +enum Request { + Get { key: String }, + Set { key: String, value: String }, +} + +/// Responses to the `Request` commands above +enum Response { + Value { key: String, value: String }, + Set { key: String, value: String, previous: Option }, + Error { msg: String }, +} + +fn main() { + // Parse the address we're going to run this server on + // and set up our TCP listener to accept connections. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + let listener = TcpListener::bind(&addr).expect("failed to bind"); + println!("Listening on: {}", addr); + + // Create the shared state of this server that will be shared amongst all + // clients. We populate the initial database and then create the `Database` + // structure. Note the usage of `Arc` here which will be used to ensure that + // each independently spawned client will have a reference to the in-memory + // database. 
+ let mut initial_db = HashMap::new(); + initial_db.insert("foo".to_string(), "bar".to_string()); + let db = Arc::new(Database { + map: Mutex::new(initial_db), + }); + + let done = listener.incoming() + .map_err(|e| println!("error accepting socket; error = {:?}", e)) + .for_each(move |socket| { + // As with many other small examples, the first thing we'll do is + // *split* this TCP stream into two separately owned halves. This'll + // allow us to work with the read and write halves independently. + let (reader, writer) = socket.split(); + + // Since our protocol is line-based we use `tokio_io`'s `lines` utility + // to convert our stream of bytes, `reader`, into a `Stream` of lines. + let lines = lines(BufReader::new(reader)); + + // Here's where the meat of the processing in this server happens. First + // we see a clone of the database being created, which is creating a + // new reference for this connected client to use. Also note the `move` + // keyword on the closure here which moves ownership of the reference + // into the closure, which we'll need for spawning the client below. + // + // The `map` function here means that we'll run some code for all + // requests (lines) we receive from the client. The actual handling here + // is pretty simple, first we parse the request and if it's valid we + // generate a response based on the values in the database. 
+ let db = db.clone(); + let responses = lines.map(move |line| { + let request = match Request::parse(&line) { + Ok(req) => req, + Err(e) => return Response::Error { msg: e }, + }; + + let mut db = db.map.lock().unwrap(); + match request { + Request::Get { key } => { + match db.get(&key) { + Some(value) => Response::Value { key, value: value.clone() }, + None => Response::Error { msg: format!("no key {}", key) }, + } + } + Request::Set { key, value } => { + let previous = db.insert(key.clone(), value.clone()); + Response::Set { key, value, previous } + } + } + }); + + // At this point `responses` is a stream of `Response` types which we + // now want to write back out to the client. To do that we use + // `Stream::fold` to perform a loop here, serializing each response and + // then writing it out to the client. + let writes = responses.fold(writer, |writer, response| { + let mut response = response.serialize(); + response.push('\n'); + write_all(writer, response.into_bytes()).map(|(w, _)| w) + }); + + // Like with other small servers, we'll `spawn` this client to ensure it + // runs concurrently with all other clients, for now ignoring any errors + // that we see. 
+ let msg = writes.then(move |_| Ok(())); + + tokio::spawn(msg) + }); + + tokio::run(done); +} + +impl Request { + fn parse(input: &str) -> Result { + let mut parts = input.splitn(3, " "); + match parts.next() { + Some("GET") => { + let key = match parts.next() { + Some(key) => key, + None => return Err(format!("GET must be followed by a key")), + }; + if parts.next().is_some() { + return Err(format!("GET's key must not be followed by anything")) + } + Ok(Request::Get { key: key.to_string() }) + } + Some("SET") => { + let key = match parts.next() { + Some(key) => key, + None => return Err(format!("SET must be followed by a key")), + }; + let value = match parts.next() { + Some(value) => value, + None => return Err(format!("SET needs a value")), + }; + Ok(Request::Set { key: key.to_string(), value: value.to_string() }) + } + Some(cmd) => Err(format!("unknown command: {}", cmd)), + None => Err(format!("empty input")), + } + } +} + +impl Response { + fn serialize(&self) -> String { + match *self { + Response::Value { ref key, ref value } => { + format!("{} = {}", key, value) + } + Response::Set { ref key, ref value, ref previous } => { + format!("set {} = `{}`, previous: {:?}", key, value, previous) + } + Response::Error { ref msg } => { + format!("error: {}", msg) + } + } + } +} diff --git a/third_party/rust/tokio/examples/tinyhttp.rs b/third_party/rust/tokio/examples/tinyhttp.rs new file mode 100644 index 000000000000..d56ff96fddd3 --- /dev/null +++ b/third_party/rust/tokio/examples/tinyhttp.rs @@ -0,0 +1,310 @@ +//! A "tiny" example of HTTP request/response handling using just tokio-core +//! +//! This example is intended for *learning purposes* to see how various pieces +//! hook up together and how HTTP can get up and running. Note that this example +//! is written with the restriction that it *can't* use any "big" library other +//! than tokio-core, if you'd like a "real world" HTTP library you likely want a +//! crate like Hyper. +//! +//! 
Code here is based on the `echo-threads` example and implements two paths, +//! the `/plaintext` and `/json` routes to respond with some text and json, +//! respectively. By default this will run I/O on all the cores your system has +//! available, and it doesn't support HTTP request bodies. + +#![deny(warnings)] + +extern crate bytes; +extern crate http; +extern crate httparse; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; +extern crate time; +extern crate tokio; +extern crate tokio_codec; +extern crate tokio_io; + +use std::{env, fmt, io}; +use std::net::SocketAddr; + +use tokio::net::{TcpStream, TcpListener}; +use tokio::prelude::*; + +use tokio_codec::{Encoder, Decoder}; + +use bytes::BytesMut; +use http::header::HeaderValue; +use http::{Request, Response, StatusCode}; + +fn main() { + // Parse the arguments, bind the TCP socket we'll be listening to, spin up + // our worker threads, and start shipping sockets to those worker threads. + let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); + let addr = addr.parse::().unwrap(); + + let listener = TcpListener::bind(&addr).expect("failed to bind"); + println!("Listening on: {}", addr); + + tokio::run({ + listener.incoming() + .map_err(|e| println!("failed to accept socket; error = {:?}", e)) + .for_each(|socket| { + process(socket); + Ok(()) + }) + }); +} + +fn process(socket: TcpStream) { + let (tx, rx) = + // Frame the socket using the `Http` protocol. This maps the TCP socket + // to a Stream + Sink of HTTP frames. + Http.framed(socket) + // This splits a single `Stream + Sink` value into two separate handles + // that can be used independently (even on different tasks or threads). + .split(); + + // Map all requests into responses and send them back to the client. 
+ let task = tx.send_all(rx.and_then(respond)) + .then(|res| { + if let Err(e) = res { + println!("failed to process connection; error = {:?}", e); + } + + Ok(()) + }); + + // Spawn the task that handles the connection. + tokio::spawn(task); +} + +/// "Server logic" is implemented in this function. +/// +/// This function is a map from and HTTP request to a future of a response and +/// represents the various handling a server might do. Currently the contents +/// here are pretty uninteresting. +fn respond(req: Request<()>) + -> Box, Error = io::Error> + Send> +{ + let mut ret = Response::builder(); + let body = match req.uri().path() { + "/plaintext" => { + ret.header("Content-Type", "text/plain"); + "Hello, World!".to_string() + } + "/json" => { + ret.header("Content-Type", "application/json"); + + #[derive(Serialize)] + struct Message { + message: &'static str, + } + serde_json::to_string(&Message { message: "Hello, World!" }) + .unwrap() + } + _ => { + ret.status(StatusCode::NOT_FOUND); + String::new() + } + }; + Box::new(future::ok(ret.body(body).unwrap())) +} + +struct Http; + +/// Implementation of encoding an HTTP response into a `BytesMut`, basically +/// just writing out an HTTP/1.1 response. 
+impl Encoder for Http { + type Item = Response; + type Error = io::Error; + + fn encode(&mut self, item: Response, dst: &mut BytesMut) -> io::Result<()> { + use std::fmt::Write; + + write!(BytesWrite(dst), "\ + HTTP/1.1 {}\r\n\ + Server: Example\r\n\ + Content-Length: {}\r\n\ + Date: {}\r\n\ + ", item.status(), item.body().len(), date::now()).unwrap(); + + for (k, v) in item.headers() { + dst.extend_from_slice(k.as_str().as_bytes()); + dst.extend_from_slice(b": "); + dst.extend_from_slice(v.as_bytes()); + dst.extend_from_slice(b"\r\n"); + } + + dst.extend_from_slice(b"\r\n"); + dst.extend_from_slice(item.body().as_bytes()); + + return Ok(()); + + // Right now `write!` on `Vec` goes through io::Write and is not + // super speedy, so inline a less-crufty implementation here which + // doesn't go through io::Error. + struct BytesWrite<'a>(&'a mut BytesMut); + + impl<'a> fmt::Write for BytesWrite<'a> { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.0.extend_from_slice(s.as_bytes()); + Ok(()) + } + + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } + } + } +} + +/// Implementation of decoding an HTTP request from the bytes we've read so far. +/// This leverages the `httparse` crate to do the actual parsing and then we use +/// that information to construct an instance of a `http::Request` object, +/// trying to avoid allocations where possible. 
+impl Decoder for Http { + type Item = Request<()>; + type Error = io::Error; + + fn decode(&mut self, src: &mut BytesMut) -> io::Result>> { + // TODO: we should grow this headers array if parsing fails and asks + // for more headers + let mut headers = [None; 16]; + let (method, path, version, amt) = { + let mut parsed_headers = [httparse::EMPTY_HEADER; 16]; + let mut r = httparse::Request::new(&mut parsed_headers); + let status = r.parse(src).map_err(|e| { + let msg = format!("failed to parse http request: {:?}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + let amt = match status { + httparse::Status::Complete(amt) => amt, + httparse::Status::Partial => return Ok(None), + }; + + let toslice = |a: &[u8]| { + let start = a.as_ptr() as usize - src.as_ptr() as usize; + assert!(start < src.len()); + (start, start + a.len()) + }; + + for (i, header) in r.headers.iter().enumerate() { + let k = toslice(header.name.as_bytes()); + let v = toslice(header.value); + headers[i] = Some((k, v)); + } + + (toslice(r.method.unwrap().as_bytes()), + toslice(r.path.unwrap().as_bytes()), + r.version.unwrap(), + amt) + }; + if version != 1 { + return Err(io::Error::new(io::ErrorKind::Other, "only HTTP/1.1 accepted")) + } + let data = src.split_to(amt).freeze(); + let mut ret = Request::builder(); + ret.method(&data[method.0..method.1]); + ret.uri(data.slice(path.0, path.1)); + ret.version(http::Version::HTTP_11); + for header in headers.iter() { + let (k, v) = match *header { + Some((ref k, ref v)) => (k, v), + None => break, + }; + let value = unsafe { + HeaderValue::from_shared_unchecked(data.slice(v.0, v.1)) + }; + ret.header(&data[k.0..k.1], value); + } + + let req = ret.body(()).map_err(|e| { + io::Error::new(io::ErrorKind::Other, e) + })?; + Ok(Some(req)) + } +} + +mod date { + use std::cell::RefCell; + use std::fmt::{self, Write}; + use std::str; + + use time::{self, Duration}; + + pub struct Now(()); + + /// Returns a struct, which when formatted, renders an 
appropriate `Date` + /// header value. + pub fn now() -> Now { + Now(()) + } + + // Gee Alex, doesn't this seem like premature optimization. Well you see + // there Billy, you're absolutely correct! If your server is *bottlenecked* + // on rendering the `Date` header, well then boy do I have news for you, you + // don't need this optimization. + // + // In all seriousness, though, a simple "hello world" benchmark which just + // sends back literally "hello world" with standard headers actually is + // bottlenecked on rendering a date into a byte buffer. Since it was at the + // top of a profile, and this was done for some competitive benchmarks, this + // module was written. + // + // Just to be clear, though, I was not intending on doing this because it + // really does seem kinda absurd, but it was done by someone else [1], so I + // blame them! :) + // + // [1]: https://github.com/rapidoid/rapidoid/blob/f1c55c0555007e986b5d069fe1086e6d09933f7b/rapidoid-commons/src/main/java/org/rapidoid/commons/Dates.java#L48-L66 + + struct LastRenderedNow { + bytes: [u8; 128], + amt: usize, + next_update: time::Timespec, + } + + thread_local!(static LAST: RefCell = RefCell::new(LastRenderedNow { + bytes: [0; 128], + amt: 0, + next_update: time::Timespec::new(0, 0), + })); + + impl fmt::Display for Now { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + LAST.with(|cache| { + let mut cache = cache.borrow_mut(); + let now = time::get_time(); + if now >= cache.next_update { + cache.update(now); + } + f.write_str(cache.buffer()) + }) + } + } + + impl LastRenderedNow { + fn buffer(&self) -> &str { + str::from_utf8(&self.bytes[..self.amt]).unwrap() + } + + fn update(&mut self, now: time::Timespec) { + self.amt = 0; + write!(LocalBuffer(self), "{}", time::at(now).rfc822()).unwrap(); + self.next_update = now + Duration::seconds(1); + self.next_update.nsec = 0; + } + } + + struct LocalBuffer<'a>(&'a mut LastRenderedNow); + + impl<'a> fmt::Write for LocalBuffer<'a> { + fn 
write_str(&mut self, s: &str) -> fmt::Result { + let start = self.0.amt; + let end = start + s.len(); + self.0.bytes[start..end].copy_from_slice(s.as_bytes()); + self.0.amt += s.len(); + Ok(()) + } + } +} diff --git a/third_party/rust/tokio/examples/udp-client.rs b/third_party/rust/tokio/examples/udp-client.rs new file mode 100644 index 000000000000..3af7c3beaaba --- /dev/null +++ b/third_party/rust/tokio/examples/udp-client.rs @@ -0,0 +1,74 @@ +//! A UDP client that just sends everything it gets via `stdio` in a single datagram, and then +//! waits for a reply. +//! +//! For the reasons of simplicity data from `stdio` is read until `EOF` in a blocking manner. +//! +//! You can test this out by running an echo server: +//! +//! ``` +//! $ cargo run --example echo-udp -- 127.0.0.1:8080 +//! ``` +//! +//! and running the client in another terminal: +//! +//! ``` +//! $ cargo run --example udp-client +//! ``` +//! +//! You can optionally provide any custom endpoint address for the client: +//! +//! ``` +//! $ cargo run --example udp-client -- 127.0.0.1:8080 +//! ``` +//! +//! Don't forget to pass `EOF` to the standard input of the client! +//! +//! Please mind that since the UDP protocol doesn't have any capabilities to detect a broken +//! connection the server needs to be run first, otherwise the client will block forever. + +extern crate futures; +extern crate tokio; + +use std::env; +use std::io::stdin; +use std::net::SocketAddr; +use tokio::net::UdpSocket; +use tokio::prelude::*; + +fn get_stdin_data() -> Vec { + let mut buf = Vec::new(); + stdin().read_to_end(&mut buf).unwrap(); + buf +} + +fn main() { + let remote_addr: SocketAddr = env::args() + .nth(1) + .unwrap_or("127.0.0.1:8080".into()) + .parse() + .unwrap(); + // We use port 0 to let the operating system allocate an available port for us. 
+ let local_addr: SocketAddr = if remote_addr.is_ipv4() { + "0.0.0.0:0" + } else { + "[::]:0" + }.parse() + .unwrap(); + let socket = UdpSocket::bind(&local_addr).unwrap(); + const MAX_DATAGRAM_SIZE: usize = 65_507; + let processing = socket + .send_dgram(get_stdin_data(), &remote_addr) + .and_then(|(socket, _)| socket.recv_dgram(vec![0u8; MAX_DATAGRAM_SIZE])) + .map(|(_, data, len, _)| { + println!( + "Received {} bytes:\n{}", + len, + String::from_utf8_lossy(&data[..len]) + ) + }) + .wait(); + match processing { + Ok(_) => {} + Err(e) => eprintln!("Encountered an error: {}", e), + } +} diff --git a/third_party/rust/tokio/examples/udp-codec.rs b/third_party/rust/tokio/examples/udp-codec.rs new file mode 100644 index 000000000000..b273a360618a --- /dev/null +++ b/third_party/rust/tokio/examples/udp-codec.rs @@ -0,0 +1,64 @@ +//! This example leverages `BytesCodec` to create a UDP client and server which +//! speak a custom protocol. +//! +//! Here we're using the codec from tokio-io to convert a UDP socket to a stream of +//! client messages. These messages are then processed and returned back as a +//! new message with a new destination. Overall, we then use this to construct a +//! "ping pong" pair where two sockets are sending messages back and forth. + +#![deny(warnings)] + +extern crate tokio; +extern crate tokio_codec; +extern crate tokio_io; +extern crate env_logger; + +use std::net::SocketAddr; + +use tokio::prelude::*; +use tokio::net::{UdpSocket, UdpFramed}; +use tokio_codec::BytesCodec; + +fn main() { + let _ = env_logger::init(); + + let addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); + + // Bind both our sockets and then figure out what ports we got. + let a = UdpSocket::bind(&addr).unwrap(); + let b = UdpSocket::bind(&addr).unwrap(); + let b_addr = b.local_addr().unwrap(); + + // We're parsing each socket with the `BytesCodec` included in `tokio_io`, and then we + // `split` each codec into the sink/stream halves. 
+ let (a_sink, a_stream) = UdpFramed::new(a, BytesCodec::new()).split(); + let (b_sink, b_stream) = UdpFramed::new(b, BytesCodec::new()).split(); + + // Start off by sending a ping from a to b, afterwards we just print out + // what they send us and continually send pings + // let pings = stream::iter((0..5).map(Ok)); + let a = a_sink.send(("PING".into(), b_addr)).and_then(|a_sink| { + let mut i = 0; + let a_stream = a_stream.take(4).map(move |(msg, addr)| { + i += 1; + println!("[a] recv: {}", String::from_utf8_lossy(&msg)); + (format!("PING {}", i).into(), addr) + }); + a_sink.send_all(a_stream) + }); + + // The second client we have will receive the pings from `a` and then send + // back pongs. + let b_stream = b_stream.map(|(msg, addr)| { + println!("[b] recv: {}", String::from_utf8_lossy(&msg)); + ("PONG".into(), addr) + }); + let b = b_sink.send_all(b_stream); + + // Spawn the sender of pongs and then wait for our pinger to finish. + tokio::run({ + b.join(a) + .map(|_| ()) + .map_err(|e| println!("error = {:?}", e)) + }); +} diff --git a/third_party/rust/tokio/src/clock.rs b/third_party/rust/tokio/src/clock.rs new file mode 100644 index 000000000000..313416690cf7 --- /dev/null +++ b/third_party/rust/tokio/src/clock.rs @@ -0,0 +1,15 @@ +//! A configurable source of time. +//! +//! This module provides the [`now`][n] function, which returns an `Instant` +//! representing "now". The source of time used by this function is configurable +//! (via the [`tokio-timer`] crate) and allows mocking out the source of time in +//! tests or performing caching operations to reduce the number of syscalls. +//! +//! Note that, because the source of time is configurable, it is possible to +//! observe non-monotonic behavior when calling [`now`] from different +//! executors. +//! +//! [n]: fn.now.html +//! 
[`tokio-timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/clock/index.html + +pub use tokio_timer::clock::now; diff --git a/third_party/rust/tokio/src/executor/current_thread/mod.rs b/third_party/rust/tokio/src/executor/current_thread/mod.rs new file mode 100644 index 000000000000..4b25d7eb3bd0 --- /dev/null +++ b/third_party/rust/tokio/src/executor/current_thread/mod.rs @@ -0,0 +1,835 @@ +//! Execute many tasks concurrently on the current thread. +//! +//! [`CurrentThread`] is an executor that keeps tasks on the same thread that +//! they were spawned from. This allows it to execute futures that are not +//! `Send`. +//! +//! A single [`CurrentThread`] instance is able to efficiently manage a large +//! number of tasks and will attempt to schedule all tasks fairly. +//! +//! All tasks that are being managed by a [`CurrentThread`] executor are able to +//! spawn additional tasks by calling [`spawn`]. This function only works from +//! within the context of a running [`CurrentThread`] instance. +//! +//! The easiest way to start a new [`CurrentThread`] executor is to call +//! [`block_on_all`] with an initial task to seed the executor. +//! +//! For example: +//! +//! ``` +//! # extern crate tokio; +//! # extern crate futures; +//! # use tokio::executor::current_thread; +//! use futures::future::lazy; +//! +//! // Calling execute here results in a panic +//! // current_thread::spawn(my_future); +//! +//! # pub fn main() { +//! current_thread::block_on_all(lazy(|| { +//! // The execution context is setup, futures may be executed. +//! current_thread::spawn(lazy(|| { +//! println!("called from the current thread executor"); +//! Ok(()) +//! })); +//! +//! Ok::<_, ()>(()) +//! })); +//! # } +//! ``` +//! +//! The `block_on_all` function will block the current thread until **all** +//! tasks that have been spawned onto the [`CurrentThread`] instance have +//! completed. +//! +//! More fine-grain control can be achieved by using [`CurrentThread`] directly. +//! +//! 
``` +//! # extern crate tokio; +//! # extern crate futures; +//! # use tokio::executor::current_thread::CurrentThread; +//! use futures::future::{lazy, empty}; +//! use std::time::Duration; +//! +//! // Calling execute here results in a panic +//! // current_thread::spawn(my_future); +//! +//! # pub fn main() { +//! let mut current_thread = CurrentThread::new(); +//! +//! // Spawn a task, the task is not executed yet. +//! current_thread.spawn(lazy(|| { +//! println!("Spawning a task"); +//! Ok(()) +//! })); +//! +//! // Spawn a task that never completes +//! current_thread.spawn(empty()); +//! +//! // Run the executor, but only until the provided future completes. This +//! // provides the opportunity to start executing previously spawned tasks. +//! let res = current_thread.block_on(lazy(|| { +//! Ok::<_, ()>("Hello") +//! })).unwrap(); +//! +//! // Now, run the executor for *at most* 1 second. Since a task was spawned +//! // that never completes, this function will return with an error. +//! current_thread.run_timeout(Duration::from_secs(1)).unwrap_err(); +//! # } +//! ``` +//! +//! # Execution model +//! +//! Internally, [`CurrentThread`] maintains a queue. When one of its tasks is +//! notified, the task gets added to the queue. The executor will pop tasks from +//! the queue and call [`Future::poll`]. If the task gets notified while it is +//! being executed, it won't get re-executed until all other tasks currently in +//! the queue get polled. +//! +//! Before the task is polled, a thread-local variable referencing the current +//! [`CurrentThread`] instance is set. This enables [`spawn`] to spawn new tasks +//! onto the same executor without having to thread through a handle value. +//! +//! If the [`CurrentThread`] instance still has uncompleted tasks, but none of +//! these tasks are ready to be polled, the current thread is put to sleep. When +//! a task is notified, the thread is woken up and processing resumes. +//! +//! 
All tasks managed by [`CurrentThread`] remain on the current thread. When a +//! task completes, it is dropped. +//! +//! [`spawn`]: fn.spawn.html +//! [`block_on_all`]: fn.block_on_all.html +//! [`CurrentThread`]: struct.CurrentThread.html +//! [`Future::poll`]: https://docs.rs/futures/0.1/futures/future/trait.Future.html#tymethod.poll + +#![allow(deprecated)] + +mod scheduler; +use self::scheduler::Scheduler; + +use tokio_executor::{self, Enter, SpawnError}; +use tokio_executor::park::{Park, Unpark, ParkThread}; + +use futures::{executor, Async, Future}; +use futures::future::{self, Executor, ExecuteError, ExecuteErrorKind}; + +use std::fmt; +use std::cell::Cell; +use std::marker::PhantomData; +use std::rc::Rc; +use std::time::{Duration, Instant}; +use std::sync::mpsc; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Executes tasks on the current thread +pub struct CurrentThread { + /// Execute futures and receive unpark notifications. + scheduler: Scheduler, + + /// Current number of futures being executed + num_futures: usize, + + /// Thread park handle + park: P, + + /// Handle for spawning new futures from other threads + spawn_handle: Handle, + + /// Receiver for futures spawned from other threads + spawn_receiver: mpsc::Receiver + Send + 'static>>, +} + +/// Executes futures on the current thread. +/// +/// All futures executed using this executor will be executed on the current +/// thread. As such, `run` will wait for these futures to complete before +/// returning. +/// +/// For more details, see the [module level](index.html) documentation. +#[derive(Debug, Clone)] +pub struct TaskExecutor { + // Prevent the handle from moving across threads. + _p: ::std::marker::PhantomData>, +} + +/// Returned by the `turn` function. +#[derive(Debug)] +pub struct Turn { + polled: bool +} + +impl Turn { + /// `true` if any futures were polled at all and `false` otherwise. 
+ pub fn has_polled(&self) -> bool { + self.polled + } +} + +/// A `CurrentThread` instance bound to a supplied execution context. +pub struct Entered<'a, P: Park + 'a> { + executor: &'a mut CurrentThread

    , + enter: &'a mut Enter, +} + +#[deprecated(since = "0.1.2", note = "use block_on_all instead")] +#[doc(hidden)] +#[derive(Debug)] +pub struct Context<'a> { + cancel: Cell, + _p: PhantomData<&'a ()>, +} + +/// Error returned by the `run` function. +#[derive(Debug)] +pub struct RunError { + _p: (), +} + +/// Error returned by the `run_timeout` function. +#[derive(Debug)] +pub struct RunTimeoutError { + timeout: bool, +} + +/// Error returned by the `turn` function. +#[derive(Debug)] +pub struct TurnError { + _p: (), +} + +/// Error returned by the `block_on` function. +#[derive(Debug)] +pub struct BlockError { + inner: Option, +} + +/// This is mostly split out to make the borrow checker happy. +struct Borrow<'a, U: 'a> { + scheduler: &'a mut Scheduler, + num_futures: &'a mut usize, +} + +trait SpawnLocal { + fn spawn_local(&mut self, future: Box>); +} + +struct CurrentRunner { + spawn: Cell>, +} + +/// Current thread's task runner. This is set in `TaskRunner::with` +thread_local!(static CURRENT: CurrentRunner = CurrentRunner { + spawn: Cell::new(None), +}); + +#[deprecated(since = "0.1.2", note = "use block_on_all instead")] +#[doc(hidden)] +#[allow(deprecated)] +pub fn run(f: F) -> R +where F: FnOnce(&mut Context) -> R +{ + let mut context = Context { + cancel: Cell::new(false), + _p: PhantomData, + }; + + let mut current_thread = CurrentThread::new(); + + let ret = current_thread + .block_on(future::lazy(|| Ok::<_, ()>(f(&mut context)))) + .unwrap(); + + if context.cancel.get() { + return ret; + } + + current_thread.run().unwrap(); + ret +} + +/// Run the executor bootstrapping the execution with the provided future. +/// +/// This creates a new [`CurrentThread`] executor, spawns the provided future, +/// and blocks the current thread until the provided future and **all** +/// subsequently spawned futures complete. 
In other words: +/// +/// * If the provided bootstrap future does **not** spawn any additional tasks, +/// `block_on_all` returns once `future` completes. +/// * If the provided bootstrap future **does** spawn additional tasks, then +/// `block_on_all` returns once **all** spawned futures complete. +/// +/// See [module level][mod] documentation for more details. +/// +/// [`CurrentThread`]: struct.CurrentThread.html +/// [mod]: index.html +pub fn block_on_all(future: F) -> Result +where F: Future, +{ + let mut current_thread = CurrentThread::new(); + + let ret = current_thread.block_on(future); + current_thread.run().unwrap(); + + ret.map_err(|e| e.into_inner().expect("unexpected execution error")) +} + +/// Executes a future on the current thread. +/// +/// The provided future must complete or be canceled before `run` will return. +/// +/// Unlike [`tokio::spawn`], this function will always spawn on a +/// `CurrentThread` executor and is able to spawn futures that are not `Send`. +/// +/// # Panics +/// +/// This function can only be invoked from the context of a `run` call; any +/// other use will result in a panic. +/// +/// [`tokio::spawn`]: ../fn.spawn.html +pub fn spawn(future: F) +where F: Future + 'static +{ + TaskExecutor::current() + .spawn_local(Box::new(future)) + .unwrap(); +} + +// ===== impl CurrentThread ===== + +impl CurrentThread { + /// Create a new instance of `CurrentThread`. + pub fn new() -> Self { + CurrentThread::new_with_park(ParkThread::new()) + } +} + +impl CurrentThread

    { + /// Create a new instance of `CurrentThread` backed by the given park + /// handle. + pub fn new_with_park(park: P) -> Self { + let unpark = park.unpark(); + + let (spawn_sender, spawn_receiver) = mpsc::channel(); + + let scheduler = Scheduler::new(unpark); + let notify = scheduler.notify(); + + CurrentThread { + scheduler: scheduler, + num_futures: 0, + park, + spawn_handle: Handle { sender: spawn_sender, notify: notify }, + spawn_receiver: spawn_receiver, + } + } + + /// Returns `true` if the executor is currently idle. + /// + /// An idle executor is defined by not currently having any spawned tasks. + pub fn is_idle(&self) -> bool { + self.num_futures == 0 + } + + /// Spawn the future on the executor. + /// + /// This internally queues the future to be executed once `run` is called. + pub fn spawn(&mut self, future: F) -> &mut Self + where F: Future + 'static, + { + self.borrow().spawn_local(Box::new(future)); + self + } + + /// Synchronously waits for the provided `future` to complete. + /// + /// This function can be used to synchronously block the current thread + /// until the provided `future` has resolved either successfully or with an + /// error. The result of the future is then returned from this function + /// call. + /// + /// Note that this function will **also** execute any spawned futures on the + /// current thread, but will **not** block until these other spawned futures + /// have completed. + /// + /// The caller is responsible for ensuring that other spawned futures + /// complete execution. + pub fn block_on(&mut self, future: F) + -> Result> + where F: Future + { + let mut enter = tokio_executor::enter().unwrap(); + self.enter(&mut enter).block_on(future) + } + + /// Run the executor to completion, blocking the thread until **all** + /// spawned futures have completed. 
+ pub fn run(&mut self) -> Result<(), RunError> { + let mut enter = tokio_executor::enter().unwrap(); + self.enter(&mut enter).run() + } + + /// Run the executor to completion, blocking the thread until all + /// spawned futures have completed **or** `duration` time has elapsed. + pub fn run_timeout(&mut self, duration: Duration) + -> Result<(), RunTimeoutError> + { + let mut enter = tokio_executor::enter().unwrap(); + self.enter(&mut enter).run_timeout(duration) + } + + /// Perform a single iteration of the event loop. + /// + /// This function blocks the current thread even if the executor is idle. + pub fn turn(&mut self, duration: Option) + -> Result + { + let mut enter = tokio_executor::enter().unwrap(); + self.enter(&mut enter).turn(duration) + } + + /// Bind `CurrentThread` instance with an execution context. + pub fn enter<'a>(&'a mut self, enter: &'a mut Enter) -> Entered<'a, P> { + Entered { + executor: self, + enter, + } + } + + /// Returns a reference to the underlying `Park` instance. + pub fn get_park(&self) -> &P { + &self.park + } + + /// Returns a mutable reference to the underlying `Park` instance. + pub fn get_park_mut(&mut self) -> &mut P { + &mut self.park + } + + fn borrow(&mut self) -> Borrow { + Borrow { + scheduler: &mut self.scheduler, + num_futures: &mut self.num_futures, + } + } + + /// Get a new handle to spawn futures on the executor + /// + /// Different to the executor itself, the handle can be sent to different + /// threads and can be used to spawn futures on the executor. 
+ pub fn handle(&self) -> Handle { + self.spawn_handle.clone() + } +} + +impl tokio_executor::Executor for CurrentThread { + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError> + { + self.borrow().spawn_local(future); + Ok(()) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, _future: Box + Send>) + -> Result<(), futures2::executor::SpawnError> + { + panic!("Futures 0.2 integration is not available for current_thread"); + } +} + +impl fmt::Debug for CurrentThread

    { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("CurrentThread") + .field("scheduler", &self.scheduler) + .field("num_futures", &self.num_futures) + .finish() + } +} + +// ===== impl Entered ===== + +impl<'a, P: Park> Entered<'a, P> { + /// Spawn the future on the executor. + /// + /// This internally queues the future to be executed once `run` is called. + pub fn spawn(&mut self, future: F) -> &mut Self + where F: Future + 'static, + { + self.executor.borrow().spawn_local(Box::new(future)); + self + } + + /// Synchronously waits for the provided `future` to complete. + /// + /// This function can be used to synchronously block the current thread + /// until the provided `future` has resolved either successfully or with an + /// error. The result of the future is then returned from this function + /// call. + /// + /// Note that this function will **also** execute any spawned futures on the + /// current thread, but will **not** block until these other spawned futures + /// have completed. + /// + /// The caller is responsible for ensuring that other spawned futures + /// complete execution. + pub fn block_on(&mut self, future: F) + -> Result> + where F: Future + { + let mut future = executor::spawn(future); + let notify = self.executor.scheduler.notify(); + + loop { + let res = self.executor.borrow().enter(self.enter, || { + future.poll_future_notify(¬ify, 0) + }); + + match res { + Ok(Async::Ready(e)) => return Ok(e), + Err(e) => return Err(BlockError { inner: Some(e) }), + Ok(Async::NotReady) => {} + } + + self.tick(); + + if let Err(_) = self.executor.park.park() { + return Err(BlockError { inner: None }); + } + } + } + + /// Run the executor to completion, blocking the thread until **all** + /// spawned futures have completed. 
+ pub fn run(&mut self) -> Result<(), RunError> { + self.run_timeout2(None) + .map_err(|_| RunError { _p: () }) + } + + /// Run the executor to completion, blocking the thread until all + /// spawned futures have completed **or** `duration` time has elapsed. + pub fn run_timeout(&mut self, duration: Duration) + -> Result<(), RunTimeoutError> + { + self.run_timeout2(Some(duration)) + } + + /// Perform a single iteration of the event loop. + /// + /// This function blocks the current thread even if the executor is idle. + pub fn turn(&mut self, duration: Option) + -> Result + { + let res = if self.executor.scheduler.has_pending_futures() { + self.executor.park.park_timeout(Duration::from_millis(0)) + } else { + match duration { + Some(duration) => self.executor.park.park_timeout(duration), + None => self.executor.park.park(), + } + }; + + if res.is_err() { + return Err(TurnError { _p: () }); + } + + let polled = self.tick(); + + Ok(Turn { polled }) + } + + /// Returns a reference to the underlying `Park` instance. + pub fn get_park(&self) -> &P { + &self.executor.park + } + + /// Returns a mutable reference to the underlying `Park` instance. 
+ pub fn get_park_mut(&mut self) -> &mut P { + &mut self.executor.park + } + + fn run_timeout2(&mut self, dur: Option) + -> Result<(), RunTimeoutError> + { + if self.executor.is_idle() { + // Nothing to do + return Ok(()); + } + + let mut time = dur.map(|dur| (Instant::now() + dur, dur)); + + loop { + self.tick(); + + if self.executor.is_idle() { + return Ok(()); + } + + match time { + Some((until, rem)) => { + if let Err(_) = self.executor.park.park_timeout(rem) { + return Err(RunTimeoutError::new(false)); + } + + let now = Instant::now(); + + if now >= until { + return Err(RunTimeoutError::new(true)); + } + + time = Some((until, until - now)); + } + None => { + if let Err(_) = self.executor.park.park() { + return Err(RunTimeoutError::new(false)); + } + } + } + } + } + + /// Returns `true` if any futures were processed + fn tick(&mut self) -> bool { + // Spawn any futures that were spawned from other threads by manually + // looping over the receiver stream + + // FIXME: Slightly ugly but needed to make the borrow checker happy + let (mut borrow, spawn_receiver) = ( + Borrow { + scheduler: &mut self.executor.scheduler, + num_futures: &mut self.executor.num_futures, + }, + &mut self.executor.spawn_receiver, + ); + + while let Ok(future) = spawn_receiver.try_recv() { + borrow.spawn_local(future); + } + + // After any pending futures were scheduled, do the actual tick + borrow.scheduler.tick( + &mut *self.enter, + borrow.num_futures) + } +} + +impl<'a, P: Park> fmt::Debug for Entered<'a, P> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Entered") + .field("executor", &self.executor) + .field("enter", &self.enter) + .finish() + } +} + +// ===== impl Handle ===== + +/// Handle to spawn a future on the corresponding `CurrentThread` instance +#[derive(Clone)] +pub struct Handle { + sender: mpsc::Sender + Send + 'static>>, + notify: executor::NotifyHandle, +} + +// Manual implementation because the Sender does not implement Debug +impl 
fmt::Debug for Handle { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Handle") + .finish() + } +} + +impl Handle { + /// Spawn a future onto the `CurrentThread` instance corresponding to this handle + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the `CurrentThread` + /// instance of the `Handle` does not exist anymore. + pub fn spawn(&self, future: F) -> Result<(), SpawnError> + where F: Future + Send + 'static { + self.sender.send(Box::new(future)) + .expect("CurrentThread does not exist anymore"); + // use 0 for the id, CurrentThread does not make use of it + self.notify.notify(0); + + Ok(()) + } +} + +// ===== impl TaskExecutor ===== + +#[deprecated(since = "0.1.2", note = "use TaskExecutor::current instead")] +#[doc(hidden)] +pub fn task_executor() -> TaskExecutor { + TaskExecutor { + _p: ::std::marker::PhantomData, + } +} + +impl TaskExecutor { + /// Returns an executor that executes futures on the current thread. + /// + /// The user of `TaskExecutor` must ensure that when a future is submitted, + /// that it is done within the context of a call to `run`. + /// + /// For more details, see the [module level](index.html) documentation. + pub fn current() -> TaskExecutor { + TaskExecutor { + _p: ::std::marker::PhantomData, + } + } + + /// Spawn a future onto the current `CurrentThread` instance. 
+ pub fn spawn_local(&mut self, future: Box>) + -> Result<(), SpawnError> + { + CURRENT.with(|current| { + match current.spawn.get() { + Some(spawn) => { + unsafe { (*spawn).spawn_local(future) }; + Ok(()) + } + None => { + Err(SpawnError::shutdown()) + } + } + }) + } +} + +impl tokio_executor::Executor for TaskExecutor { + fn spawn(&mut self, future: Box + Send>) + -> Result<(), SpawnError> + { + self.spawn_local(future) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, _future: Box + Send>) + -> Result<(), futures2::executor::SpawnError> + { + panic!("Futures 0.2 integration is not available for current_thread"); + } + + fn status(&self) -> Result<(), SpawnError> { + CURRENT.with(|current| { + if current.spawn.get().is_some() { + Ok(()) + } else { + Err(SpawnError::shutdown()) + } + }) + } +} + +impl Executor for TaskExecutor +where F: Future + 'static +{ + fn execute(&self, future: F) -> Result<(), ExecuteError> { + CURRENT.with(|current| { + match current.spawn.get() { + Some(spawn) => { + unsafe { (*spawn).spawn_local(Box::new(future)) }; + Ok(()) + } + None => { + Err(ExecuteError::new(ExecuteErrorKind::Shutdown, future)) + } + } + }) + } +} + +// ===== impl Context ===== + +impl<'a> Context<'a> { + /// Cancels *all* executing futures. 
+ pub fn cancel_all_spawned(&self) { + self.cancel.set(true); + } +} + +// ===== impl Borrow ===== + +impl<'a, U: Unpark> Borrow<'a, U> { + fn enter(&mut self, _: &mut Enter, f: F) -> R + where F: FnOnce() -> R, + { + CURRENT.with(|current| { + current.set_spawn(self, || { + f() + }) + }) + } +} + +impl<'a, U: Unpark> SpawnLocal for Borrow<'a, U> { + fn spawn_local(&mut self, future: Box>) { + *self.num_futures += 1; + self.scheduler.schedule(future); + } +} + +// ===== impl CurrentRunner ===== + +impl CurrentRunner { + fn set_spawn(&self, spawn: &mut SpawnLocal, f: F) -> R + where F: FnOnce() -> R + { + struct Reset<'a>(&'a CurrentRunner); + + impl<'a> Drop for Reset<'a> { + fn drop(&mut self) { + self.0.spawn.set(None); + } + } + + let _reset = Reset(self); + + let spawn = unsafe { hide_lt(spawn as *mut SpawnLocal) }; + self.spawn.set(Some(spawn)); + + f() + } +} + +unsafe fn hide_lt<'a>(p: *mut (SpawnLocal + 'a)) -> *mut (SpawnLocal + 'static) { + use std::mem; + mem::transmute(p) +} + +// ===== impl RunTimeoutError ===== + +impl RunTimeoutError { + fn new(timeout: bool) -> Self { + RunTimeoutError { timeout } + } + + /// Returns `true` if the error was caused by the operation timing out. 
+ pub fn is_timeout(&self) -> bool { + self.timeout + } +} + +impl From for RunTimeoutError { + fn from(_: tokio_executor::EnterError) -> Self { + RunTimeoutError::new(false) + } +} + +// ===== impl BlockError ===== + +impl BlockError { + /// Returns the error yielded by the future being blocked on + pub fn into_inner(self) -> Option { + self.inner + } +} + +impl From for BlockError { + fn from(_: tokio_executor::EnterError) -> Self { + BlockError { inner: None } + } +} diff --git a/third_party/rust/tokio/src/executor/current_thread/scheduler.rs b/third_party/rust/tokio/src/executor/current_thread/scheduler.rs new file mode 100644 index 000000000000..c66523bf22db --- /dev/null +++ b/third_party/rust/tokio/src/executor/current_thread/scheduler.rs @@ -0,0 +1,772 @@ +use super::Borrow; +use tokio_executor::Enter; +use tokio_executor::park::Unpark; + +use futures::{Future, Async}; +use futures::executor::{self, Spawn, UnsafeNotify, NotifyHandle}; + +use std::cell::UnsafeCell; +use std::fmt::{self, Debug}; +use std::mem; +use std::ptr; +use std::sync::atomic::Ordering::{Relaxed, SeqCst, Acquire, Release, AcqRel}; +use std::sync::atomic::{AtomicPtr, AtomicBool, AtomicUsize}; +use std::sync::{Arc, Weak}; +use std::usize; +use std::thread; +use std::marker::PhantomData; + +/// A generic task-aware scheduler. +/// +/// This is used both by `FuturesUnordered` and the current-thread executor. +pub struct Scheduler { + inner: Arc>, + nodes: List, +} + +pub struct Notify<'a, U: 'a>(&'a Arc>); + +// A linked-list of nodes +struct List { + len: usize, + head: *const Node, + tail: *const Node, +} + +// Scheduler is implemented using two linked lists. The first linked list tracks +// all items managed by a `Scheduler`. This list is stored on the `Scheduler` +// struct and is **not** thread safe. The second linked list is an +// implementation of the intrusive MPSC queue algorithm described by +// 1024cores.net and is stored on `Inner`. 
This linked list can push items to +// the back concurrently but only one consumer may pop from the front. To +// enforce this requirement, all popping will be performed via fns on +// `Scheduler` that take `&mut self`. +// +// When a item is submitted to the set a node is allocated and inserted in +// both linked lists. This means that all insertion operations **must** be +// originated from `Scheduler` with `&mut self` The next call to `tick` will +// (eventually) see this node and call `poll` on the item. +// +// Nodes are wrapped in `Arc` cells which manage the lifetime of the node. +// However, `Arc` handles are sometimes cast to `*const Node` pointers. +// Specifically, when a node is stored in at least one of the two lists +// described above, this represents a logical `Arc` handle. This is how +// `Scheduler` maintains its reference to all nodes it manages. Each +// `NotifyHandle` instance is an `Arc` as well. +// +// When `Scheduler` drops, it clears the linked list of all nodes that it +// manages. When doing so, it must attempt to decrement the reference count (by +// dropping an Arc handle). However, it can **only** decrement the reference +// count if the node is not currently stored in the mpsc channel. If the node +// **is** "queued" in the mpsc channel, then the arc reference count cannot be +// decremented. Once the node is popped from the mpsc channel, then the final +// arc reference count can be decremented, thus freeing the node. 
+ +struct Inner { + // Thread unpark handle + unpark: U, + + // Tick number + tick_num: AtomicUsize, + + // Head/tail of the readiness queue + head_readiness: AtomicPtr>, + tail_readiness: UnsafeCell<*const Node>, + + // Used as part of the MPSC queue algorithm + stub: Arc>, +} + +unsafe impl Send for Inner {} +unsafe impl Sync for Inner {} + +impl executor::Notify for Inner { + fn notify(&self, _: usize) { + self.unpark.unpark(); + } +} + +struct Node { + // The item + item: UnsafeCell>, + + // The tick at which this node was notified + notified_at: AtomicUsize, + + // Next pointer for linked list tracking all active nodes + next_all: UnsafeCell<*const Node>, + + // Previous node in linked list tracking all active nodes + prev_all: UnsafeCell<*const Node>, + + // Next pointer in readiness queue + next_readiness: AtomicPtr>, + + // Whether or not this node is currently in the mpsc queue. + queued: AtomicBool, + + // Queue that we'll be enqueued to when notified + queue: Weak>, +} + +/// Returned by `Inner::dequeue`, representing either a dequeue success (with +/// the dequeued node), an empty list, or an inconsistent state. +/// +/// The inconsistent state is described in more detail at [1024cores], but +/// roughly indicates that a node will be ready to dequeue sometime shortly in +/// the future and the caller should try again soon. +/// +/// [1024cores]: http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue +enum Dequeue { + Data(*const Node), + Empty, + Yield, + Inconsistent, +} + +/// Wraps a spawned boxed future +struct Task(Spawn>>); + +/// A task that is scheduled. `turn` must be called +pub struct Scheduled<'a, U: 'a> { + task: &'a mut Task, + notify: &'a Notify<'a, U>, + done: &'a mut bool, +} + +impl Scheduler +where U: Unpark, +{ + /// Constructs a new, empty `Scheduler` + /// + /// The returned `Scheduler` does not contain any items and, in this + /// state, `Scheduler::poll` will return `Ok(Async::Ready(None))`. 
+ pub fn new(unpark: U) -> Self { + let stub = Arc::new(Node { + item: UnsafeCell::new(None), + notified_at: AtomicUsize::new(0), + next_all: UnsafeCell::new(ptr::null()), + prev_all: UnsafeCell::new(ptr::null()), + next_readiness: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + queue: Weak::new(), + }); + let stub_ptr = &*stub as *const Node; + let inner = Arc::new(Inner { + unpark, + tick_num: AtomicUsize::new(0), + head_readiness: AtomicPtr::new(stub_ptr as *mut _), + tail_readiness: UnsafeCell::new(stub_ptr), + stub: stub, + }); + + Scheduler { + inner: inner, + nodes: List::new(), + } + } + + pub fn notify(&self) -> NotifyHandle { + self.inner.clone().into() + } + + pub fn schedule(&mut self, item: Box>) { + // Get the current scheduler tick + let tick_num = self.inner.tick_num.load(SeqCst); + + let node = Arc::new(Node { + item: UnsafeCell::new(Some(Task::new(item))), + notified_at: AtomicUsize::new(tick_num), + next_all: UnsafeCell::new(ptr::null_mut()), + prev_all: UnsafeCell::new(ptr::null_mut()), + next_readiness: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + queue: Arc::downgrade(&self.inner), + }); + + // Right now our node has a strong reference count of 1. We transfer + // ownership of this reference count to our internal linked list + // and we'll reclaim ownership through the `unlink` function below. + let ptr = self.nodes.push_back(node); + + // We'll need to get the item "into the system" to start tracking it, + // e.g. getting its unpark notifications going to us tracking which + // items are ready. To do that we unconditionally enqueue it for + // polling here. 
+ self.inner.enqueue(ptr); + } + + /// Returns `true` if there are currently any pending futures + pub fn has_pending_futures(&mut self) -> bool { + // See function definition for why the unsafe is needed and + // correctly used here + unsafe { + self.inner.has_pending_futures() + } + } + + /// Advance the scheduler state, returning `true` if any futures were + /// processed. + /// + /// This function should be called whenever the caller is notified via a + /// wakeup. + pub fn tick(&mut self, enter: &mut Enter, num_futures: &mut usize) -> bool + { + let mut ret = false; + let tick = self.inner.tick_num.fetch_add(1, SeqCst) + .wrapping_add(1); + + loop { + let node = match unsafe { self.inner.dequeue(Some(tick)) } { + Dequeue::Empty => { + return ret; + } + Dequeue::Yield => { + self.inner.unpark.unpark(); + return ret; + } + Dequeue::Inconsistent => { + thread::yield_now(); + continue; + } + Dequeue::Data(node) => node, + }; + + ret = true; + + debug_assert!(node != self.inner.stub()); + + unsafe { + if (*(*node).item.get()).is_none() { + // The node has already been released. However, while it was + // being released, another thread notified it, which + // resulted in it getting pushed into the mpsc channel. + // + // In this case, we just dec the ref count. + let node = ptr2arc(node); + assert!((*node.next_all.get()).is_null()); + assert!((*node.prev_all.get()).is_null()); + continue + }; + + // We're going to need to be very careful if the `poll` + // function below panics. We need to (a) not leak memory and + // (b) ensure that we still don't have any use-after-frees. To + // manage this we do a few things: + // + // * This "bomb" here will call `release_node` if dropped + // abnormally. That way we'll be sure the memory management + // of the `node` is managed correctly. + // + // * We unlink the node from our internal queue to preemptively + // assume is is complete (will return Ready or panic), in + // which case we'll want to discard it regardless. 
+ // + struct Bomb<'a, U: Unpark + 'a> { + borrow: &'a mut Borrow<'a, U>, + enter: &'a mut Enter, + node: Option>>, + } + + impl<'a, U: Unpark> Drop for Bomb<'a, U> { + fn drop(&mut self) { + if let Some(node) = self.node.take() { + self.borrow.enter(self.enter, || release_node(node)) + } + } + } + + let node = self.nodes.remove(node); + + let mut borrow = Borrow { + scheduler: self, + num_futures, + }; + + let mut bomb = Bomb { + node: Some(node), + enter: enter, + borrow: &mut borrow, + }; + + let mut done = false; + + // Now that the bomb holds the node, create a new scope. This + // scope ensures that the borrow will go out of scope before we + // mutate the node pointer in `bomb` again + { + let node = bomb.node.as_ref().unwrap(); + + // Get a reference to the inner future. We already ensured + // that the item `is_some`. + let item = (*node.item.get()).as_mut().unwrap(); + + // Unset queued flag... this must be done before + // polling. This ensures that the item gets + // rescheduled if it is notified **during** a call + // to `poll`. + let prev = (*node).queued.swap(false, SeqCst); + assert!(prev); + + // Poll the underlying item with the appropriate `notify` + // implementation. This is where a large bit of the unsafety + // starts to stem from internally. The `notify` instance itself + // is basically just our `Arc` and tracks the mpsc + // queue of ready items. + // + // Critically though `Node` won't actually access `Task`, the + // item, while it's floating around inside of `Task` + // instances. These structs will basically just use `T` to size + // the internal allocation, appropriately accessing fields and + // deallocating the node if need be. 
+ let borrow = &mut *bomb.borrow; + let enter = &mut *bomb.enter; + let notify = Notify(bomb.node.as_ref().unwrap()); + + let mut scheduled = Scheduled { + task: item, + notify: ¬ify, + done: &mut done, + }; + + if borrow.enter(enter, || scheduled.tick()) { + *borrow.num_futures -= 1; + } + } + + if !done { + // The future is not done, push it back into the "all + // node" list. + let node = bomb.node.take().unwrap(); + bomb.borrow.scheduler.nodes.push_back(node); + } + } + } + } +} + +impl<'a, U: Unpark> Scheduled<'a, U> { + /// Polls the task, returns `true` if the task has completed. + pub fn tick(&mut self) -> bool { + // Tick the future + let ret = match self.task.0.poll_future_notify(self.notify, 0) { + Ok(Async::Ready(_)) | Err(_) => true, + Ok(Async::NotReady) => false, + }; + + *self.done = ret; + ret + } +} + +impl Task { + pub fn new(future: Box + 'static>) -> Self { + Task(executor::spawn(future)) + } +} + +impl fmt::Debug for Task { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Task") + .finish() + } +} + +fn release_node(node: Arc>) { + // The item is done, try to reset the queued flag. This will prevent + // `notify` from doing any work in the item + let prev = node.queued.swap(true, SeqCst); + + // Drop the item, even if it hasn't finished yet. This is safe + // because we're dropping the item on the thread that owns + // `Scheduler`, which correctly tracks T's lifetimes and such. + unsafe { + drop((*node.item.get()).take()); + } + + // If the queued flag was previously set then it means that this node + // is still in our internal mpsc queue. We then transfer ownership + // of our reference count to the mpsc queue, and it'll come along and + // free it later, noticing that the item is `None`. + // + // If, however, the queued flag was *not* set then we're safe to + // release our reference count on the internal node. 
The queued flag + // was set above so all item `enqueue` operations will not actually + // enqueue the node, so our node will never see the mpsc queue again. + // The node itself will be deallocated once all reference counts have + // been dropped by the various owning tasks elsewhere. + if prev { + mem::forget(node); + } +} + +impl Debug for Scheduler { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "Scheduler {{ ... }}") + } +} + +impl Drop for Scheduler { + fn drop(&mut self) { + // When a `Scheduler` is dropped we want to drop all items associated + // with it. At the same time though there may be tons of `Task` handles + // flying around which contain `Node` references inside them. We'll + // let those naturally get deallocated when the `Task` itself goes out + // of scope or gets notified. + while let Some(node) = self.nodes.pop_front() { + release_node(node); + } + + // Note that at this point we could still have a bunch of nodes in the + // mpsc queue. None of those nodes, however, have items associated + // with them so they're safe to destroy on any thread. At this point + // the `Scheduler` struct, the owner of the one strong reference + // to `Inner` will drop the strong reference. At that point + // whichever thread releases the strong refcount last (be it this + // thread or some other thread as part of an `upgrade`) will clear out + // the mpsc queue and free all remaining nodes. + // + // While that freeing operation isn't guaranteed to happen here, it's + // guaranteed to happen "promptly" as no more "blocking work" will + // happen while there's a strong refcount held. + } +} + +impl Inner { + /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. 
+ fn enqueue(&self, node: *const Node) { + unsafe { + debug_assert!((*node).queued.load(Relaxed)); + + // This action does not require any coordination + (*node).next_readiness.store(ptr::null_mut(), Relaxed); + + // Note that these atomic orderings come from 1024cores + let node = node as *mut _; + let prev = self.head_readiness.swap(node, AcqRel); + (*prev).next_readiness.store(node, Release); + } + } + + /// Returns `true` if there are currently any pending futures + /// + /// See `dequeue` for an explanation why this function is unsafe. + unsafe fn has_pending_futures(&self) -> bool { + let tail = *self.tail_readiness.get(); + let next = (*tail).next_readiness.load(Acquire); + + if tail == self.stub() { + if next.is_null() { + return false; + } + } + + true + } + + /// The dequeue function from the 1024cores intrusive MPSC queue algorithm + /// + /// Note that this unsafe as it required mutual exclusion (only one thread + /// can call this) to be guaranteed elsewhere. + unsafe fn dequeue(&self, tick: Option) -> Dequeue { + let mut tail = *self.tail_readiness.get(); + let mut next = (*tail).next_readiness.load(Acquire); + + if tail == self.stub() { + if next.is_null() { + return Dequeue::Empty; + } + + *self.tail_readiness.get() = next; + tail = next; + next = (*next).next_readiness.load(Acquire); + } + + if let Some(tick) = tick { + let actual = (*tail).notified_at.load(SeqCst); + + // Only dequeue if the node was not scheduled during the current + // tick. + if actual == tick { + // Only doing the check above **should** be enough in + // practice. However, technically there is a potential for + // deadlocking if there are `usize::MAX` ticks while the thread + // scheduling the task is frozen. + // + // If, for some reason, this is not enough, calling `unpark` + // here will resolve the issue. 
+ return Dequeue::Yield; + } + } + + if !next.is_null() { + *self.tail_readiness.get() = next; + debug_assert!(tail != self.stub()); + return Dequeue::Data(tail); + } + + if self.head_readiness.load(Acquire) as *const _ != tail { + return Dequeue::Inconsistent; + } + + self.enqueue(self.stub()); + + next = (*tail).next_readiness.load(Acquire); + + if !next.is_null() { + *self.tail_readiness.get() = next; + return Dequeue::Data(tail); + } + + Dequeue::Inconsistent + } + + fn stub(&self) -> *const Node { + &*self.stub + } +} + +impl Drop for Inner { + fn drop(&mut self) { + // Once we're in the destructor for `Inner` we need to clear out the + // mpsc queue of nodes if there's anything left in there. + // + // Note that each node has a strong reference count associated with it + // which is owned by the mpsc queue. All nodes should have had their + // items dropped already by the `Scheduler` destructor above, + // so we're just pulling out nodes and dropping their refcounts. + unsafe { + loop { + match self.dequeue(None) { + Dequeue::Empty => break, + Dequeue::Yield => unreachable!(), + Dequeue::Inconsistent => abort("inconsistent in drop"), + Dequeue::Data(ptr) => drop(ptr2arc(ptr)), + } + } + } + } +} + +impl List { + fn new() -> Self { + List { + len: 0, + head: ptr::null_mut(), + tail: ptr::null_mut(), + } + } + + /// Prepends an element to the back of the list + fn push_back(&mut self, node: Arc>) -> *const Node { + let ptr = arc2ptr(node); + + unsafe { + // Point to the current last node in the list + *(*ptr).prev_all.get() = self.tail; + *(*ptr).next_all.get() = ptr::null_mut(); + + if !self.tail.is_null() { + *(*self.tail).next_all.get() = ptr; + self.tail = ptr; + } else { + // This is the first node + self.tail = ptr; + self.head = ptr; + } + } + + self.len += 1; + + return ptr + } + + /// Pop an element from the front of the list + fn pop_front(&mut self) -> Option>> { + if self.head.is_null() { + // The list is empty + return None; + } + + self.len -= 1; 
+ + unsafe { + // Convert the ptr to Arc<_> + let node = ptr2arc(self.head); + + // Update the head pointer + self.head = *node.next_all.get(); + + // If the pointer is null, then the list is empty + if self.head.is_null() { + self.tail = ptr::null_mut(); + } else { + *(*self.head).prev_all.get() = ptr::null_mut(); + } + + Some(node) + } + } + + /// Remove a specific node + unsafe fn remove(&mut self, node: *const Node) -> Arc> { + let node = ptr2arc(node); + let next = *node.next_all.get(); + let prev = *node.prev_all.get(); + *node.next_all.get() = ptr::null_mut(); + *node.prev_all.get() = ptr::null_mut(); + + if !next.is_null() { + *(*next).prev_all.get() = prev; + } else { + self.tail = prev; + } + + if !prev.is_null() { + *(*prev).next_all.get() = next; + } else { + self.head = next; + } + + self.len -= 1; + + return node + } +} + +impl<'a, U> Clone for Notify<'a, U> { + fn clone(&self) -> Self { + Notify(self.0) + } +} + +impl<'a, U> fmt::Debug for Notify<'a, U> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Notify").finish() + } +} + +impl<'a, U: Unpark> From> for NotifyHandle { + fn from(handle: Notify<'a, U>) -> NotifyHandle { + unsafe { + let ptr = handle.0.clone(); + let ptr = mem::transmute::>, *mut ArcNode>(ptr); + NotifyHandle::new(hide_lt(ptr)) + } + } +} + +struct ArcNode(PhantomData); + +// We should never touch `Task` on any thread other than the one owning +// `Scheduler`, so this should be a safe operation. 
+unsafe impl Send for ArcNode {} +unsafe impl Sync for ArcNode {} + +impl executor::Notify for ArcNode { + fn notify(&self, _id: usize) { + unsafe { + let me: *const ArcNode = self; + let me: *const *const ArcNode = &me; + let me = me as *const Arc>; + Node::notify(&*me) + } + } +} + +unsafe impl UnsafeNotify for ArcNode { + unsafe fn clone_raw(&self) -> NotifyHandle { + let me: *const ArcNode = self; + let me: *const *const ArcNode = &me; + let me = &*(me as *const Arc>); + Notify(me).into() + } + + unsafe fn drop_raw(&self) { + let mut me: *const ArcNode = self; + let me = &mut me as *mut *const ArcNode as *mut Arc>; + ptr::drop_in_place(me); + } +} + +unsafe fn hide_lt(p: *mut ArcNode) -> *mut UnsafeNotify { + mem::transmute(p as *mut UnsafeNotify) +} + +impl Node { + fn notify(me: &Arc>) { + let inner = match me.queue.upgrade() { + Some(inner) => inner, + None => return, + }; + + // It's our job to notify the node that it's ready to get polled, + // meaning that we need to enqueue it into the readiness queue. To + // do this we flag that we're ready to be queued, and if successful + // we then do the literal queueing operation, ensuring that we're + // only queued once. + // + // Once the node is inserted we be sure to notify the parent task, + // as it'll want to come along and pick up our node now. + // + // Note that we don't change the reference count of the node here, + // we're just enqueueing the raw pointer. The `Scheduler` + // implementation guarantees that if we set the `queued` flag true that + // there's a reference count held by the main `Scheduler` queue + // still. + let prev = me.queued.swap(true, SeqCst); + if !prev { + // Get the current scheduler tick + let tick_num = inner.tick_num.load(SeqCst); + me.notified_at.store(tick_num, SeqCst); + + inner.enqueue(&**me); + inner.unpark.unpark(); + } + } +} + +impl Drop for Node { + fn drop(&mut self) { + // Currently a `Node` is sent across all threads for any lifetime, + // regardless of `T`. 
This means that for memory safety we can't + // actually touch `T` at any time except when we have a reference to the + // `Scheduler` itself. + // + // Consequently it *should* be the case that we always drop items from + // the `Scheduler` instance, but this is a bomb in place to catch + // any bugs in that logic. + unsafe { + if (*self.item.get()).is_some() { + abort("item still here when dropping"); + } + } + } +} + +fn arc2ptr(ptr: Arc) -> *const T { + let addr = &*ptr as *const T; + mem::forget(ptr); + return addr +} + +unsafe fn ptr2arc(ptr: *const T) -> Arc { + let anchor = mem::transmute::>(0x10); + let addr = &*anchor as *const T; + mem::forget(anchor); + let offset = addr as isize - 0x10; + mem::transmute::>(ptr as isize - offset) +} + +fn abort(s: &str) -> ! { + struct DoublePanic; + + impl Drop for DoublePanic { + fn drop(&mut self) { + panic!("panicking twice to abort the program"); + } + } + + let _bomb = DoublePanic; + panic!("{}", s); +} diff --git a/third_party/rust/tokio/src/executor/mod.rs b/third_party/rust/tokio/src/executor/mod.rs new file mode 100644 index 000000000000..7b4b202c26ce --- /dev/null +++ b/third_party/rust/tokio/src/executor/mod.rs @@ -0,0 +1,239 @@ +//! Task execution utilities. +//! +//! In the Tokio execution model, futures are lazy. When a future is created, no +//! work is performed. In order for the work defined by the future to happen, +//! the future must be submitted to an executor. A future that is submitted to +//! an executor is called a "task". +//! +//! The executor executor is responsible for ensuring that [`Future::poll`] is +//! called whenever the task is [notified]. Notification happens when the +//! internal state of a task transitions from "not ready" to ready. For +//! example, a socket might have received data and a call to `read` will now be +//! able to succeed. +//! +//! The specific strategy used to manage the tasks is left up to the +//! executor. 
There are two main flavors of executors: single-threaded and +//! multithreaded. This module provides both. +//! +//! * **[`current_thread`]**: A single-threaded executor that support spawning +//! tasks that are not `Send`. It guarantees that tasks will be executed on +//! the same thread from which they are spawned. +//! +//! * **[`thread_pool`]**: A multi-threaded executor that maintains a pool of +//! threads. Tasks are spawned to one of the threads in the pool and executed. +//! The pool employs a [work-stealing] strategy for optimizing how tasks get +//! spread across the available threads. +//! +//! # `Executor` trait. +//! +//! This module provides the [`Executor`] trait (re-exported from +//! [`tokio-executor`]), which describes the API that all executors must +//! implement. +//! +//! A free [`spawn`] function is provided that allows spawning futures onto the +//! default executor (tracked via a thread-local variable) without referencing a +//! handle. It is expected that all executors will set a value for the default +//! executor. This value will often be set to the executor itself, but it is +//! possible that the default executor might be set to a different executor. +//! +//! For example, the [`current_thread`] executor might set the default executor +//! to a thread pool instead of itself, allowing futures to spawn new tasks onto +//! the thread pool when those tasks are `Send`. +//! +//! [`Future::poll`]: https://docs.rs/futures/0.1/futures/future/trait.Future.html#tymethod.poll +//! [notified]: https://docs.rs/futures/0.1/futures/executor/trait.Notify.html#tymethod.notify +//! [`current_thread`]: current_thread/index.html +//! [`thread_pool`]: thread_pool/index.html +//! [work-stealing]: https://en.wikipedia.org/wiki/Work_stealing +//! [`tokio-executor`]: # +//! [`Executor`]: # +//! [`spawn`]: # + +pub mod current_thread; + +pub mod thread_pool { + //! Maintains a pool of threads across which the set of spawned tasks are + //! executed. + //! 
+ //! [`ThreadPool`] is an executor that uses a thread pool for executing + //! tasks concurrently across multiple cores. It uses a thread pool that is + //! optimized for use cases that involve multiplexing large number of + //! independent tasks that perform short(ish) amounts of computation and are + //! mainly waiting on I/O, i.e. the Tokio use case. + //! + //! Usually, users of [`ThreadPool`] will not create pool instances. + //! Instead, they will create a [`Runtime`] instance, which comes with a + //! pre-configured thread pool. + //! + //! At the core, [`ThreadPool`] uses a work-stealing based scheduling + //! strategy. When spawning a task while *external* to the thread pool + //! (i.e., from a thread that is not part of the thread pool), the task is + //! randomly assigned to a worker thread. When spawning a task while + //! *internal* to the thread pool, the task is assigned to the current + //! worker. + //! + //! Each worker maintains its own queue and first focuses on processing all + //! tasks in its queue. When the worker's queue is empty, the worker will + //! attempt to *steal* tasks from other worker queues. This strategy helps + //! ensure that work is evenly distributed across threads while minimizing + //! synchronization between worker threads. + //! + //! # Usage + //! + //! Thread pool instances are created using [`ThreadPool::new`] or + //! [`Builder::new`]. The first option returns a thread pool with default + //! configuration values. The second option allows configuring the thread + //! pool before instantiating it. + //! + //! Once an instance is obtained, futures may be spawned onto it using the + //! [`spawn`] function. + //! + //! A handle to the thread pool is obtained using [`ThreadPool::sender`]. + //! This handle is **only** able to spawn futures onto the thread pool. It + //! is unable to affect the lifecycle of the thread pool in any way. This + //! handle can be passed into functions or stored in structs as a way to + //! 
grant the capability of spawning futures. + //! + //! # Examples + //! + //! ```rust + //! # extern crate tokio; + //! # extern crate futures; + //! # use tokio::executor::thread_pool::ThreadPool; + //! use futures::future::{Future, lazy}; + //! + //! # pub fn main() { + //! // Create a thread pool with default configuration values + //! let thread_pool = ThreadPool::new(); + //! + //! thread_pool.spawn(lazy(|| { + //! println!("called from a worker thread"); + //! Ok(()) + //! })); + //! + //! // Gracefully shutdown the threadpool + //! thread_pool.shutdown().wait().unwrap(); + //! # } + //! ``` + //! + //! [`ThreadPool`]: struct.ThreadPool.html + //! [`ThreadPool::new`]: struct.ThreadPool.html#method.new + //! [`ThreadPool::sender`]: struct.ThreadPool.html#method.sender + //! [`spawn`]: struct.ThreadPool.html#method.spawn + //! [`Builder::new`]: struct.Builder.html#method.new + //! [`Runtime`]: ../../runtime/struct.Runtime.html + + pub use tokio_threadpool::{ + Builder, + Sender, + Shutdown, + ThreadPool, + }; +} + +pub use tokio_executor::{Executor, DefaultExecutor, SpawnError}; + +use futures::{Future, IntoFuture}; +use futures::future::{self, FutureResult}; + +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Return value from the `spawn` function. +/// +/// Currently this value doesn't actually provide any functionality. However, it +/// provides a way to add functionality later without breaking backwards +/// compatibility. +/// +/// This also implements `IntoFuture` so that it can be used as the return value +/// in a `for_each` loop. +/// +/// See [`spawn`] for more details. +/// +/// [`spawn`]: fn.spawn.html +#[derive(Debug)] +pub struct Spawn(()); + +/// Spawns a future on the default executor. +/// +/// In order for a future to do work, it must be spawned on an executor. The +/// `spawn` function is the easiest way to do this. 
It spawns a future on the +/// [default executor] for the current execution context (tracked using a +/// thread-local variable). +/// +/// The default executor is **usually** a thread pool. +/// +/// # Examples +/// +/// In this example, a server is started and `spawn` is used to start a new task +/// that processes each received connection. +/// +/// ```rust +/// # extern crate tokio; +/// # extern crate futures; +/// # use futures::{Future, Stream}; +/// use tokio::net::TcpListener; +/// +/// # fn process(_: T) -> Box + Send> { +/// # unimplemented!(); +/// # } +/// # fn dox() { +/// # let addr = "127.0.0.1:8080".parse().unwrap(); +/// let listener = TcpListener::bind(&addr).unwrap(); +/// +/// let server = listener.incoming() +/// .map_err(|e| println!("error = {:?}", e)) +/// .for_each(|socket| { +/// tokio::spawn(process(socket)) +/// }); +/// +/// tokio::run(server); +/// # } +/// # pub fn main() {} +/// ``` +/// +/// [default executor]: struct.DefaultExecutor.html +/// +/// # Panics +/// +/// This function will panic if the default executor is not set or if spawning +/// onto the default executor returns an error. To avoid the panic, use +/// [`DefaultExecutor`]. 
+/// +/// [`DefaultExecutor`]: struct.DefaultExecutor.html +pub fn spawn(f: F) -> Spawn +where F: Future + 'static + Send +{ + ::tokio_executor::spawn(f); + Spawn(()) +} + +/// Like `spawn`, but compatible with futures 0.2 +#[cfg(feature = "unstable-futures")] +pub fn spawn2(f: F) -> Spawn + where F: futures2::Future + 'static + Send +{ + ::tokio_executor::spawn2(f); + Spawn(()) +} + +impl IntoFuture for Spawn { + type Future = FutureResult<(), ()>; + type Item = (); + type Error = (); + + fn into_future(self) -> Self::Future { + future::ok(()) + } +} + +#[cfg(feature = "unstable-futures")] +impl futures2::IntoFuture for Spawn { + type Future = futures2::future::FutureResult<(), ()>; + type Item = (); + type Error = (); + + fn into_future(self) -> Self::Future { + futures2::future::ok(()) + } +} diff --git a/third_party/rust/tokio/src/fs.rs b/third_party/rust/tokio/src/fs.rs new file mode 100644 index 000000000000..f3d5eb243d5b --- /dev/null +++ b/third_party/rust/tokio/src/fs.rs @@ -0,0 +1,13 @@ +//! Asynchronous filesystem manipulation operations. +//! +//! This module contains basic methods and types for manipulating the contents +//! of the local filesystem from within the context of the Tokio runtime. +//! +//! Unlike *most* other Tokio APIs, the filesystem APIs **must** be used from +//! the context of the Tokio runtime as they require Tokio specific features to +//! function. + +pub use tokio_fs::{ + file, + File, +}; diff --git a/third_party/rust/tokio/src/lib.rs b/third_party/rust/tokio/src/lib.rs new file mode 100644 index 000000000000..7ff0b2fd1588 --- /dev/null +++ b/third_party/rust/tokio/src/lib.rs @@ -0,0 +1,235 @@ +//! A runtime for writing reliable, asynchronous, and slim applications. +//! +//! Tokio is an event-driven, non-blocking I/O platform for writing asynchronous +//! applications with the Rust programming language. At a high level, it +//! provides a few major components: +//! +//! 
* A multi threaded, work-stealing based task [scheduler][runtime]. +//! * A [reactor][reactor] backed by the operating system's event queue (epoll, kqueue, +//! IOCP, etc...). +//! * Asynchronous [TCP and UDP][net] sockets. +//! * Asynchronous [filesystem][fs] operations. +//! * [Timer][timer] API for scheduling work in the future. +//! +//! Tokio is built using [futures] as the abstraction for managing the +//! complexity of asynchronous programming. +//! +//! Guide level documentation is found on the [website]. +//! +//! [website]: https://tokio.rs/docs/getting-started/hello-world/ +//! [futures]: http://docs.rs/futures +//! +//! # Examples +//! +//! A simple TCP echo server: +//! +//! ```no_run +//! extern crate tokio; +//! +//! use tokio::prelude::*; +//! use tokio::io::copy; +//! use tokio::net::TcpListener; +//! +//! fn main() { +//! // Bind the server's socket. +//! let addr = "127.0.0.1:12345".parse().unwrap(); +//! let listener = TcpListener::bind(&addr) +//! .expect("unable to bind TCP listener"); +//! +//! // Pull out a stream of sockets for incoming connections +//! let server = listener.incoming() +//! .map_err(|e| eprintln!("accept failed = {:?}", e)) +//! .for_each(|sock| { +//! // Split up the reading and writing parts of the +//! // socket. +//! let (reader, writer) = sock.split(); +//! +//! // A future that echos the data and returns how +//! // many bytes were copied... +//! let bytes_copied = copy(reader, writer); +//! +//! // ... after which we'll print what happened. +//! let handle_conn = bytes_copied.map(|amt| { +//! println!("wrote {:?} bytes", amt) +//! }).map_err(|err| { +//! eprintln!("IO error {:?}", err) +//! }); +//! +//! // Spawn the future as a concurrent task. +//! tokio::spawn(handle_conn) +//! }); +//! +//! // Start the Tokio runtime +//! tokio::run(server); +//! } +//! 
``` + +#![doc(html_root_url = "https://docs.rs/tokio/0.1.5")] +#![deny(missing_docs, warnings, missing_debug_implementations)] + +#[macro_use] +extern crate futures; +extern crate mio; +extern crate tokio_io; +extern crate tokio_executor; +extern crate tokio_fs; +extern crate tokio_reactor; +extern crate tokio_threadpool; +extern crate tokio_timer; +extern crate tokio_tcp; +extern crate tokio_udp; + +#[cfg(feature = "unstable-futures")] +extern crate futures2; + +pub mod clock; +pub mod executor; +pub mod fs; +pub mod net; +pub mod reactor; +pub mod runtime; +pub mod timer; +pub mod util; + +pub use executor::spawn; +#[cfg(feature = "unstable-futures")] +pub use executor::spawn2; + +pub use runtime::run; + +pub mod io { + //! Asynchronous I/O. + //! + //! This module is the asynchronous version of `std::io`. Primarily, it + //! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which extend the + //! `Read` and `Write` traits of the standard library. + //! + //! # AsyncRead and AsyncWrite + //! + //! [`AsyncRead`] and [`AsyncWrite`] must only be implemented for + //! non-blocking I/O types that integrate with the futures type system. In + //! other words, these types must never block the thread, and instead the + //! current task is notified when the I/O resource is ready. + //! + //! # Standard input and output + //! + //! Tokio provides asynchronous APIs to standard [input], [output], and [error]. + //! These APIs are very similar to the ones provided by `std`, but they also + //! implement [`AsyncRead`] and [`AsyncWrite`]. + //! + //! Unlike *most* other Tokio APIs, the standard input / output APIs + //! **must** be used from the context of the Tokio runtime as they require + //! Tokio specific features to function. + //! + //! [input]: fn.stdin.html + //! [output]: fn.stdout.html + //! [error]: fn.stderr.html + //! + //! # Utility functions + //! + //! Utilities functions are provided for working with [`AsyncRead`] / + //! [`AsyncWrite`] types. 
For example, [`copy`] asynchronously copies all + //! data from a source to a destination. + //! + //! # `std` re-exports + //! + //! Additionally, [`Read`], [`Write`], [`Error`], [`ErrorKind`], and + //! [`Result`] are re-exported from `std::io` for ease of use. + //! + //! [`AsyncRead`]: trait.AsyncRead.html + //! [`AsyncWrite`]: trait.AsyncWrite.html + //! [`copy`]: fn.copy.html + //! [`Read`]: trait.Read.html + //! [`Write`]: trait.Write.html + //! [`Error`]: struct.Error.html + //! [`ErrorKind`]: enum.ErrorKind.html + //! [`Result`]: type.Result.html + + pub use tokio_io::{ + AsyncRead, + AsyncWrite, + }; + + // standard input, output, and error + pub use tokio_fs::{ + stdin, + Stdin, + stdout, + Stdout, + stderr, + Stderr, + }; + + // Utils + pub use tokio_io::io::{ + copy, + Copy, + flush, + Flush, + lines, + Lines, + read_exact, + ReadExact, + read_to_end, + ReadToEnd, + read_until, + ReadUntil, + ReadHalf, + shutdown, + Shutdown, + write_all, + WriteAll, + WriteHalf, + }; + + // Re-export io::Error so that users don't have to deal + // with conflicts when `use`ing `futures::io` and `std::io`. + pub use ::std::io::{ + Error, + ErrorKind, + Result, + Read, + Write, + }; +} + +pub mod prelude { + //! A "prelude" for users of the `tokio` crate. + //! + //! This prelude is similar to the standard library's prelude in that you'll + //! almost always want to import its entire contents, but unlike the standard + //! library's prelude you'll have to do so manually: + //! + //! ``` + //! use tokio::prelude::*; + //! ``` + //! + //! The prelude may grow over time as additional items see ubiquitous use. 
+ + pub use tokio_io::{ + AsyncRead, + AsyncWrite, + }; + + pub use util::{ + FutureExt, + }; + + pub use ::std::io::{ + Read, + Write, + }; + + pub use futures::{ + Future, + future, + Stream, + stream, + Sink, + IntoFuture, + Async, + AsyncSink, + Poll, + task, + }; +} diff --git a/third_party/rust/tokio/src/net.rs b/third_party/rust/tokio/src/net.rs new file mode 100644 index 000000000000..3336acd77410 --- /dev/null +++ b/third_party/rust/tokio/src/net.rs @@ -0,0 +1,41 @@ +//! TCP/UDP bindings for `tokio`. +//! +//! This module contains the TCP/UDP networking types, similar to the standard +//! library, which can be used to implement networking protocols. +//! +//! # TCP +//! +//! Connecting to an address, via TCP, can be done using [`TcpStream`]'s +//! [`connect`] method, which returns [`ConnectFuture`]. `ConnectFuture` +//! implements a future which returns a `TcpStream`. +//! +//! To listen on an address [`TcpListener`] can be used. `TcpListener`'s +//! [`incoming`][incoming_method] method can be used to accept new connections. +//! It return the [`Incoming`] struct, which implements a stream which returns +//! `TcpStream`s. +//! +//! [`TcpStream`]: struct.TcpStream.html +//! [`connect`]: struct.TcpStream.html#method.connect +//! [`ConnectFuture`]: struct.ConnectFuture.html +//! [`TcpListener`]: struct.TcpListener.html +//! [incoming_method]: struct.TcpListener.html#method.incoming +//! [`Incoming`]: struct.Incoming.html +//! +//! # UDP +//! +//! The main struct for UDP is the [`UdpSocket`], which represents a UDP socket. +//! Reading and writing to it can be done using futures, which return the +//! [`RecvDgram`] and [`SendDgram`] structs respectively. +//! +//! For convenience it's also possible to convert raw datagrams into higher-level +//! frames. +//! +//! [`UdpSocket`]: struct.UdpSocket.html +//! [`RecvDgram`]: struct.RecvDgram.html +//! [`SendDgram`]: struct.SendDgram.html +//! [`UdpFramed`]: struct.UdpFramed.html +//! 
[`framed`]: struct.UdpSocket.html#method.framed + +pub use tokio_tcp::{TcpStream, ConnectFuture}; +pub use tokio_tcp::{TcpListener, Incoming}; +pub use tokio_udp::{UdpSocket, UdpFramed, SendDgram, RecvDgram}; diff --git a/third_party/rust/tokio/src/reactor/mod.rs b/third_party/rust/tokio/src/reactor/mod.rs new file mode 100644 index 000000000000..3f7603ee5e26 --- /dev/null +++ b/third_party/rust/tokio/src/reactor/mod.rs @@ -0,0 +1,149 @@ +//! Event loop that drives Tokio I/O resources. +//! +//! This module contains [`Reactor`], which is the event loop that drives all +//! Tokio I/O resources. It is the reactor's job to receive events from the +//! operating system ([epoll], [kqueue], [IOCP], etc...) and forward them to +//! waiting tasks. It is the bridge between operating system and the futures +//! model. +//! +//! # Overview +//! +//! When using Tokio, all operations are asynchronous and represented by +//! futures. These futures, representing the application logic, are scheduled by +//! an executor (see [runtime model] for more details). Executors wait for +//! notifications before scheduling the future for execution time, i.e., nothing +//! happens until an event is received indicating that the task can make +//! progress. +//! +//! The reactor receives events from the operating system and notifies the +//! executor. +//! +//! Let's start with a basic example, establishing a TCP connection. +//! +//! ```rust +//! # extern crate tokio; +//! # fn dox() { +//! use tokio::prelude::*; +//! use tokio::net::TcpStream; +//! +//! let addr = "93.184.216.34:9243".parse().unwrap(); +//! +//! let connect_future = TcpStream::connect(&addr); +//! +//! let task = connect_future +//! .and_then(|socket| { +//! println!("successfully connected"); +//! Ok(()) +//! }) +//! .map_err(|e| println!("failed to connect; err={:?}", e)); +//! +//! tokio::run(task); +//! # } +//! # fn main() {} +//! ``` +//! +//! Establishing a TCP connection usually cannot be completed immediately. +//! 
[`TcpStream::connect`] does not block the current thread. Instead, it +//! returns a [future][connect-future] that resolves once the TCP connection has +//! been established. The connect future itself has no way of knowing when the +//! TCP connection has been established. +//! +//! Before returning the future, [`TcpStream::connect`] registers the socket +//! with a reactor. This registration process, handled by [`Registration`], is +//! what links the [`TcpStream`] with the [`Reactor`] instance. At this point, +//! the reactor starts listening for connection events from the operating system +//! for that socket. +//! +//! Once the connect future is passed to [`tokio::run`], it is spawned onto a +//! thread pool. The thread pool waits until it is notified that the connection +//! has completed. +//! +//! When the TCP connection is established, the reactor receives an event from +//! the operating system. It then notifies the thread pool, telling it that the +//! connect future can complete. At this point, the thread pool will schedule +//! the task to run on one of its worker threads. This results in the `and_then` +//! closure to get executed. +//! +//! ## Lazy registration +//! +//! Notice how the snippet above does not explicitly reference a reactor. When +//! [`TcpStream::connect`] is called, it registers the socket with a reactor, +//! but no reactor is specified. This works because the registration process +//! mentioned above is actually lazy. It doesn't *actually* happen in the +//! [`connect`] function. Instead, the registration is established the first +//! time that the task is polled (again, see [runtime model]). +//! +//! A reactor instance is automatically made available when using the Tokio +//! [runtime], which is done using [`tokio::run`]. The Tokio runtime's executor +//! sets a thread-local variable referencing the associated [`Reactor`] instance +//! and [`Handle::current`] (used by [`Registration`]) returns the reference. +//! +//! 
## Implementation +//! +//! The reactor implementation uses [`mio`] to interface with the operating +//! system's event queue. A call to [`Reactor::poll`] results in in a single +//! call to [`Poll::poll`] which in turn results in a single call to the +//! operating system's selector. +//! +//! The reactor maintains state for each registered I/O resource. This tracks +//! the executor task to notify when events are provided by the operating +//! system's selector. This state is stored in a `Sync` data structure and +//! referenced by [`Registration`]. When the [`Registration`] instance is +//! dropped, this state is cleaned up. Because the state is stored in a `Sync` +//! data structure, the [`Registration`] instance is able to be moved to other +//! threads. +//! +//! By default, a runtime's default reactor runs on a background thread. This +//! ensures that application code cannot significantly impact the reactor's +//! responsiveness. +//! +//! ## Integrating with the reactor +//! +//! Tokio comes with a number of I/O resources, like TCP and UDP sockets, that +//! automatically integrate with the reactor. However, library authors or +//! applications may wish to implement their own resources that are also backed +//! by the reactor. +//! +//! There are a couple of ways to do this. +//! +//! If the custom I/O resource implements [`mio::Evented`] and implements +//! [`std::Read`] and / or [`std::Write`], then [`PollEvented`] is the most +//! suited. +//! +//! Otherwise, [`Registration`] can be used directly. This provides the lowest +//! level primitive needed for integrating with the reactor: a stream of +//! readiness events. +//! +//! [`Reactor`]: struct.Reactor.html +//! [`Registration`]: struct.Registration.html +//! [runtime model]: https://tokio.rs/docs/getting-started/runtime-model/ +//! [epoll]: http://man7.org/linux/man-pages/man7/epoll.7.html +//! [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 +//! 
[IOCP]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx +//! [`TcpStream::connect`]: ../net/struct.TcpStream.html#method.connect +//! [`connect`]: ../net/struct.TcpStream.html#method.connect +//! [connect-future]: ../net/struct.ConnectFuture.html +//! [`tokio::run`]: ../runtime/fn.run.html +//! [`TcpStream`]: ../net/struct.TcpStream.html +//! [runtime]: ../runtime +//! [`Handle::current`]: struct.Handle.html#method.current +//! [`mio`]: https://github.com/carllerche/mio +//! [`Reactor::poll`]: struct.Reactor.html#method.poll +//! [`Poll::poll`]: https://docs.rs/mio/0.6/mio/struct.Poll.html#method.poll +//! [`mio::Evented`]: https://docs.rs/mio/0.6/mio/trait.Evented.html +//! [`PollEvented`]: struct.PollEvented.html +//! [`std::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +//! [`std::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html + +pub use tokio_reactor::{ + Reactor, + Handle, + Background, + Turn, + Registration, + PollEvented as PollEvented2, +}; + +mod poll_evented; +#[allow(deprecated)] +pub use self::poll_evented::PollEvented; diff --git a/third_party/rust/tokio/src/reactor/poll_evented.rs b/third_party/rust/tokio/src/reactor/poll_evented.rs new file mode 100644 index 000000000000..d5f6750b6ba3 --- /dev/null +++ b/third_party/rust/tokio/src/reactor/poll_evented.rs @@ -0,0 +1,539 @@ +//! Readiness tracking streams, backing I/O objects. +//! +//! This module contains the core type which is used to back all I/O on object +//! in `tokio-core`. The `PollEvented` type is the implementation detail of +//! all I/O. Each `PollEvented` manages registration with a reactor, +//! acquisition of a token, and tracking of the readiness state on the +//! underlying I/O primitive. 
+ +#![allow(deprecated, warnings)] + +use std::fmt; +use std::io::{self, Read, Write}; +use std::sync::Mutex; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; + +use futures::{task, Async, Poll}; +use mio::event::Evented; +use mio::Ready; +use tokio_io::{AsyncRead, AsyncWrite}; + +use reactor::{Handle, Registration}; + +#[deprecated(since = "0.1.2", note = "PollEvented2 instead")] +#[doc(hidden)] +pub struct PollEvented { + io: E, + inner: Inner, + handle: Handle, +} + +struct Inner { + registration: Mutex, + + /// Currently visible read readiness + read_readiness: AtomicUsize, + + /// Currently visible write readiness + write_readiness: AtomicUsize, +} + +impl fmt::Debug for PollEvented { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PollEvented") + .field("io", &self.io) + .finish() + } +} + +impl PollEvented { + /// Creates a new readiness stream associated with the provided + /// `loop_handle` and for the given `source`. + pub fn new(io: E, handle: &Handle) -> io::Result> + where E: Evented, + { + let registration = Registration::new(); + registration.register(&io)?; + + Ok(PollEvented { + io: io, + inner: Inner { + registration: Mutex::new(registration), + read_readiness: AtomicUsize::new(0), + write_readiness: AtomicUsize::new(0), + }, + handle: handle.clone(), + }) + } + + /// Tests to see if this source is ready to be read from or not. + /// + /// If this stream is not ready for a read then `Async::NotReady` will be + /// returned and the current task will be scheduled to receive a + /// notification when the stream is readable again. In other words, this + /// method is only safe to call from within the context of a future's task, + /// typically done in a `Future::poll` method. + /// + /// This is mostly equivalent to `self.poll_ready(Ready::readable())`. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. 
+ pub fn poll_read(&mut self) -> Async<()> { + if self.poll_read2().is_ready() { + return ().into(); + } + + Async::NotReady + } + + fn poll_read2(&self) -> Async { + let r = self.inner.registration.lock().unwrap(); + + // Load the cached readiness + match self.inner.read_readiness.load(Relaxed) { + 0 => {} + mut n => { + // Check what's new with the reactor. + if let Some(ready) = r.take_read_ready().unwrap() { + n |= ready2usize(ready); + self.inner.read_readiness.store(n, Relaxed); + } + + return usize2ready(n).into(); + } + } + + let ready = match r.poll_read_ready().unwrap() { + Async::Ready(r) => r, + _ => return Async::NotReady, + }; + + // Cache the value + self.inner.read_readiness.store(ready2usize(ready), Relaxed); + + ready.into() + } + + /// Tests to see if this source is ready to be written to or not. + /// + /// If this stream is not ready for a write then `Async::NotReady` will be returned + /// and the current task will be scheduled to receive a notification when + /// the stream is writable again. In other words, this method is only safe + /// to call from within the context of a future's task, typically done in a + /// `Future::poll` method. + /// + /// This is mostly equivalent to `self.poll_ready(Ready::writable())`. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. + pub fn poll_write(&mut self) -> Async<()> { + let r = self.inner.registration.lock().unwrap(); + + match self.inner.write_readiness.load(Relaxed) { + 0 => {} + mut n => { + // Check what's new with the reactor. 
+ if let Some(ready) = r.take_write_ready().unwrap() { + n |= ready2usize(ready); + self.inner.write_readiness.store(n, Relaxed); + } + + return ().into(); + } + } + + let ready = match r.poll_write_ready().unwrap() { + Async::Ready(r) => r, + _ => return Async::NotReady, + }; + + // Cache the value + self.inner.write_readiness.store(ready2usize(ready), Relaxed); + + ().into() + } + + /// Test to see whether this source fulfills any condition listed in `mask` + /// provided. + /// + /// The `mask` given here is a mio `Ready` set of possible events. This can + /// contain any events like read/write but also platform-specific events + /// such as hup and error. The `mask` indicates events that are interested + /// in being ready. + /// + /// If any event in `mask` is ready then it is returned through + /// `Async::Ready`. The `Ready` set returned is guaranteed to not be empty + /// and contains all events that are currently ready in the `mask` provided. + /// + /// If no events are ready in the `mask` provided then the current task is + /// scheduled to receive a notification when any of them become ready. If + /// the `writable` event is contained within `mask` then this + /// `PollEvented`'s `write` task will be blocked and otherwise the `read` + /// task will be blocked. This is generally only relevant if you're working + /// with this `PollEvented` object on multiple tasks. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. 
+ pub fn poll_ready(&mut self, mask: Ready) -> Async { + let mut ret = Ready::empty(); + + if mask.is_empty() { + return ret.into(); + } + + if mask.is_writable() { + if self.poll_write().is_ready() { + ret = Ready::writable(); + } + } + + let mask = mask - Ready::writable(); + + if !mask.is_empty() { + if let Async::Ready(v) = self.poll_read2() { + ret |= v & mask; + } + } + + if ret.is_empty() { + if mask.is_writable() { + let _ = self.need_write(); + } + + if mask.is_readable() { + let _ = self.need_read(); + } + + Async::NotReady + } else { + ret.into() + } + } + + /// Indicates to this source of events that the corresponding I/O object is + /// no longer readable, but it needs to be. + /// + /// This function, like `poll_read`, is only safe to call from the context + /// of a future's task (typically in a `Future::poll` implementation). It + /// informs this readiness stream that the underlying object is no longer + /// readable, typically because a "would block" error was seen. + /// + /// *All* readiness bits associated with this stream except the writable bit + /// will be reset when this method is called. The current task is then + /// scheduled to receive a notification whenever anything changes other than + /// the writable bit. Note that this typically just means the readable bit + /// is used here, but if you're using a custom I/O object for events like + /// hup/error this may also be relevant. + /// + /// Note that it is also only valid to call this method if `poll_read` + /// previously indicated that the object is readable. That is, this function + /// must always be paired with calls to `poll_read` previously. + /// + /// # Errors + /// + /// This function will return an error if the `Reactor` that this `PollEvented` + /// is associated with has gone away (been destroyed). 
The error means that + /// the ambient futures task could not be scheduled to receive a + /// notification and typically means that the error should be propagated + /// outwards. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. + pub fn need_read(&mut self) -> io::Result<()> { + self.inner.read_readiness.store(0, Relaxed); + + if self.poll_read().is_ready() { + // Notify the current task + task::current().notify(); + } + + Ok(()) + } + + /// Indicates to this source of events that the corresponding I/O object is + /// no longer writable, but it needs to be. + /// + /// This function, like `poll_write`, is only safe to call from the context + /// of a future's task (typically in a `Future::poll` implementation). It + /// informs this readiness stream that the underlying object is no longer + /// writable, typically because a "would block" error was seen. + /// + /// The flag indicating that this stream is writable is unset and the + /// current task is scheduled to receive a notification when the stream is + /// then again writable. + /// + /// Note that it is also only valid to call this method if `poll_write` + /// previously indicated that the object is writable. That is, this function + /// must always be paired with calls to `poll_write` previously. + /// + /// # Errors + /// + /// This function will return an error if the `Reactor` that this `PollEvented` + /// is associated with has gone away (been destroyed). The error means that + /// the ambient futures task could not be scheduled to receive a + /// notification and typically means that the error should be propagated + /// outwards. + /// + /// # Panics + /// + /// This function will panic if called outside the context of a future's + /// task. 
+ pub fn need_write(&mut self) -> io::Result<()> { + self.inner.write_readiness.store(0, Relaxed); + + if self.poll_write().is_ready() { + // Notify the current task + task::current().notify(); + } + + Ok(()) + } + + /// Returns a reference to the event loop handle that this readiness stream + /// is associated with. + pub fn handle(&self) -> &Handle { + &self.handle + } + + /// Returns a shared reference to the underlying I/O object this readiness + /// stream is wrapping. + pub fn get_ref(&self) -> &E { + &self.io + } + + /// Returns a mutable reference to the underlying I/O object this readiness + /// stream is wrapping. + pub fn get_mut(&mut self) -> &mut E { + &mut self.io + } + + /// Consumes the `PollEvented` and returns the underlying I/O object + pub fn into_inner(self) -> E { + self.io + } + + /// Deregisters this source of events from the reactor core specified. + /// + /// This method can optionally be called to unregister the underlying I/O + /// object with the event loop that the `handle` provided points to. + /// Typically this method is not required as this automatically happens when + /// `E` is dropped, but for some use cases the `E` object doesn't represent + /// an owned reference, so dropping it won't automatically unregister with + /// the event loop. + /// + /// This consumes `self` as it will no longer provide events after the + /// method is called, and will likely return an error if this `PollEvented` + /// was created on a separate event loop from the `handle` specified. 
+ pub fn deregister(&self) -> io::Result<()> + where E: Evented, + { + self.inner.registration.lock().unwrap() + .deregister(&self.io) + } +} + +impl Read for PollEvented { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if let Async::NotReady = self.poll_read() { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().read(buf); + + if is_wouldblock(&r) { + self.need_read()?; + } + + return r + } +} + +impl Write for PollEvented { + fn write(&mut self, buf: &[u8]) -> io::Result { + if let Async::NotReady = self.poll_write() { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().write(buf); + + if is_wouldblock(&r) { + self.need_write()?; + } + + return r + } + + fn flush(&mut self) -> io::Result<()> { + if let Async::NotReady = self.poll_write() { + return Err(io::ErrorKind::WouldBlock.into()) + } + + let r = self.get_mut().flush(); + + if is_wouldblock(&r) { + self.need_write()?; + } + + return r + } +} + +impl AsyncRead for PollEvented { +} + +impl AsyncWrite for PollEvented { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +fn is_wouldblock(r: &io::Result) -> bool { + match *r { + Ok(_) => false, + Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, + } +} + +const READ: usize = 1 << 0; +const WRITE: usize = 1 << 1; + +fn ready2usize(ready: Ready) -> usize { + let mut bits = 0; + if ready.is_readable() { + bits |= READ; + } + if ready.is_writable() { + bits |= WRITE; + } + bits | platform::ready2usize(ready) +} + +fn usize2ready(bits: usize) -> Ready { + let mut ready = Ready::empty(); + if bits & READ != 0 { + ready.insert(Ready::readable()); + } + if bits & WRITE != 0 { + ready.insert(Ready::writable()); + } + ready | platform::usize2ready(bits) +} + +#[cfg(unix)] +mod platform { + use mio::Ready; + use mio::unix::UnixReady; + + const HUP: usize = 1 << 2; + const ERROR: usize = 1 << 3; + const AIO: usize = 1 << 4; + const LIO: usize = 1 << 5; + + #[cfg(any(target_os = 
"dragonfly", target_os = "freebsd"))] + fn is_aio(ready: &Ready) -> bool { + UnixReady::from(*ready).is_aio() + } + + #[cfg(not(any(target_os = "dragonfly", target_os = "freebsd")))] + fn is_aio(_ready: &Ready) -> bool { + false + } + + #[cfg(target_os = "freebsd")] + fn is_lio(ready: &Ready) -> bool { + UnixReady::from(*ready).is_lio() + } + + #[cfg(not(target_os = "freebsd"))] + fn is_lio(_ready: &Ready) -> bool { + false + } + + pub fn ready2usize(ready: Ready) -> usize { + let ready = UnixReady::from(ready); + let mut bits = 0; + if is_aio(&ready) { + bits |= AIO; + } + if is_lio(&ready) { + bits |= LIO; + } + if ready.is_error() { + bits |= ERROR; + } + if ready.is_hup() { + bits |= HUP; + } + bits + } + + #[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "ios", + target_os = "macos"))] + fn usize2ready_aio(ready: &mut UnixReady) { + ready.insert(UnixReady::aio()); + } + + #[cfg(not(any(target_os = "dragonfly", + target_os = "freebsd", target_os = "ios", target_os = "macos")))] + fn usize2ready_aio(_ready: &mut UnixReady) { + // aio not available here → empty + } + + #[cfg(target_os = "freebsd")] + fn usize2ready_lio(ready: &mut UnixReady) { + ready.insert(UnixReady::lio()); + } + + #[cfg(not(target_os = "freebsd"))] + fn usize2ready_lio(_ready: &mut UnixReady) { + // lio not available here → empty + } + + pub fn usize2ready(bits: usize) -> Ready { + let mut ready = UnixReady::from(Ready::empty()); + if bits & AIO != 0 { + usize2ready_aio(&mut ready); + } + if bits & LIO != 0 { + usize2ready_lio(&mut ready); + } + if bits & HUP != 0 { + ready.insert(UnixReady::hup()); + } + if bits & ERROR != 0 { + ready.insert(UnixReady::error()); + } + ready.into() + } +} + +#[cfg(windows)] +mod platform { + use mio::Ready; + + pub fn all() -> Ready { + // No platform-specific Readinesses for Windows + Ready::empty() + } + + pub fn hup() -> Ready { + Ready::empty() + } + + pub fn ready2usize(_r: Ready) -> usize { + 0 + } + + pub fn usize2ready(_r: usize) 
-> Ready { + Ready::empty() + } +} diff --git a/third_party/rust/tokio/src/runtime/builder.rs b/third_party/rust/tokio/src/runtime/builder.rs new file mode 100644 index 000000000000..c4e048b43529 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/builder.rs @@ -0,0 +1,148 @@ +use runtime::{Inner, Runtime}; + +use reactor::Reactor; + +use std::io; + +use tokio_reactor; +use tokio_threadpool::Builder as ThreadPoolBuilder; +use tokio_threadpool::park::DefaultPark; +use tokio_timer::clock::{self, Clock}; +use tokio_timer::timer::{self, Timer}; + +/// Builds Tokio Runtime with custom configuration values. +/// +/// Methods can be chained in order to set the configuration values. The +/// Runtime is constructed by calling [`build`]. +/// +/// New instances of `Builder` are obtained via [`Builder::new`]. +/// +/// See function level documentation for details on the various configuration +/// settings. +/// +/// [`build`]: #method.build +/// [`Builder::new`]: #method.new +/// +/// # Examples +/// +/// ``` +/// # extern crate tokio; +/// # extern crate tokio_threadpool; +/// # use tokio::runtime::Builder; +/// +/// # pub fn main() { +/// // create and configure ThreadPool +/// let mut threadpool_builder = tokio_threadpool::Builder::new(); +/// threadpool_builder +/// .name_prefix("my-runtime-worker-") +/// .pool_size(4); +/// +/// // build Runtime +/// let runtime = Builder::new() +/// .threadpool_builder(threadpool_builder) +/// .build(); +/// // ... call runtime.run(...) +/// # let _ = runtime; +/// # } +/// ``` +#[derive(Debug)] +pub struct Builder { + /// Thread pool specific builder + threadpool_builder: ThreadPoolBuilder, + + /// The clock to use + clock: Clock, +} + +impl Builder { + /// Returns a new runtime builder initialized with default configuration + /// values. + /// + /// Configuration methods can be chained on the return value. 
+ pub fn new() -> Builder { + let mut threadpool_builder = ThreadPoolBuilder::new(); + threadpool_builder.name_prefix("tokio-runtime-worker-"); + + Builder { + threadpool_builder, + clock: Clock::new(), + } + } + + /// Set the `Clock` instance that will be used by the runtime. + pub fn clock(&mut self, clock: Clock) -> &mut Self { + self.clock = clock; + self + } + + /// Set builder to set up the thread pool instance. + pub fn threadpool_builder(&mut self, val: ThreadPoolBuilder) -> &mut Self { + self.threadpool_builder = val; + self + } + + /// Create the configured `Runtime`. + /// + /// The returned `ThreadPool` instance is ready to spawn tasks. + /// + /// # Examples + /// + /// ``` + /// # extern crate tokio; + /// # use tokio::runtime::Builder; + /// # pub fn main() { + /// let runtime = Builder::new().build().unwrap(); + /// // ... call runtime.run(...) + /// # let _ = runtime; + /// # } + /// ``` + pub fn build(&mut self) -> io::Result { + use std::collections::HashMap; + use std::sync::{Arc, Mutex}; + + // Get a handle to the clock for the runtime. + let clock1 = self.clock.clone(); + let clock2 = clock1.clone(); + + let timers = Arc::new(Mutex::new(HashMap::<_, timer::Handle>::new())); + let t1 = timers.clone(); + + // Spawn a reactor on a background thread. + let reactor = Reactor::new()?.background()?; + + // Get a handle to the reactor. 
+ let reactor_handle = reactor.handle().clone(); + + let pool = self.threadpool_builder + .around_worker(move |w, enter| { + let timer_handle = t1.lock().unwrap() + .get(w.id()).unwrap() + .clone(); + + tokio_reactor::with_default(&reactor_handle, enter, |enter| { + clock::with_default(&clock1, enter, |enter| { + timer::with_default(&timer_handle, enter, |_| { + w.run(); + }); + }) + }); + }) + .custom_park(move |worker_id| { + // Create a new timer + let timer = Timer::new_with_now(DefaultPark::new(), clock2.clone()); + + timers.lock().unwrap() + .insert(worker_id.clone(), timer.handle()); + + timer + }) + .build(); + + Ok(Runtime { + inner: Some(Inner { + reactor, + pool, + }), + }) + } +} diff --git a/third_party/rust/tokio/src/runtime/current_thread/builder.rs b/third_party/rust/tokio/src/runtime/current_thread/builder.rs new file mode 100644 index 000000000000..72960fadf2ef --- /dev/null +++ b/third_party/rust/tokio/src/runtime/current_thread/builder.rs @@ -0,0 +1,88 @@ +use executor::current_thread::CurrentThread; +use runtime::current_thread::Runtime; + +use tokio_reactor::Reactor; +use tokio_timer::clock::Clock; +use tokio_timer::timer::Timer; + +use std::io; + +/// Builds a Single-threaded runtime with custom configuration values. +/// +/// Methods can be chained in order to set the configuration values. The +/// Runtime is constructed by calling [`build`]. +/// +/// New instances of `Builder` are obtained via [`Builder::new`]. +/// +/// See function level documentation for details on the various configuration +/// settings. +/// +/// [`build`]: #method.build +/// [`Builder::new`]: #method.new +/// +/// # Examples +/// +/// ``` +/// extern crate tokio; +/// extern crate tokio_timer; +/// +/// use tokio::runtime::current_thread::Builder; +/// use tokio_timer::clock::Clock; +/// +/// # pub fn main() { +/// // build Runtime +/// let runtime = Builder::new() +/// .clock(Clock::new()) +/// .build(); +/// // ... call runtime.run(...) 
+/// # let _ = runtime; +/// # } +/// ``` +#[derive(Debug)] +pub struct Builder { + /// The clock to use + clock: Clock, +} + +impl Builder { + /// Returns a new runtime builder initialized with default configuration + /// values. + /// + /// Configuration methods can be chained on the return value. + pub fn new() -> Builder { + Builder { + clock: Clock::new(), + } + } + + /// Set the `Clock` instance that will be used by the runtime. + pub fn clock(&mut self, clock: Clock) -> &mut Self { + self.clock = clock; + self + } + + /// Create the configured `Runtime`. + pub fn build(&mut self) -> io::Result { + // We need a reactor to receive events about IO objects from kernel + let reactor = Reactor::new()?; + let reactor_handle = reactor.handle(); + + // Place a timer wheel on top of the reactor. If there are no timeouts to fire, it'll let the + // reactor pick up some new external events. + let timer = Timer::new_with_now(reactor, self.clock.clone()); + let timer_handle = timer.handle(); + + // And now put a single-threaded executor on top of the timer. When there are no futures ready + // to do something, it'll let the timer or the reactor to generate some new stimuli for the + // futures to continue in their life. + let executor = CurrentThread::new_with_park(timer); + + let runtime = Runtime::new2( + reactor_handle, + timer_handle, + self.clock.clone(), + executor); + + Ok(runtime) + } +} diff --git a/third_party/rust/tokio/src/runtime/current_thread/mod.rs b/third_party/rust/tokio/src/runtime/current_thread/mod.rs new file mode 100644 index 000000000000..30bd8f6dd78d --- /dev/null +++ b/third_party/rust/tokio/src/runtime/current_thread/mod.rs @@ -0,0 +1,70 @@ +//! A runtime implementation that runs everything on the current thread. +//! +//! [`current_thread::Runtime`][rt] is similar to the primary +//! [`Runtime`][concurrent-rt] except that it runs all components on the current +//! thread instead of using a thread pool. This means that it is able to spawn +//! 
futures that do not implement `Send`. +//! +//! Same as the default [`Runtime`][concurrent-rt], the +//! [`current_thread::Runtime`][rt] includes: +//! +//! * A [reactor] to drive I/O resources. +//! * An [executor] to execute tasks that use these I/O resources. +//! * A [timer] for scheduling work to run after a set period of time. +//! +//! Note that [`current_thread::Runtime`][rt] does not implement `Send` itself +//! and cannot be safely moved to other threads. +//! +//! # Spawning from other threads +//! +//! While [`current_thread::Runtime`][rt] does not implement `Send` and cannot +//! safely be moved to other threads, it provides a `Handle` that can be sent +//! to other threads and allows to spawn new tasks from there. +//! +//! For example: +//! +//! ``` +//! # extern crate tokio; +//! # extern crate futures; +//! use tokio::runtime::current_thread::Runtime; +//! use tokio::prelude::*; +//! use std::thread; +//! +//! # fn main() { +//! let mut runtime = Runtime::new().unwrap(); +//! let handle = runtime.handle(); +//! +//! thread::spawn(move || { +//! handle.spawn(future::ok(())); +//! }).join().unwrap(); +//! +//! # /* +//! runtime.run().unwrap(); +//! # */ +//! # } +//! ``` +//! +//! # Examples +//! +//! Creating a new `Runtime` and running a future `f` until its completion and +//! returning its result. +//! +//! ``` +//! use tokio::runtime::current_thread::Runtime; +//! use tokio::prelude::*; +//! +//! let mut runtime = Runtime::new().unwrap(); +//! +//! // Use the runtime... +//! // runtime.block_on(f); // where f is a future +//! ``` +//! +//! [rt]: struct.Runtime.html +//! [concurrent-rt]: ../struct.Runtime.html +//! 
[chan]: https://docs.rs/futures/0.1/futures/sync/mpsc/fn.channel.html + +mod builder; +mod runtime; + +pub use self::builder::Builder; +pub use self::runtime::{Runtime, Handle}; diff --git a/third_party/rust/tokio/src/runtime/current_thread/runtime.rs b/third_party/rust/tokio/src/runtime/current_thread/runtime.rs new file mode 100644 index 000000000000..8939f08cc44b --- /dev/null +++ b/third_party/rust/tokio/src/runtime/current_thread/runtime.rs @@ -0,0 +1,185 @@ +use executor::current_thread::{self, CurrentThread}; +use executor::current_thread::Handle as ExecutorHandle; +use runtime::current_thread::Builder; + +use tokio_reactor::{self, Reactor}; +use tokio_timer::clock::{self, Clock}; +use tokio_timer::timer::{self, Timer}; +use tokio_executor; + +use futures::Future; + +use std::io; + +/// Single-threaded runtime provides a way to start reactor +/// and executor on the current thread. +/// +/// See [module level][mod] documentation for more details. +/// +/// [mod]: index.html +#[derive(Debug)] +pub struct Runtime { + reactor_handle: tokio_reactor::Handle, + timer_handle: timer::Handle, + clock: Clock, + executor: CurrentThread>, +} + +/// Handle to spawn a future on the corresponding `CurrentThread` runtime instance +#[derive(Debug, Clone)] +pub struct Handle(ExecutorHandle); + +impl Handle { + /// Spawn a future onto the `CurrentThread` runtime instance corresponding to this handle + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the `CurrentThread` + /// instance of the `Handle` does not exist anymore. + pub fn spawn(&self, future: F) -> Result<(), tokio_executor::SpawnError> + where F: Future + Send + 'static { + self.0.spawn(future) + } +} + +/// Error returned by the `run` function. +#[derive(Debug)] +pub struct RunError { + inner: current_thread::RunError, +} + +impl Runtime { + /// Returns a new runtime initialized with default configuration values. 
+ pub fn new() -> io::Result { + Builder::new().build() + } + + pub(super) fn new2( + reactor_handle: tokio_reactor::Handle, + timer_handle: timer::Handle, + clock: Clock, + executor: CurrentThread>) -> Runtime + { + Runtime { + reactor_handle, + timer_handle, + clock, + executor, + } + } + + /// Get a new handle to spawn futures on the single-threaded Tokio runtime + /// + /// Different to the runtime itself, the handle can be sent to different + /// threads. + pub fn handle(&self) -> Handle { + Handle(self.executor.handle().clone()) + } + + /// Spawn a future onto the single-threaded Tokio runtime. + /// + /// See [module level][mod] documentation for more details. + /// + /// [mod]: index.html + /// + /// # Examples + /// + /// ```rust + /// # extern crate tokio; + /// # extern crate futures; + /// # use futures::{future, Future, Stream}; + /// use tokio::runtime::current_thread::Runtime; + /// + /// # fn dox() { + /// // Create the runtime + /// let mut rt = Runtime::new().unwrap(); + /// + /// // Spawn a future onto the runtime + /// rt.spawn(future::lazy(|| { + /// println!("running on the runtime"); + /// Ok(()) + /// })); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the executor + /// is currently at capacity and is unable to spawn a new future. + pub fn spawn(&mut self, future: F) -> &mut Self + where F: Future + 'static, + { + self.executor.spawn(future); + self + } + + /// Runs the provided future, blocking the current thread until the future + /// completes. + /// + /// This function can be used to synchronously block the current thread + /// until the provided `future` has resolved either successfully or with an + /// error. The result of the future is then returned from this function + /// call. 
+ /// + /// Note that this function will **also** execute any spawned futures on the + /// current thread, but will **not** block until these other spawned futures + /// have completed. Once the function returns, any uncompleted futures + /// remain pending in the `Runtime` instance. These futures will not run + /// until `block_on` or `run` is called again. + /// + /// The caller is responsible for ensuring that other spawned futures + /// complete execution by calling `block_on` or `run`. + pub fn block_on(&mut self, f: F) -> Result + where F: Future + { + self.enter(|executor| { + // Run the provided future + let ret = executor.block_on(f); + ret.map_err(|e| e.into_inner().expect("unexpected execution error")) + }) + } + + /// Run the executor to completion, blocking the thread until **all** + /// spawned futures have completed. + pub fn run(&mut self) -> Result<(), RunError> { + self.enter(|executor| executor.run()) + .map_err(|e| RunError { + inner: e, + }) + } + + fn enter(&mut self, f: F) -> R + where F: FnOnce(&mut current_thread::Entered>) -> R + { + let Runtime { + ref reactor_handle, + ref timer_handle, + ref clock, + ref mut executor, + .. + } = *self; + + // Binds an executor to this thread + let mut enter = tokio_executor::enter().expect("Multiple executors at once"); + + // This will set the default handle and timer to use inside the closure + // and run the future. + tokio_reactor::with_default(&reactor_handle, &mut enter, |enter| { + clock::with_default(clock, enter, |enter| { + timer::with_default(&timer_handle, enter, |enter| { + // The TaskExecutor is a fake executor that looks into the + // current single-threaded executor when used. This is a trick, + // because we need two mutable references to the executor (one + // to run the provided future, another to install as the default + // one). We use the fake one here as the default one. 
+ let mut default_executor = current_thread::TaskExecutor::current(); + tokio_executor::with_default(&mut default_executor, enter, |enter| { + let mut executor = executor.enter(enter); + f(&mut executor) + }) + }) + }) + }) + } +} diff --git a/third_party/rust/tokio/src/runtime/mod.rs b/third_party/rust/tokio/src/runtime/mod.rs new file mode 100644 index 000000000000..ed416fd19241 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/mod.rs @@ -0,0 +1,495 @@ +//! A batteries included runtime for applications using Tokio. +//! +//! Applications using Tokio require some runtime support in order to work: +//! +//! * A [reactor] to drive I/O resources. +//! * An [executor] to execute tasks that use these I/O resources. +//! * A [timer] for scheduling work to run after a set period of time. +//! +//! While it is possible to setup each component manually, this involves a bunch +//! of boilerplate. +//! +//! [`Runtime`] bundles all of these various runtime components into a single +//! handle that can be started and shutdown together, eliminating the necessary +//! boilerplate to run a Tokio application. +//! +//! Most applications wont need to use [`Runtime`] directly. Instead, they will +//! use the [`run`] function, which uses [`Runtime`] under the hood. +//! +//! Creating a [`Runtime`] does the following: +//! +//! * Spawn a background thread running a [`Reactor`] instance. +//! * Start a [`ThreadPool`] for executing futures. +//! * Run an instance of [`Timer`] **per** thread pool worker thread. +//! +//! The thread pool uses a work-stealing strategy and is configured to start a +//! worker thread for each CPU core available on the system. This tends to be +//! the ideal setup for Tokio applications. +//! +//! A timer per thread pool worker thread is used to minimize the amount of +//! synchronization that is required for working with the timer. +//! +//! # Usage +//! +//! Most applications will use the [`run`] function. This takes a future to +//! 
"seed" the application, blocking the thread until the runtime becomes +//! [idle]. +//! +//! ```rust +//! # extern crate tokio; +//! # extern crate futures; +//! # use futures::{Future, Stream}; +//! use tokio::net::TcpListener; +//! +//! # fn process(_: T) -> Box + Send> { +//! # unimplemented!(); +//! # } +//! # fn dox() { +//! # let addr = "127.0.0.1:8080".parse().unwrap(); +//! let listener = TcpListener::bind(&addr).unwrap(); +//! +//! let server = listener.incoming() +//! .map_err(|e| println!("error = {:?}", e)) +//! .for_each(|socket| { +//! tokio::spawn(process(socket)) +//! }); +//! +//! tokio::run(server); +//! # } +//! # pub fn main() {} +//! ``` +//! +//! In this function, the `run` function blocks until the runtime becomes idle. +//! See [`shutdown_on_idle`][idle] for more shutdown details. +//! +//! From within the context of the runtime, additional tasks are spawned using +//! the [`tokio::spawn`] function. Futures spawned using this function will be +//! executed on the same thread pool used by the [`Runtime`]. +//! +//! A [`Runtime`] instance can also be used directly. +//! +//! ```rust +//! # extern crate tokio; +//! # extern crate futures; +//! # use futures::{Future, Stream}; +//! use tokio::runtime::Runtime; +//! use tokio::net::TcpListener; +//! +//! # fn process(_: T) -> Box + Send> { +//! # unimplemented!(); +//! # } +//! # fn dox() { +//! # let addr = "127.0.0.1:8080".parse().unwrap(); +//! let listener = TcpListener::bind(&addr).unwrap(); +//! +//! let server = listener.incoming() +//! .map_err(|e| println!("error = {:?}", e)) +//! .for_each(|socket| { +//! tokio::spawn(process(socket)) +//! }); +//! +//! // Create the runtime +//! let mut rt = Runtime::new().unwrap(); +//! +//! // Spawn the server task +//! rt.spawn(server); +//! +//! // Wait until the runtime becomes idle and shut it down. +//! rt.shutdown_on_idle() +//! .wait().unwrap(); +//! # } +//! # pub fn main() {} +//! ``` +//! +//! [reactor]: ../reactor/struct.Reactor.html +//! 
[executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors +//! [timer]: ../timer/index.html +//! [`Runtime`]: struct.Runtime.html +//! [`Reactor`]: ../reactor/struct.Reactor.html +//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html +//! [`run`]: fn.run.html +//! [idle]: struct.Runtime.html#method.shutdown_on_idle +//! [`tokio::spawn`]: ../executor/fn.spawn.html +//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html + +mod builder; +pub mod current_thread; +mod shutdown; +mod task_executor; + +pub use self::builder::Builder; +pub use self::shutdown::Shutdown; +pub use self::task_executor::TaskExecutor; + +use reactor::{Background, Handle}; + +use std::io; + +use tokio_threadpool as threadpool; + +use futures; +use futures::future::Future; +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Handle to the Tokio runtime. +/// +/// The Tokio runtime includes a reactor as well as an executor for running +/// tasks. +/// +/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However, +/// most users will use [`tokio::run`], which uses a `Runtime` internally. +/// +/// See [module level][mod] documentation for more details. +/// +/// [mod]: index.html +/// [`new`]: #method.new +/// [`Builder`]: struct.Builder.html +/// [`tokio::run`]: fn.run.html +#[derive(Debug)] +pub struct Runtime { + inner: Option, +} + +#[derive(Debug)] +struct Inner { + /// Reactor running on a background thread. + reactor: Background, + + /// Task execution pool. + pool: threadpool::ThreadPool, +} + +// ===== impl Runtime ===== + +/// Start the Tokio runtime using the supplied future to bootstrap execution. +/// +/// This function is used to bootstrap the execution of a Tokio application. It +/// does the following: +/// +/// * Start the Tokio runtime using a default configuration. +/// * Spawn the given future onto the thread pool. +/// * Block the current thread until the runtime shuts down. 
+/// +/// Note that the function will not return immediately once `future` has +/// completed. Instead it waits for the entire runtime to become idle. +/// +/// See the [module level][mod] documentation for more details. +/// +/// # Examples +/// +/// ```rust +/// # extern crate tokio; +/// # extern crate futures; +/// # use futures::{Future, Stream}; +/// use tokio::net::TcpListener; +/// +/// # fn process(_: T) -> Box + Send> { +/// # unimplemented!(); +/// # } +/// # fn dox() { +/// # let addr = "127.0.0.1:8080".parse().unwrap(); +/// let listener = TcpListener::bind(&addr).unwrap(); +/// +/// let server = listener.incoming() +/// .map_err(|e| println!("error = {:?}", e)) +/// .for_each(|socket| { +/// tokio::spawn(process(socket)) +/// }); +/// +/// tokio::run(server); +/// # } +/// # pub fn main() {} +/// ``` +/// +/// # Panics +/// +/// This function panics if called from the context of an executor. +/// +/// [mod]: ../index.html +pub fn run(future: F) +where F: Future + Send + 'static, +{ + let mut runtime = Runtime::new().unwrap(); + runtime.spawn(future); + runtime.shutdown_on_idle().wait().unwrap(); +} + +/// Start the Tokio runtime using the supplied future to bootstrap execution. +/// +/// Identical to `run` but works with futures 0.2-style futures. +#[cfg(feature = "unstable-futures")] +pub fn run2(future: F) + where F: futures2::Future + Send + 'static, +{ + let mut runtime = Runtime::new().unwrap(); + runtime.spawn2(future); + runtime.shutdown_on_idle().wait().unwrap(); +} + +impl Runtime { + /// Create a new runtime instance with default configuration values. + /// + /// This results in a reactor, thread pool, and timer being initialized. The + /// thread pool will not spawn any worker threads until it needs to, i.e. + /// tasks are scheduled to run. + /// + /// Most users will not need to call this function directly, instead they + /// will use [`tokio::run`](fn.run.html). + /// + /// See [module level][mod] documentation for more details. 
+ /// + /// # Examples + /// + /// Creating a new `Runtime` with default configuration values. + /// + /// ``` + /// use tokio::runtime::Runtime; + /// use tokio::prelude::*; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// // Use the runtime... + /// + /// // Shutdown the runtime + /// rt.shutdown_now() + /// .wait().unwrap(); + /// ``` + /// + /// [mod]: index.html + pub fn new() -> io::Result { + Builder::new().build() + } + + #[deprecated(since = "0.1.5", note = "use `reactor` instead")] + #[doc(hidden)] + pub fn handle(&self) -> &Handle { + self.reactor() + } + + /// Return a reference to the reactor handle for this runtime instance. + /// + /// The returned handle reference can be cloned in order to get an owned + /// value of the handle. This handle can be used to initialize I/O resources + /// (like TCP or UDP sockets) that will not be used on the runtime. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Runtime; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// let reactor_handle = rt.reactor().clone(); + /// + /// // use `reactor_handle` + /// ``` + pub fn reactor(&self) -> &Handle { + self.inner().reactor.handle() + } + + /// Return a handle to the runtime's executor. + /// + /// The returned handle can be used to spawn tasks that run on this runtime. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Runtime; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// let executor_handle = rt.executor(); + /// + /// // use `executor_handle` + /// ``` + pub fn executor(&self) -> TaskExecutor { + let inner = self.inner().pool.sender().clone(); + TaskExecutor { inner } + } + + /// Spawn a future onto the Tokio runtime. + /// + /// This spawns the given future onto the runtime's executor, usually a + /// thread pool. The thread pool is then responsible for polling the future + /// until it completes. + /// + /// See [module level][mod] documentation for more details. 
+ /// + /// [mod]: index.html + /// + /// # Examples + /// + /// ```rust + /// # extern crate tokio; + /// # extern crate futures; + /// # use futures::{future, Future, Stream}; + /// use tokio::runtime::Runtime; + /// + /// # fn dox() { + /// // Create the runtime + /// let mut rt = Runtime::new().unwrap(); + /// + /// // Spawn a future onto the runtime + /// rt.spawn(future::lazy(|| { + /// println!("now running on a worker thread"); + /// Ok(()) + /// })); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the executor + /// is currently at capacity and is unable to spawn a new future. + pub fn spawn(&mut self, future: F) -> &mut Self + where F: Future + Send + 'static, + { + self.inner_mut().pool.sender().spawn(future).unwrap(); + self + } + + /// Spawn a futures 0.2-style future onto the Tokio runtime. + /// + /// Otherwise identical to `spawn` + #[cfg(feature = "unstable-futures")] + pub fn spawn2(&mut self, future: F) -> &mut Self + where F: futures2::Future + Send + 'static, + { + futures2::executor::Executor::spawn( + self.inner_mut().pool.sender_mut(), Box::new(future) + ).unwrap(); + self + } + + /// Run a future to completion on the Tokio runtime. + /// + /// This runs the given future on the runtime, blocking until it is + /// complete, and yielding its resolved result. Any tasks or timers which + /// the future spawns internally will be executed on the runtime. + /// + /// This method should not be called from an asynchrounous context. + /// + /// # Panics + /// + /// This function panics if the executor is at capacity, if the provided + /// future panics, or if called within an asynchronous execution context. 
+ pub fn block_on(&mut self, future: F) -> Result + where + F: Send + 'static + Future, + R: Send + 'static, + E: Send + 'static, + { + let (tx, rx) = futures::sync::oneshot::channel(); + self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!()))); + rx.wait().unwrap() + } + + /// Signals the runtime to shutdown once it becomes idle. + /// + /// Returns a future that completes once the shutdown operation has + /// completed. + /// + /// This function can be used to perform a graceful shutdown of the runtime. + /// + /// The runtime enters an idle state once **all** of the following occur. + /// + /// * The thread pool has no tasks to execute, i.e., all tasks that were + /// spawned have completed. + /// * The reactor is not managing any I/O resources. + /// + /// See [module level][mod] documentation for more details. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Runtime; + /// use tokio::prelude::*; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// // Use the runtime... + /// + /// // Shutdown the runtime + /// rt.shutdown_on_idle() + /// .wait().unwrap(); + /// ``` + /// + /// [mod]: index.html + pub fn shutdown_on_idle(mut self) -> Shutdown { + let inner = self.inner.take().unwrap(); + + let inner = Box::new({ + let pool = inner.pool; + let reactor = inner.reactor; + + pool.shutdown_on_idle().and_then(|_| { + reactor.shutdown_on_idle() + }) + }); + + Shutdown { inner } + } + + /// Signals the runtime to shutdown immediately. + /// + /// Returns a future that completes once the shutdown operation has + /// completed. + /// + /// This function will forcibly shutdown the runtime, causing any + /// in-progress work to become canceled. The shutdown steps are: + /// + /// * Drain any scheduled work queues. + /// * Drop any futures that have not yet completed. + /// * Drop the reactor. + /// + /// Once the reactor has dropped, any outstanding I/O resources bound to + /// that reactor will no longer function. 
Calling any method on them will + /// result in an error. + /// + /// See [module level][mod] documentation for more details. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Runtime; + /// use tokio::prelude::*; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// // Use the runtime... + /// + /// // Shutdown the runtime + /// rt.shutdown_now() + /// .wait().unwrap(); + /// ``` + /// + /// [mod]: index.html + pub fn shutdown_now(mut self) -> Shutdown { + let inner = self.inner.take().unwrap(); + Shutdown::shutdown_now(inner) + } + + fn inner(&self) -> &Inner { + self.inner.as_ref().unwrap() + } + + fn inner_mut(&mut self) -> &mut Inner { + self.inner.as_mut().unwrap() + } +} + +impl Drop for Runtime { + fn drop(&mut self) { + if let Some(inner) = self.inner.take() { + let shutdown = Shutdown::shutdown_now(inner); + let _ = shutdown.wait(); + } + } +} diff --git a/third_party/rust/tokio/src/runtime/shutdown.rs b/third_party/rust/tokio/src/runtime/shutdown.rs new file mode 100644 index 000000000000..1aca557277a2 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/shutdown.rs @@ -0,0 +1,46 @@ +use runtime::Inner; + +use std::fmt; + +use futures::{Future, Poll}; + +/// A future that resolves when the Tokio `Runtime` is shut down. 
+pub struct Shutdown { + pub(super) inner: Box + Send>, +} + +impl Shutdown { + pub(super) fn shutdown_now(inner: Inner) -> Self { + let inner = Box::new({ + let pool = inner.pool; + let reactor = inner.reactor; + + pool.shutdown_now().and_then(|_| { + reactor.shutdown_now() + .then(|_| { + Ok(()) + }) + }) + }); + + Shutdown { inner } + } +} + +impl Future for Shutdown { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + try_ready!(self.inner.poll()); + Ok(().into()) + } +} + +impl fmt::Debug for Shutdown { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Shutdown") + .field("inner", &"Box>") + .finish() + } +} diff --git a/third_party/rust/tokio/src/runtime/task_executor.rs b/third_party/rust/tokio/src/runtime/task_executor.rs new file mode 100644 index 000000000000..ed918be5f1bb --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task_executor.rs @@ -0,0 +1,98 @@ + +use tokio_threadpool::Sender; + +use futures::future::{self, Future}; +#[cfg(feature = "unstable-futures")] +use futures2; + +/// Executes futures on the runtime +/// +/// All futures spawned using this executor will be submitted to the associated +/// Runtime's executor. This executor is usually a thread pool. +/// +/// For more details, see the [module level](index.html) documentation. +#[derive(Debug, Clone)] +pub struct TaskExecutor { + pub(super) inner: Sender, +} + +impl TaskExecutor { + /// Spawn a future onto the Tokio runtime. + /// + /// This spawns the given future onto the runtime's executor, usually a + /// thread pool. The thread pool is then responsible for polling the future + /// until it completes. + /// + /// See [module level][mod] documentation for more details. 
+ /// + /// [mod]: index.html + /// + /// # Examples + /// + /// ```rust + /// # extern crate tokio; + /// # extern crate futures; + /// # use futures::{future, Future, Stream}; + /// use tokio::runtime::Runtime; + /// + /// # fn dox() { + /// // Create the runtime + /// let mut rt = Runtime::new().unwrap(); + /// let executor = rt.executor(); + /// + /// // Spawn a future onto the runtime + /// executor.spawn(future::lazy(|| { + /// println!("now running on a worker thread"); + /// Ok(()) + /// })); + /// # } + /// # pub fn main() {} + /// ``` + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the executor + /// is currently at capacity and is unable to spawn a new future. + pub fn spawn(&self, future: F) + where F: Future + Send + 'static, + { + self.inner.spawn(future).unwrap(); + } +} + +impl future::Executor for TaskExecutor +where T: Future + Send + 'static, +{ + fn execute(&self, future: T) -> Result<(), future::ExecuteError> { + self.inner.execute(future) + } +} + +impl ::executor::Executor for TaskExecutor { + fn spawn(&mut self, future: Box + Send>) + -> Result<(), ::executor::SpawnError> + { + self.inner.spawn(future) + } + + #[cfg(feature = "unstable-futures")] + fn spawn2(&mut self, future: Box + Send>) + -> Result<(), futures2::executor::SpawnError> + { + self.inner.spawn2(future) + } +} + +#[cfg(feature = "unstable-futures")] +type Task2 = Box + Send>; + +#[cfg(feature = "unstable-futures")] +impl futures2::executor::Executor for TaskExecutor { + fn spawn(&mut self, f: Task2) -> Result<(), futures2::executor::SpawnError> { + futures2::executor::Executor::spawn(&mut self.inner, f) + } + + fn status(&self) -> Result<(), futures2::executor::SpawnError> { + futures2::executor::Executor::status(&self.inner) + } +} diff --git a/third_party/rust/tokio/src/timer.rs b/third_party/rust/tokio/src/timer.rs new file mode 100644 index 000000000000..1147a5db60bb --- /dev/null +++ b/third_party/rust/tokio/src/timer.rs @@ -0,0 
+1,86 @@ +//! Utilities for tracking time. +//! +//! This module provides a number of types for executing code after a set period +//! of time. +//! +//! * [`Delay`][Delay] is a future that does no work and completes at a specific `Instant` +//! in time. +//! +//! * [`Interval`][Interval] is a stream yielding a value at a fixed period. It +//! is initialized with a `Duration` and repeatedly yields each time the +//! duration elapses. +//! +//! * [`Deadline`][Deadline] wraps a future, requiring that it completes before +//! a specified `Instant` in time. If the future does not complete in time, +//! then it is canceled and an error is returned. +//! +//! These types are sufficient for handling a large number of scenarios +//! involving time. +//! +//! These types must be used from within the context of the +//! [`Runtime`][runtime] or a timer context must be setup explicitly. See the +//! [`tokio-timer`][tokio-timer] crate for more details on how to setup a timer +//! context. +//! +//! # Examples +//! +//! Wait 100ms and print "Hello World!" +//! +//! ``` +//! use tokio::prelude::*; +//! use tokio::timer::Delay; +//! +//! use std::time::{Duration, Instant}; +//! +//! let when = Instant::now() + Duration::from_millis(100); +//! +//! tokio::run({ +//! Delay::new(when) +//! .map_err(|e| panic!("timer failed; err={:?}", e)) +//! .and_then(|_| { +//! println!("Hello world!"); +//! Ok(()) +//! }) +//! }) +//! ``` +//! +//! Require that an operation takes no more than 300ms. Note that this uses the +//! [`deadline`][ext] function on the [`FutureExt`][ext] trait. This trait is +//! included in the prelude. +//! +//! ``` +//! # extern crate futures; +//! # extern crate tokio; +//! use tokio::prelude::*; +//! +//! use std::time::{Duration, Instant}; +//! +//! fn long_op() -> Box + Send> { +//! // ... +//! # Box::new(futures::future::ok(())) +//! } +//! +//! # fn main() { +//! let when = Instant::now() + Duration::from_millis(300); +//! +//! tokio::run({ +//! long_op() +//! 
.deadline(when) +//! .map_err(|e| { +//! println!("operation timed out"); +//! }) +//! }) +//! # } +//! ``` +//! +//! [runtime]: ../runtime/struct.Runtime.html +//! [tokio-timer]: https://docs.rs/tokio-timer +//! [ext]: ../util/trait.FutureExt.html#method.deadline + +pub use tokio_timer::{ + Deadline, + DeadlineError, + Error, + Interval, + Delay, +}; diff --git a/third_party/rust/tokio/src/util/future.rs b/third_party/rust/tokio/src/util/future.rs new file mode 100644 index 000000000000..d03ce7fad1d8 --- /dev/null +++ b/third_party/rust/tokio/src/util/future.rs @@ -0,0 +1,61 @@ +use tokio_timer::Deadline; + +use futures::Future; + +use std::time::Instant; + + +/// An extension trait for `Future` that provides a variety of convenient +/// combinator functions. +/// +/// Currently, there only is a [`deadline`] function, but this will increase +/// over time. +/// +/// Users are not expected to implement this trait. All types that implement +/// `Future` already implement `FutureExt`. +/// +/// This trait can be imported directly or via the Tokio prelude: `use +/// tokio::prelude::*`. +/// +/// [`deadline`]: #method.deadline +pub trait FutureExt: Future { + + /// Creates a new future which allows `self` until `deadline`. + /// + /// This combinator creates a new future which wraps the receiving future + /// with a deadline. The returned future is allowed to execute until it + /// completes or `deadline` is reached, whichever happens first. + /// + /// If the future completes before `deadline` then the future will resolve + /// with that item. Otherwise the future will resolve to an error once + /// `deadline` is reached. 
+ /// + /// # Examples + /// + /// ``` + /// # extern crate tokio; + /// # extern crate futures; + /// use tokio::prelude::*; + /// use std::time::{Duration, Instant}; + /// # use futures::future::{self, FutureResult}; + /// + /// # fn long_future() -> FutureResult<(), ()> { + /// # future::ok(()) + /// # } + /// # + /// # fn main() { + /// let future = long_future() + /// .deadline(Instant::now() + Duration::from_secs(1)) + /// .map_err(|e| println!("error = {:?}", e)); + /// + /// tokio::run(future); + /// # } + /// ``` + fn deadline(self, deadline: Instant) -> Deadline + where Self: Sized, + { + Deadline::new(self, deadline) + } +} + +impl FutureExt for T where T: Future {} diff --git a/third_party/rust/tokio/src/util/mod.rs b/third_party/rust/tokio/src/util/mod.rs new file mode 100644 index 000000000000..490d0cf6e82a --- /dev/null +++ b/third_party/rust/tokio/src/util/mod.rs @@ -0,0 +1,9 @@ +//! Utilities for working with Tokio. +//! +//! This module contains utilities that are useful for working with Tokio. +//! Currently, this only includes [`FutureExt`][FutureExt]. However, this will +//! include over time. + +mod future; + +pub use self::future::FutureExt; diff --git a/third_party/rust/tokio/tests/buffered.rs b/third_party/rust/tokio/tests/buffered.rs new file mode 100644 index 000000000000..2ba16b04255f --- /dev/null +++ b/third_party/rust/tokio/tests/buffered.rs @@ -0,0 +1,63 @@ +extern crate env_logger; +extern crate futures; +extern crate tokio; +extern crate tokio_io; + +use std::net::TcpStream; +use std::thread; +use std::io::{Read, Write, BufReader, BufWriter}; + +use futures::Future; +use futures::stream::Stream; +use tokio_io::io::copy; +use tokio::net::TcpListener; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn echo_server() { + const N: usize = 1024; + drop(env_logger::init()); + + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let msg = "foo bar baz"; + let t = thread::spawn(move || { + let mut s = t!(TcpStream::connect(&addr)); + + let t2 = thread::spawn(move || { + let mut s = t!(TcpStream::connect(&addr)); + let mut b = vec![0; msg.len() * N]; + t!(s.read_exact(&mut b)); + b + }); + + let mut expected = Vec::::new(); + for _i in 0..N { + expected.extend(msg.as_bytes()); + assert_eq!(t!(s.write(msg.as_bytes())), msg.len()); + } + (expected, t2) + }); + + let clients = srv.incoming().take(2).collect(); + let copied = clients.and_then(|clients| { + let mut clients = clients.into_iter(); + let a = BufReader::new(clients.next().unwrap()); + let b = BufWriter::new(clients.next().unwrap()); + copy(a, b) + }); + + let (amt, _, _) = t!(copied.wait()); + let (expected, t2) = t.join().unwrap(); + let actual = t2.join().unwrap(); + + assert!(expected == actual); + assert_eq!(amt, msg.len() as u64 * 1024); +} diff --git a/third_party/rust/tokio/tests/clock.rs b/third_party/rust/tokio/tests/clock.rs new file mode 100644 index 000000000000..ee99329f0edb --- /dev/null +++ b/third_party/rust/tokio/tests/clock.rs @@ -0,0 +1,69 @@ +extern crate futures; +extern crate tokio; +extern crate tokio_timer; +extern crate env_logger; + +use tokio::prelude::*; +use tokio::runtime::{self, current_thread}; +use tokio::timer::*; +use tokio_timer::clock::Clock; + +use std::sync::mpsc; +use std::time::{Duration, Instant}; + +struct MockNow(Instant); + +impl tokio_timer::clock::Now for MockNow { + fn now(&self) -> Instant { + self.0 + } +} + +#[test] +fn clock_and_timer_concurrent() { + let _ = env_logger::init(); + + let when = Instant::now() + Duration::from_millis(5_000); + let clock = 
Clock::new_with_now(MockNow(when)); + + let mut rt = runtime::Builder::new() + .clock(clock) + .build() + .unwrap(); + + let (tx, rx) = mpsc::channel(); + + rt.spawn({ + Delay::new(when) + .map_err(|e| panic!("unexpected error; err={:?}", e)) + .and_then(move |_| { + assert!(Instant::now() < when); + tx.send(()).unwrap(); + Ok(()) + }) + }); + + rx.recv().unwrap(); +} + +#[test] +fn clock_and_timer_single_threaded() { + let _ = env_logger::init(); + + let when = Instant::now() + Duration::from_millis(5_000); + let clock = Clock::new_with_now(MockNow(when)); + + let mut rt = current_thread::Builder::new() + .clock(clock) + .build() + .unwrap(); + + rt.block_on({ + Delay::new(when) + .map_err(|e| panic!("unexpected error; err={:?}", e)) + .and_then(move |_| { + assert!(Instant::now() < when); + Ok(()) + }) + }).unwrap(); +} diff --git a/third_party/rust/tokio/tests/current_thread.rs b/third_party/rust/tokio/tests/current_thread.rs new file mode 100644 index 000000000000..79e6785e938b --- /dev/null +++ b/third_party/rust/tokio/tests/current_thread.rs @@ -0,0 +1,622 @@ +#![cfg(not(feature = "unstable-futures"))] + +extern crate tokio; +extern crate tokio_executor; +extern crate futures; + +use tokio::executor::current_thread::{self, block_on_all, CurrentThread}; + +use std::any::Any; +use std::cell::{Cell, RefCell}; +use std::rc::Rc; +use std::thread; +use std::time::Duration; + +use futures::task; +use futures::future::{self, lazy}; +use futures::prelude::*; +use futures::sync::oneshot; + +#[test] +fn spawn_from_block_on_all() { + let cnt = Rc::new(Cell::new(0)); + let c = cnt.clone(); + + let msg = current_thread::block_on_all(lazy(move || { + c.set(1 + c.get()); + + // Spawn! 
+ current_thread::spawn(lazy(move || { + c.set(1 + c.get()); + Ok::<(), ()>(()) + })); + + Ok::<_, ()>("hello") + })).unwrap(); + + assert_eq!(2, cnt.get()); + assert_eq!(msg, "hello"); +} + +#[test] +fn block_waits() { + let (tx, rx) = oneshot::channel(); + + thread::spawn(|| { + thread::sleep(Duration::from_millis(1000)); + tx.send(()).unwrap(); + }); + + let cnt = Rc::new(Cell::new(0)); + let cnt2 = cnt.clone(); + + block_on_all(rx.then(move |_| { + cnt.set(1 + cnt.get()); + Ok::<_, ()>(()) + })).unwrap(); + + assert_eq!(1, cnt2.get()); +} + +#[test] +fn spawn_many() { + const ITER: usize = 200; + + let cnt = Rc::new(Cell::new(0)); + let mut current_thread = CurrentThread::new(); + + for _ in 0..ITER { + let cnt = cnt.clone(); + current_thread.spawn(lazy(move || { + cnt.set(1 + cnt.get()); + Ok::<(), ()>(()) + })); + } + + current_thread.run().unwrap(); + + assert_eq!(cnt.get(), ITER); +} + +#[test] +fn does_not_set_global_executor_by_default() { + use tokio_executor::Executor; + + block_on_all(lazy(|| { + tokio_executor::DefaultExecutor::current() + .spawn(Box::new(lazy(|| ok()))) + .unwrap_err(); + + ok() + })).unwrap(); +} + +#[test] +fn spawn_from_block_on_future() { + let cnt = Rc::new(Cell::new(0)); + + let mut current_thread = CurrentThread::new(); + + current_thread.block_on(lazy(|| { + let cnt = cnt.clone(); + + current_thread::spawn(lazy(move || { + cnt.set(1 + cnt.get()); + Ok(()) + })); + + Ok::<_, ()>(()) + })).unwrap(); + + current_thread.run().unwrap(); + + assert_eq!(1, cnt.get()); +} + +struct Never(Rc<()>); + +impl Future for Never { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + Ok(Async::NotReady) + } +} + +#[test] +fn outstanding_tasks_are_dropped_when_executor_is_dropped() { + let mut rc = Rc::new(()); + + let mut current_thread = CurrentThread::new(); + current_thread.spawn(Never(rc.clone())); + + drop(current_thread); + + // Ensure the daemon is dropped + assert!(Rc::get_mut(&mut rc).is_some()); + + // 
Using the global spawn fn + + let mut rc = Rc::new(()); + + let mut current_thread = CurrentThread::new(); + + current_thread.block_on(lazy(|| { + current_thread::spawn(Never(rc.clone())); + Ok::<_, ()>(()) + })).unwrap(); + + drop(current_thread); + + // Ensure the daemon is dropped + assert!(Rc::get_mut(&mut rc).is_some()); +} + +#[test] +#[should_panic] +fn nesting_run() { + block_on_all(lazy(|| { + block_on_all(lazy(|| { + ok() + })).unwrap(); + + ok() + })).unwrap(); +} + +#[test] +#[should_panic] +fn run_in_future() { + block_on_all(lazy(|| { + current_thread::spawn(lazy(|| { + block_on_all(lazy(|| { + ok() + })).unwrap(); + ok() + })); + ok() + })).unwrap(); +} + +#[test] +fn tick_on_infini_future() { + let num = Rc::new(Cell::new(0)); + + struct Infini { + num: Rc>, + } + + impl Future for Infini { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + self.num.set(1 + self.num.get()); + task::current().notify(); + Ok(Async::NotReady) + } + } + + CurrentThread::new() + .spawn(Infini { + num: num.clone(), + }) + .turn(None) + .unwrap(); + + assert_eq!(1, num.get()); +} + +#[test] +fn tasks_are_scheduled_fairly() { + let state = Rc::new(RefCell::new([0, 0])); + + struct Spin { + state: Rc>, + idx: usize, + } + + impl Future for Spin { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + let mut state = self.state.borrow_mut(); + + if self.idx == 0 { + let diff = state[0] - state[1]; + + assert!(diff.abs() <= 1); + + if state[0] >= 50 { + return Ok(().into()); + } + } + + state[self.idx] += 1; + + if state[self.idx] >= 100 { + return Ok(().into()); + } + + task::current().notify(); + Ok(Async::NotReady) + } + } + + block_on_all(lazy(|| { + current_thread::spawn(Spin { + state: state.clone(), + idx: 0, + }); + + current_thread::spawn(Spin { + state: state, + idx: 1, + }); + + ok() + })).unwrap(); +} + +#[test] +fn spawn_and_turn() { + let cnt = Rc::new(Cell::new(0)); + let c = cnt.clone(); + + let mut 
current_thread = CurrentThread::new(); + + // Spawn a basic task to get the executor to turn + current_thread.spawn(lazy(move || { + Ok(()) + })); + + // Turn once... + current_thread.turn(None).unwrap(); + + current_thread.spawn(lazy(move || { + c.set(1 + c.get()); + + // Spawn! + current_thread::spawn(lazy(move || { + c.set(1 + c.get()); + Ok::<(), ()>(()) + })); + + Ok(()) + })); + + // This does not run the newly spawned thread + current_thread.turn(None).unwrap(); + assert_eq!(1, cnt.get()); + + // This runs the newly spawned thread + current_thread.turn(None).unwrap(); + assert_eq!(2, cnt.get()); +} + +#[test] +fn spawn_in_drop() { + let mut current_thread = CurrentThread::new(); + + let (tx, rx) = oneshot::channel(); + + current_thread.spawn({ + struct OnDrop(Option); + + impl Drop for OnDrop { + fn drop(&mut self) { + (self.0.take().unwrap())(); + } + } + + struct MyFuture { + _data: Box, + } + + impl Future for MyFuture { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + Ok(().into()) + } + } + + MyFuture { + _data: Box::new(OnDrop(Some(move || { + current_thread::spawn(lazy(move || { + tx.send(()).unwrap(); + Ok(()) + })); + }))), + } + }); + + current_thread.block_on(rx).unwrap(); + current_thread.run().unwrap(); +} + +#[test] +fn hammer_turn() { + use futures::sync::mpsc; + + const ITER: usize = 100; + const N: usize = 100; + const THREADS: usize = 4; + + for _ in 0..ITER { + let mut ths = vec![]; + + // Add some jitter + for _ in 0..THREADS { + let th = thread::spawn(|| { + let mut current_thread = CurrentThread::new(); + + let (tx, rx) = mpsc::unbounded(); + + current_thread.spawn({ + let cnt = Rc::new(Cell::new(0)); + let c = cnt.clone(); + + rx.for_each(move |_| { + c.set(1 + c.get()); + Ok(()) + }) + .map_err(|e| panic!("err={:?}", e)) + .map(move |v| { + assert_eq!(N, cnt.get()); + v + }) + }); + + thread::spawn(move || { + for _ in 0..N { + tx.unbounded_send(()).unwrap(); + thread::yield_now(); + } + }); + + while 
!current_thread.is_idle() { + current_thread.turn(None).unwrap(); + } + }); + + ths.push(th); + } + + for th in ths { + th.join().unwrap(); + } + } +} + +#[test] +fn turn_has_polled() { + let mut current_thread = CurrentThread::new(); + + // Spawn oneshot receiver + let (sender, receiver) = oneshot::channel::<()>(); + current_thread.spawn(receiver.then(|_| Ok(()))); + + // Turn once... + let res = current_thread.turn(Some(Duration::from_millis(0))).unwrap(); + + // Should've polled the receiver once, but considered it not ready + assert!(res.has_polled()); + + // Turn another time + let res = current_thread.turn(Some(Duration::from_millis(0))).unwrap(); + + // Should've polled nothing, the receiver is not ready yet + assert!(!res.has_polled()); + + // Make the receiver ready + sender.send(()).unwrap(); + + // Turn another time + let res = current_thread.turn(Some(Duration::from_millis(0))).unwrap(); + + // Should've polled the receiver, it's ready now + assert!(res.has_polled()); + + // Now the executor should be empty + assert!(current_thread.is_idle()); + let res = current_thread.turn(Some(Duration::from_millis(0))).unwrap(); + + // So should've polled nothing + assert!(!res.has_polled()); +} + +// Our own mock Park that is never really waiting and the only +// thing it does is to send, on request, something (once) to a onshot +// channel +struct MyPark { + sender: Option>, + send_now: Rc>, +} + +struct MyUnpark; + +impl tokio_executor::park::Park for MyPark { + type Unpark = MyUnpark; + type Error = (); + + fn unpark(&self) -> Self::Unpark { + MyUnpark + } + + fn park(&mut self) -> Result<(), Self::Error> { + // If called twice with send_now, this will intentionally panic + if self.send_now.get() { + self.sender.take().unwrap().send(()).unwrap(); + } + + Ok(()) + } + + fn park_timeout(&mut self, _duration: Duration) -> Result<(), Self::Error> { + self.park() + } +} + +impl tokio_executor::park::Unpark for MyUnpark { + fn unpark(&self) {} +} + +#[test] +fn 
turn_fair() { + let send_now = Rc::new(Cell::new(false)); + + let (sender, receiver) = oneshot::channel::<()>(); + let (sender_2, receiver_2) = oneshot::channel::<()>(); + let (sender_3, receiver_3) = oneshot::channel::<()>(); + + let my_park = MyPark { + sender: Some(sender_3), + send_now: send_now.clone(), + }; + + let mut current_thread = CurrentThread::new_with_park(my_park); + + let receiver_1_done = Rc::new(Cell::new(false)); + let receiver_1_done_clone = receiver_1_done.clone(); + + // Once an item is received on the oneshot channel, it will immediately + // immediately make the second oneshot channel ready + current_thread.spawn(receiver + .map_err(|_| unreachable!()) + .and_then(move |_| { + sender_2.send(()).unwrap(); + receiver_1_done_clone.set(true); + + Ok(()) + }) + ); + + let receiver_2_done = Rc::new(Cell::new(false)); + let receiver_2_done_clone = receiver_2_done.clone(); + + current_thread.spawn(receiver_2 + .map_err(|_| unreachable!()) + .and_then(move |_| { + receiver_2_done_clone.set(true); + Ok(()) + }) + ); + + // The third receiver is only woken up from our Park implementation, it simulates + // e.g. 
a socket that first has to be polled to know if it is ready now + let receiver_3_done = Rc::new(Cell::new(false)); + let receiver_3_done_clone = receiver_3_done.clone(); + + current_thread.spawn(receiver_3 + .map_err(|_| unreachable!()) + .and_then(move |_| { + receiver_3_done_clone.set(true); + Ok(()) + }) + ); + + // First turn should've polled both and considered them not ready + let res = current_thread.turn(Some(Duration::from_millis(0))).unwrap(); + assert!(res.has_polled()); + + // Next turn should've polled nothing + let res = current_thread.turn(Some(Duration::from_millis(0))).unwrap(); + assert!(!res.has_polled()); + + assert!(!receiver_1_done.get()); + assert!(!receiver_2_done.get()); + assert!(!receiver_3_done.get()); + + // After this the receiver future will wake up the second receiver future, + // so there are pending futures again + sender.send(()).unwrap(); + + // Now the first receiver should be done, the second receiver should be ready + // to be polled again and the socket not yet + let res = current_thread.turn(None).unwrap(); + assert!(res.has_polled()); + + assert!(receiver_1_done.get()); + assert!(!receiver_2_done.get()); + assert!(!receiver_3_done.get()); + + // Now let our park implementation know that it should send something to sender 3 + send_now.set(true); + + // This should resolve the second receiver directly, but also poll the socket + // and read the packet from it. If it didn't do both here, we would handle + // futures that are woken up from the reactor and directly unfairly and would + // favour the ones that are woken up directly. 
+ let res = current_thread.turn(None).unwrap(); + assert!(res.has_polled()); + + assert!(receiver_1_done.get()); + assert!(receiver_2_done.get()); + assert!(receiver_3_done.get()); + + // Don't send again + send_now.set(false); + + // Now we should be idle and turning should not poll anything + assert!(current_thread.is_idle()); + let res = current_thread.turn(None).unwrap(); + assert!(!res.has_polled()); +} + +#[test] +fn spawn_from_other_thread() { + let mut current_thread = CurrentThread::new(); + + let handle = current_thread.handle(); + let (sender, receiver) = oneshot::channel::<()>(); + + thread::spawn(move || { + handle.spawn(lazy(move || { + sender.send(()).unwrap(); + Ok(()) + })).unwrap(); + }); + + let _ = current_thread.block_on(receiver).unwrap(); +} + +#[test] +fn spawn_from_other_thread_unpark() { + use std::sync::mpsc::channel as mpsc_channel; + + let mut current_thread = CurrentThread::new(); + + let handle = current_thread.handle(); + let (sender_1, receiver_1) = oneshot::channel::<()>(); + let (sender_2, receiver_2) = mpsc_channel::<()>(); + + thread::spawn(move || { + let _ = receiver_2.recv().unwrap(); + + handle.spawn(lazy(move || { + sender_1.send(()).unwrap(); + Ok(()) + })).unwrap(); + }); + + // Ensure that unparking the executor works correctly. It will first + // check if there are new futures (there are none), then execute the + // lazy future below which will cause the future to be spawned from + // the other thread. 
Then the executor will park but should be woken + // up because *now* we have a new future to schedule + let _ = current_thread.block_on( + lazy(move || { + sender_2.send(()).unwrap(); + Ok(()) + }) + .and_then(|_| receiver_1) + ).unwrap(); +} + +fn ok() -> future::FutureResult<(), ()> { + future::ok(()) +} diff --git a/third_party/rust/tokio/tests/drop-core.rs b/third_party/rust/tokio/tests/drop-core.rs new file mode 100644 index 000000000000..75ac9b7eb1d5 --- /dev/null +++ b/third_party/rust/tokio/tests/drop-core.rs @@ -0,0 +1,42 @@ +extern crate tokio; +extern crate futures; + +use std::thread; +use std::net; + +use futures::future; +use futures::prelude::*; +use futures::sync::oneshot; +use tokio::net::TcpListener; +use tokio::reactor::Reactor; + +#[test] +fn tcp_doesnt_block() { + let core = Reactor::new().unwrap(); + let handle = core.handle(); + let listener = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let listener = TcpListener::from_std(listener, &handle).unwrap(); + drop(core); + assert!(listener.incoming().wait().next().unwrap().is_err()); +} + +#[test] +fn drop_wakes() { + let core = Reactor::new().unwrap(); + let handle = core.handle(); + let listener = net::TcpListener::bind("127.0.0.1:0").unwrap(); + let listener = TcpListener::from_std(listener, &handle).unwrap(); + let (tx, rx) = oneshot::channel::<()>(); + let t = thread::spawn(move || { + let incoming = listener.incoming(); + let new_socket = incoming.into_future().map_err(|_| ()); + let drop_tx = future::lazy(|| { + drop(tx); + future::ok(()) + }); + assert!(new_socket.join(drop_tx).wait().is_err()); + }); + drop(rx.wait()); + drop(core); + t.join().unwrap(); +} diff --git a/third_party/rust/tokio/tests/echo2.rs b/third_party/rust/tokio/tests/echo2.rs new file mode 100644 index 000000000000..6ead07d811f6 --- /dev/null +++ b/third_party/rust/tokio/tests/echo2.rs @@ -0,0 +1,53 @@ +#![cfg(feature = "unstable-futures")] + +// This test is the same as `echo.rs`, but ported to futures 0.2 + 
+extern crate env_logger; +extern crate futures2; +extern crate tokio; +extern crate tokio_io; + +use std::io::{Read, Write}; +use std::net::TcpStream; +use std::thread; + +use futures2::prelude::*; +use futures2::executor::block_on; +use tokio::net::TcpListener; + +macro_rules! t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn echo_server() { + drop(env_logger::init()); + + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let msg = "foo bar baz"; + let t = thread::spawn(move || { + let mut s = TcpStream::connect(&addr).unwrap(); + + for _i in 0..1024 { + assert_eq!(t!(s.write(msg.as_bytes())), msg.len()); + let mut buf = [0; 1024]; + assert_eq!(t!(s.read(&mut buf)), msg.len()); + assert_eq!(&buf[..msg.len()], msg.as_bytes()); + } + }); + + let clients = srv.incoming(); + let client = clients.next().map(|e| e.0.unwrap()).map_err(|e| e.0); + let halves = client.map(|s| s.split()); + let copied = halves.and_then(|(a, b)| a.copy_into(b)); + + let (amt, _, _) = t!(block_on(copied)); + t.join().unwrap(); + + assert_eq!(amt, msg.len() as u64 * 1024); +} diff --git a/third_party/rust/tokio/tests/global.rs b/third_party/rust/tokio/tests/global.rs new file mode 100644 index 000000000000..5f7950a714e9 --- /dev/null +++ b/third_party/rust/tokio/tests/global.rs @@ -0,0 +1,136 @@ +extern crate futures; +extern crate tokio; +extern crate tokio_io; +extern crate env_logger; + +use std::{io, thread}; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; + +use futures::prelude::*; +use tokio::net::{TcpStream, TcpListener}; +use tokio::runtime::Runtime; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn hammer_old() { + let _ = env_logger::init(); + + let threads = (0..10).map(|_| { + thread::spawn(|| { + let srv = t!(TcpListener::bind(&"127.0.0.1:0".parse().unwrap())); + let addr = t!(srv.local_addr()); + let mine = TcpStream::connect(&addr); + let theirs = srv.incoming().into_future() + .map(|(s, _)| s.unwrap()) + .map_err(|(s, _)| s); + let (mine, theirs) = t!(mine.join(theirs).wait()); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); + }) + }).collect::>(); + for thread in threads { + thread.join().unwrap(); + } +} + +struct Rd(Arc); +struct Wr(Arc); + +impl io::Read for Rd { + fn read(&mut self, dst: &mut [u8]) -> io::Result { + <&TcpStream>::read(&mut &*self.0, dst) + } +} + +impl tokio_io::AsyncRead for Rd { +} + +impl io::Write for Wr { + fn write(&mut self, src: &[u8]) -> io::Result { + <&TcpStream>::write(&mut &*self.0, src) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl tokio_io::AsyncWrite for Wr { + fn shutdown(&mut self) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +#[test] +fn hammer_split() { + use tokio_io::io; + + const N: usize = 100; + const ITER: usize = 10; + + let _ = env_logger::init(); + + for _ in 0..ITER { + let srv = t!(TcpListener::bind(&"127.0.0.1:0".parse().unwrap())); + let addr = t!(srv.local_addr()); + + let cnt = Arc::new(AtomicUsize::new(0)); + + let mut rt = Runtime::new().unwrap(); + + fn split(socket: TcpStream, cnt: Arc) { + let socket = Arc::new(socket); + let rd = Rd(socket.clone()); + let wr = Wr(socket); + + let cnt2 = cnt.clone(); + + let rd = io::read(rd, vec![0; 1]) + .map(move |_| { + cnt2.fetch_add(1, Relaxed); + }) + .map_err(|e| panic!("read error = {:?}", e)); + + let wr = io::write_all(wr, b"1") + .map(move |_| { + cnt.fetch_add(1, Relaxed); + }) + .map_err(move |e| panic!("write 
error = {:?}", e)); + + tokio::spawn(rd); + tokio::spawn(wr); + } + + rt.spawn({ + let cnt = cnt.clone(); + srv.incoming() + .map_err(|e| panic!("accept error = {:?}", e)) + .take(N as u64) + .for_each(move |socket| { + split(socket, cnt.clone()); + Ok(()) + }) + }); + + for _ in 0..N { + rt.spawn({ + let cnt = cnt.clone(); + TcpStream::connect(&addr) + .map_err(move |e| panic!("connect error = {:?}", e)) + .map(move |socket| split(socket, cnt)) + }); + } + + rt.shutdown_on_idle().wait().unwrap(); + assert_eq!(N * 4, cnt.load(Relaxed)); + } +} diff --git a/third_party/rust/tokio/tests/global2.rs b/third_party/rust/tokio/tests/global2.rs new file mode 100644 index 000000000000..24244abb1232 --- /dev/null +++ b/third_party/rust/tokio/tests/global2.rs @@ -0,0 +1,122 @@ +#![cfg(feature = "unstable-futures")] + +// This test is the same as `global.rs`, but ported to futures 0.2 + +extern crate futures; +extern crate futures2; +extern crate tokio; +extern crate tokio_io; +extern crate env_logger; + +use std::{io, thread}; +use std::sync::Arc; + +use futures2::prelude::*; +use futures2::executor::block_on; +use futures2::task; + +use tokio::net::{TcpStream, TcpListener}; +use tokio::runtime::Runtime; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn hammer() { + let _ = env_logger::init(); + + let threads = (0..10).map(|_| { + thread::spawn(|| { + let srv = t!(TcpListener::bind(&"127.0.0.1:0".parse().unwrap())); + let addr = t!(srv.local_addr()); + let mine = TcpStream::connect(&addr); + let theirs = srv.incoming().next() + .map(|(s, _)| s.unwrap()) + .map_err(|(s, _)| s); + let (mine, theirs) = t!(block_on(mine.join(theirs))); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); + }) + }).collect::>(); + for thread in threads { + thread.join().unwrap(); + } +} + +struct Rd(Arc); +struct Wr(Arc); + +impl AsyncRead for Rd { + fn poll_read(&mut self, cx: &mut task::Context, dst: &mut [u8]) -> Poll { + <&TcpStream>::poll_read(&mut &*self.0, cx, dst) + } +} + +impl AsyncWrite for Wr { + fn poll_write(&mut self, cx: &mut task::Context, src: &[u8]) -> Poll { + <&TcpStream>::poll_write(&mut &*self.0, cx, src) + } + + fn poll_flush(&mut self, _cx: &mut task::Context) -> Poll<(), io::Error> { + Ok(().into()) + } + + fn poll_close(&mut self, _cx: &mut task::Context) -> Poll<(), io::Error> { + Ok(().into()) + } +} + +#[test] +fn hammer_split() { + const N: usize = 100; + + let _ = env_logger::init(); + + let srv = t!(TcpListener::bind(&"127.0.0.1:0".parse().unwrap())); + let addr = t!(srv.local_addr()); + + let mut rt = Runtime::new().unwrap(); + + fn split(socket: TcpStream) { + let socket = Arc::new(socket); + let rd = Rd(socket.clone()); + let wr = Wr(socket); + + let rd = rd.read(vec![0; 1]) + .map(|_| ()) + .map_err(|e| panic!("read error = {:?}", e)); + + let wr = wr.write_all(b"1") + .map(|_| ()) + .map_err(|e| panic!("write error = {:?}", e)); + + tokio::spawn2(rd); + tokio::spawn2(wr); + } + + rt.spawn2({ + srv.incoming() + .map_err(|e| panic!("accept error = {:?}", e)) + .take(N as u64) + .for_each(|socket| { + 
split(socket); + Ok(()) + }) + .map(|_| ()) + }); + + for _ in 0..N { + rt.spawn2({ + TcpStream::connect(&addr) + .map_err(|e| panic!("connect error = {:?}", e)) + .map(|socket| split(socket)) + }); + } + + futures::Future::wait(rt.shutdown_on_idle()).unwrap(); +} diff --git a/third_party/rust/tokio/tests/line-frames.rs b/third_party/rust/tokio/tests/line-frames.rs new file mode 100644 index 000000000000..4d42f5080654 --- /dev/null +++ b/third_party/rust/tokio/tests/line-frames.rs @@ -0,0 +1,88 @@ +extern crate env_logger; +extern crate futures; +extern crate tokio; +extern crate tokio_codec; +extern crate tokio_io; +extern crate tokio_threadpool; +extern crate bytes; + +use std::io; +use std::net::Shutdown; + +use bytes::{BytesMut, BufMut}; +use futures::{Future, Stream, Sink}; +use tokio::net::{TcpListener, TcpStream}; +use tokio_codec::{Encoder, Decoder}; +use tokio_io::io::{write_all, read}; +use tokio_threadpool::Builder; + +pub struct LineCodec; + +impl Decoder for LineCodec { + type Item = BytesMut; + type Error = io::Error; + + fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { + match buf.iter().position(|&b| b == b'\n') { + Some(i) => Ok(Some(buf.split_to(i + 1).into())), + None => Ok(None), + } + } + + fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result> { + if buf.len() == 0 { + Ok(None) + } else { + let amt = buf.len(); + Ok(Some(buf.split_to(amt))) + } + } +} + +impl Encoder for LineCodec { + type Item = BytesMut; + type Error = io::Error; + + fn encode(&mut self, item: BytesMut, into: &mut BytesMut) -> io::Result<()> { + into.put(&item[..]); + Ok(()) + } +} + +#[test] +fn echo() { + drop(env_logger::init()); + + let pool = Builder::new() + .pool_size(1) + .build(); + + let listener = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + let sender = pool.sender().clone(); + let srv = listener.incoming().for_each(move |socket| { + let (sink, stream) = 
LineCodec.framed(socket).split(); + sender.spawn(sink.send_all(stream).map(|_| ()).map_err(|_| ())).unwrap(); + Ok(()) + }); + + pool.sender().spawn(srv.map_err(|e| panic!("srv error: {}", e))).unwrap(); + + let client = TcpStream::connect(&addr); + let client = client.wait().unwrap(); + let (client, _) = write_all(client, b"a\n").wait().unwrap(); + let (client, buf, amt) = read(client, vec![0; 1024]).wait().unwrap(); + assert_eq!(amt, 2); + assert_eq!(&buf[..2], b"a\n"); + + let (client, _) = write_all(client, b"\n").wait().unwrap(); + let (client, buf, amt) = read(client, buf).wait().unwrap(); + assert_eq!(amt, 1); + assert_eq!(&buf[..1], b"\n"); + + let (client, _) = write_all(client, b"b").wait().unwrap(); + client.shutdown(Shutdown::Write).unwrap(); + let (_client, buf, amt) = read(client, buf).wait().unwrap(); + assert_eq!(amt, 1); + assert_eq!(&buf[..1], b"b"); +} diff --git a/third_party/rust/tokio/tests/pipe-hup.rs b/third_party/rust/tokio/tests/pipe-hup.rs new file mode 100644 index 000000000000..d14f7674cde2 --- /dev/null +++ b/third_party/rust/tokio/tests/pipe-hup.rs @@ -0,0 +1,88 @@ +#![cfg(unix)] + +extern crate env_logger; +extern crate futures; +extern crate libc; +extern crate mio; +extern crate tokio; +extern crate tokio_io; + +use std::fs::File; +use std::io::{self, Write}; +use std::os::unix::io::{AsRawFd, FromRawFd}; +use std::thread; +use std::time::Duration; + +use mio::event::Evented; +use mio::unix::{UnixReady, EventedFd}; +use mio::{PollOpt, Ready, Token}; +use tokio::reactor::{Handle, PollEvented2}; +use tokio_io::io::read_to_end; +use futures::Future; + +macro_rules! 
t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +struct MyFile(File); + +impl MyFile { + fn new(file: File) -> MyFile { + unsafe { + let r = libc::fcntl(file.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + assert!(r != -1, "fcntl error: {}", io::Error::last_os_error()); + } + MyFile(file) + } +} + +impl io::Read for MyFile { + fn read(&mut self, bytes: &mut [u8]) -> io::Result { + self.0.read(bytes) + } +} + +impl Evented for MyFile { + fn register(&self, poll: &mio::Poll, token: Token, interest: Ready, opts: PollOpt) + -> io::Result<()> { + let hup: Ready = UnixReady::hup().into(); + EventedFd(&self.0.as_raw_fd()).register(poll, token, interest | hup, opts) + } + fn reregister(&self, poll: &mio::Poll, token: Token, interest: Ready, opts: PollOpt) + -> io::Result<()> { + let hup: Ready = UnixReady::hup().into(); + EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest | hup, opts) + } + fn deregister(&self, poll: &mio::Poll) -> io::Result<()> { + EventedFd(&self.0.as_raw_fd()).deregister(poll) + } +} + +#[test] +fn hup() { + drop(env_logger::init()); + + let handle = Handle::default(); + unsafe { + let mut pipes = [0; 2]; + assert!(libc::pipe(pipes.as_mut_ptr()) != -1, + "pipe error: {}", io::Error::last_os_error()); + let read = File::from_raw_fd(pipes[0]); + let mut write = File::from_raw_fd(pipes[1]); + let t = thread::spawn(move || { + write.write_all(b"Hello!\n").unwrap(); + write.write_all(b"Good bye!\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + }); + + let source = PollEvented2::new_with_handle(MyFile::new(read), &handle).unwrap(); + + let reader = read_to_end(source, Vec::new()); + let (_, content) = t!(reader.wait()); + assert_eq!(&b"Hello!\nGood bye!\n"[..], &content[..]); + t.join().unwrap(); + } +} diff --git a/third_party/rust/tokio/tests/runtime.rs b/third_party/rust/tokio/tests/runtime.rs new file mode 100644 index 000000000000..7012a178391a --- /dev/null 
+++ b/third_party/rust/tokio/tests/runtime.rs @@ -0,0 +1,175 @@ +extern crate tokio; +extern crate env_logger; +extern crate futures; + +use futures::sync::oneshot; +use std::sync::{Arc, Mutex}; +use std::thread; +use tokio::io; +use tokio::net::{TcpStream, TcpListener}; +use tokio::prelude::future::lazy; +use tokio::prelude::*; +use tokio::runtime::Runtime; + +macro_rules! t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +fn create_client_server_future() -> Box + Send> { + let server = t!(TcpListener::bind(&"127.0.0.1:0".parse().unwrap())); + let addr = t!(server.local_addr()); + let client = TcpStream::connect(&addr); + + let server = server.incoming().take(1) + .map_err(|e| panic!("accept err = {:?}", e)) + .for_each(|socket| { + tokio::spawn({ + io::write_all(socket, b"hello") + .map(|_| ()) + .map_err(|e| panic!("write err = {:?}", e)) + }) + }) + .map(|_| ()); + + let client = client + .map_err(|e| panic!("connect err = {:?}", e)) + .and_then(|client| { + // Read all + io::read_to_end(client, vec![]) + .map(|_| ()) + .map_err(|e| panic!("read err = {:?}", e)) + }); + + let future = server.join(client) + .map(|_| ()); + Box::new(future) +} + +#[test] +fn runtime_tokio_run() { + let _ = env_logger::init(); + + tokio::run(create_client_server_future()); +} + +#[test] +fn runtime_single_threaded() { + let _ = env_logger::init(); + + let mut runtime = tokio::runtime::current_thread::Runtime::new() + .unwrap(); + runtime.block_on(create_client_server_future()).unwrap(); + runtime.run().unwrap(); +} + +#[test] +fn runtime_multi_threaded() { + let _ = env_logger::init(); + + let mut runtime = tokio::runtime::Builder::new() + .build() + .unwrap(); + runtime.spawn(create_client_server_future()); + runtime.shutdown_on_idle().wait().unwrap(); +} + +#[test] +fn block_on_timer() { + use std::time::{Duration, Instant}; + use tokio::timer::{Delay, Error}; + + fn after_1s(x: T) -> Box + Send> + where + T: 
Send + 'static, + { + Box::new(Delay::new(Instant::now() + Duration::from_millis(100)).map(move |_| x)) + } + + let mut runtime = Runtime::new().unwrap(); + assert_eq!(runtime.block_on(after_1s(42)).unwrap(), 42); + runtime.shutdown_on_idle().wait().unwrap(); +} + +#[test] +fn spawn_from_block_on() { + let cnt = Arc::new(Mutex::new(0)); + let c = cnt.clone(); + + let mut runtime = Runtime::new().unwrap(); + let msg = runtime + .block_on(lazy(move || { + { + let mut x = c.lock().unwrap(); + *x = 1 + *x; + } + + // Spawn! + tokio::spawn(lazy(move || { + { + let mut x = c.lock().unwrap(); + *x = 1 + *x; + } + Ok::<(), ()>(()) + })); + + Ok::<_, ()>("hello") + })) + .unwrap(); + + runtime.shutdown_on_idle().wait().unwrap(); + assert_eq!(2, *cnt.lock().unwrap()); + assert_eq!(msg, "hello"); +} + +#[test] +fn block_waits() { + let (tx, rx) = oneshot::channel(); + + thread::spawn(|| { + use std::time::Duration; + thread::sleep(Duration::from_millis(1000)); + tx.send(()).unwrap(); + }); + + let cnt = Arc::new(Mutex::new(0)); + let c = cnt.clone(); + + let mut runtime = Runtime::new().unwrap(); + runtime + .block_on(rx.then(move |_| { + { + let mut x = c.lock().unwrap(); + *x = 1 + *x; + } + Ok::<_, ()>(()) + })) + .unwrap(); + + assert_eq!(1, *cnt.lock().unwrap()); + runtime.shutdown_on_idle().wait().unwrap(); +} + +#[test] +fn spawn_many() { + const ITER: usize = 200; + + let cnt = Arc::new(Mutex::new(0)); + let mut runtime = Runtime::new().unwrap(); + + for _ in 0..ITER { + let c = cnt.clone(); + runtime.spawn(lazy(move || { + { + let mut x = c.lock().unwrap(); + *x = 1 + *x; + } + Ok::<(), ()>(()) + })); + } + + runtime.shutdown_on_idle().wait().unwrap(); + assert_eq!(ITER, *cnt.lock().unwrap()); +} diff --git a/third_party/rust/tokio/tests/tcp2.rs b/third_party/rust/tokio/tests/tcp2.rs new file mode 100644 index 000000000000..4fbc978cba8f --- /dev/null +++ b/third_party/rust/tokio/tests/tcp2.rs @@ -0,0 +1,136 @@ +#![cfg(feature = "unstable-futures")] + +// This test is 
the same as `tcp.rs`, but ported to futures 0.2 + +extern crate env_logger; +extern crate tokio; +extern crate mio; +extern crate futures2; + +use std::{net, thread}; +use std::sync::mpsc::channel; + +use tokio::net::{TcpListener, TcpStream}; +use futures2::executor::block_on; +use futures2::prelude::*; + +macro_rules! t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {:?}", stringify!($e), e), + }) +} + +#[test] +fn connect() { + drop(env_logger::init()); + let srv = t!(net::TcpListener::bind("127.0.0.1:0")); + let addr = t!(srv.local_addr()); + let t = thread::spawn(move || { + t!(srv.accept()).0 + }); + + let stream = TcpStream::connect(&addr); + let mine = t!(block_on(stream)); + let theirs = t.join().unwrap(); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); +} + +#[test] +fn accept() { + drop(env_logger::init()); + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let (tx, rx) = channel(); + let client = srv.incoming().map(move |t| { + tx.send(()).unwrap(); + t + }).next().map_err(|e| e.0); + assert!(rx.try_recv().is_err()); + let t = thread::spawn(move || { + net::TcpStream::connect(&addr).unwrap() + }); + + let (mine, _remaining) = t!(block_on(client)); + let mine = mine.unwrap(); + let theirs = t.join().unwrap(); + + assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr())); + assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr())); +} + +#[test] +fn accept2() { + drop(env_logger::init()); + let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()))); + let addr = t!(srv.local_addr()); + + let t = thread::spawn(move || { + net::TcpStream::connect(&addr).unwrap() + }); + + let (tx, rx) = channel(); + let client = srv.incoming().map(move |t| { + tx.send(()).unwrap(); + t + }).next().map_err(|e| e.0); + assert!(rx.try_recv().is_err()); + + let (mine, _remaining) = t!(block_on(client)); + mine.unwrap(); + 
t.join().unwrap(); +} + +#[cfg(unix)] +mod unix { + use tokio::net::TcpStream; + use tokio::prelude::*; + + use env_logger; + use futures2::future; + use futures2::executor::block_on; + use futures2::io::AsyncRead; + use mio::unix::UnixReady; + + use std::{net, thread}; + use std::time::Duration; + + #[test] + fn poll_hup() { + drop(env_logger::init()); + + let srv = t!(net::TcpListener::bind("127.0.0.1:0")); + let addr = t!(srv.local_addr()); + let t = thread::spawn(move || { + let mut client = t!(srv.accept()).0; + client.write(b"hello world").unwrap(); + thread::sleep(Duration::from_millis(200)); + }); + + let mut stream = t!(block_on(TcpStream::connect(&addr))); + + // Poll for HUP before reading. + block_on(future::poll_fn(|cx| { + stream.poll_read_ready2(cx, UnixReady::hup().into()) + })).unwrap(); + + // Same for write half + block_on(future::poll_fn(|cx| { + stream.poll_write_ready2(cx) + })).unwrap(); + + let mut buf = vec![0; 11]; + + // Read the data + block_on(future::poll_fn(|cx| { + stream.poll_read(cx, &mut buf) + })).unwrap(); + + assert_eq!(b"hello world", &buf[..]); + + t.join().unwrap(); + } +} diff --git a/third_party/rust/tokio/tests/timer.rs b/third_party/rust/tokio/tests/timer.rs new file mode 100644 index 000000000000..8d1d52d73280 --- /dev/null +++ b/third_party/rust/tokio/tests/timer.rs @@ -0,0 +1,94 @@ +extern crate futures; +extern crate tokio; +extern crate tokio_io; +extern crate env_logger; + +use tokio::prelude::*; +use tokio::timer::*; + +use std::sync::mpsc; +use std::time::{Duration, Instant}; + +#[test] +fn timer_with_runtime() { + let _ = env_logger::init(); + + let when = Instant::now() + Duration::from_millis(100); + let (tx, rx) = mpsc::channel(); + + tokio::run({ + Delay::new(when) + .map_err(|e| panic!("unexpected error; err={:?}", e)) + .and_then(move |_| { + assert!(Instant::now() >= when); + tx.send(()).unwrap(); + Ok(()) + }) + }); + + rx.recv().unwrap(); +} + +#[test] +fn starving() { + use futures::{task, Poll, 
Async}; + + let _ = env_logger::init(); + + struct Starve(Delay, u64); + + impl Future for Starve { + type Item = u64; + type Error = (); + + fn poll(&mut self) -> Poll { + if self.0.poll().unwrap().is_ready() { + return Ok(self.1.into()); + } + + self.1 += 1; + + task::current().notify(); + + Ok(Async::NotReady) + } + } + + let when = Instant::now() + Duration::from_millis(20); + let starve = Starve(Delay::new(when), 0); + + let (tx, rx) = mpsc::channel(); + + tokio::run({ + starve + .and_then(move |_ticks| { + assert!(Instant::now() >= when); + tx.send(()).unwrap(); + Ok(()) + }) + }); + + rx.recv().unwrap(); +} + +#[test] +fn deadline() { + use futures::future; + + let _ = env_logger::init(); + + let when = Instant::now() + Duration::from_millis(20); + let (tx, rx) = mpsc::channel(); + + tokio::run({ + future::empty::<(), ()>() + .deadline(when) + .then(move |res| { + assert!(res.is_err()); + tx.send(()).unwrap(); + Ok(()) + }) + }); + + rx.recv().unwrap(); +} diff --git a/third_party/rust/traitobject/.cargo-checksum.json b/third_party/rust/traitobject/.cargo-checksum.json deleted file mode 100644 index 35f1c2ed5980..000000000000 --- a/third_party/rust/traitobject/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".travis.yml":"180cc837b1ea2ce0ed737c2e15208e83abf6f92291a77ed1ebcc564c628c2077","Cargo.toml":"2fc318d42aa26fc082740d312f6f712102988b388fbc556f1ebfe8c58c7e74de","README.md":"34b536550c8940eced0946c2c83c9984648346afad575c2ecfcd5cd64aca8678","src/impls.rs":"b8afa3cf40ff67e8a652ed6e07a15a69ec67d75c9b356094b209140a60d2d89b","src/lib.rs":"266104684b419fe6efd823976f5d4e728cc044b5169ea9e3d162709e02bcd1d5"},"package":"efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079"} \ No newline at end of file diff --git a/third_party/rust/traitobject/.travis.yml b/third_party/rust/traitobject/.travis.yml deleted file mode 100644 index 47adcf7efb08..000000000000 --- a/third_party/rust/traitobject/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: 
rust -sudo: false - -script: - - cargo build - - cargo test - - cargo bench --no-run - - cargo doc - -after_success: - - if [ "$TRAVIS_PULL_REQUEST" == false && test == "TRAVIS_BRANCH" == "master" ]; then - - curl https://raw.githubusercontent.com/reem/rust-gh-docs/master/make-docs.sh > docs.sh - - chmod u+x docs.sh - - ./docs.sh reem project-name - -env: - global: - secure: QPYL1XUr4CyK/2DXlsYC1eCpWRpyEiqQSd/FFVR+YdP/rOJ7AyAXQqPhfgjDBQwvc6E2fUiyYjoV/xe1a757DDeZKlgd8Lp20fSDwvNt/Ejx8ueh3h3kuOtgDpIGSKX/l+XC+ltDpzjhh7bowI2/fOEf+kE53jvu9i4PiLnKdlY= - diff --git a/third_party/rust/traitobject/Cargo.toml b/third_party/rust/traitobject/Cargo.toml deleted file mode 100644 index 83bff93ec1ba..000000000000 --- a/third_party/rust/traitobject/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] - -name = "traitobject" -version = "0.1.0" -authors = ["Jonathan Reem "] -repository = "https://github.com/reem/rust-traitobject.git" -description = "Unsafe helpers for working with raw trait objects." -readme = "README.md" -license = "MIT/Apache-2.0" - diff --git a/third_party/rust/traitobject/README.md b/third_party/rust/traitobject/README.md deleted file mode 100644 index 9e9235ee73e0..000000000000 --- a/third_party/rust/traitobject/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# traitobject - -> Unsafe helpers for dealing with raw trait objects. - -## Usage - -Use the crates.io repository; add this to your `Cargo.toml` along -with the rest of your dependencies: - -```toml -[dependencies] -traitobject = "*" -``` - -## Author - -[Jonathan Reem](https://medium.com/@jreem) is the primary author and maintainer -of traitobject. 
- -## License - -MIT/Apache-2.0 - diff --git a/third_party/rust/traitobject/src/impls.rs b/third_party/rust/traitobject/src/impls.rs deleted file mode 100644 index 6d356869c984..000000000000 --- a/third_party/rust/traitobject/src/impls.rs +++ /dev/null @@ -1,82 +0,0 @@ -use Trait; - -unsafe impl Trait for ::std::any::Any + Send { } -unsafe impl Trait for ::std::any::Any + Sync { } -unsafe impl Trait for ::std::any::Any + Send + Sync { } -unsafe impl Trait for ::std::borrow::Borrow + Send { } -unsafe impl Trait for ::std::borrow::Borrow + Sync { } -unsafe impl Trait for ::std::borrow::Borrow + Send + Sync { } -unsafe impl Trait for ::std::borrow::BorrowMut + Send { } -unsafe impl Trait for ::std::borrow::BorrowMut + Sync { } -unsafe impl Trait for ::std::borrow::BorrowMut + Send + Sync { } -unsafe impl Trait for ::std::convert::AsMut + Send { } -unsafe impl Trait for ::std::convert::AsMut + Sync { } -unsafe impl Trait for ::std::convert::AsMut + Send + Sync { } -unsafe impl Trait for ::std::convert::AsRef + Send { } -unsafe impl Trait for ::std::convert::AsRef + Sync { } -unsafe impl Trait for ::std::convert::AsRef + Send + Sync { } -unsafe impl Trait for ::std::error::Error + Send { } -unsafe impl Trait for ::std::error::Error + Sync { } -unsafe impl Trait for ::std::error::Error + Send + Sync { } -unsafe impl Trait for ::std::fmt::Binary + Send { } -unsafe impl Trait for ::std::fmt::Binary + Sync { } -unsafe impl Trait for ::std::fmt::Binary + Send + Sync { } -unsafe impl Trait for ::std::fmt::Debug + Send { } -unsafe impl Trait for ::std::fmt::Debug + Sync { } -unsafe impl Trait for ::std::fmt::Debug + Send + Sync { } -unsafe impl Trait for ::std::fmt::Display + Send { } -unsafe impl Trait for ::std::fmt::Display + Sync { } -unsafe impl Trait for ::std::fmt::Display + Send + Sync { } -unsafe impl Trait for ::std::fmt::LowerExp + Send { } -unsafe impl Trait for ::std::fmt::LowerExp + Sync { } -unsafe impl Trait for ::std::fmt::LowerExp + Send + Sync { } -unsafe 
impl Trait for ::std::fmt::LowerHex + Send { } -unsafe impl Trait for ::std::fmt::LowerHex + Sync { } -unsafe impl Trait for ::std::fmt::LowerHex + Send + Sync { } -unsafe impl Trait for ::std::fmt::Octal + Send { } -unsafe impl Trait for ::std::fmt::Octal + Sync { } -unsafe impl Trait for ::std::fmt::Octal + Send + Sync { } -unsafe impl Trait for ::std::fmt::Pointer + Send { } -unsafe impl Trait for ::std::fmt::Pointer + Sync { } -unsafe impl Trait for ::std::fmt::Pointer + Send + Sync { } -unsafe impl Trait for ::std::fmt::UpperExp + Send { } -unsafe impl Trait for ::std::fmt::UpperExp + Sync { } -unsafe impl Trait for ::std::fmt::UpperExp + Send + Sync { } -unsafe impl Trait for ::std::fmt::UpperHex + Send { } -unsafe impl Trait for ::std::fmt::UpperHex + Sync { } -unsafe impl Trait for ::std::fmt::UpperHex + Send + Sync { } -unsafe impl Trait for ::std::fmt::Write + Send { } -unsafe impl Trait for ::std::fmt::Write + Sync { } -unsafe impl Trait for ::std::fmt::Write + Send + Sync { } -unsafe impl Trait for ::std::hash::Hasher + Send { } -unsafe impl Trait for ::std::hash::Hasher + Sync { } -unsafe impl Trait for ::std::hash::Hasher + Send + Sync { } -unsafe impl Trait for ::std::io::BufRead + Send { } -unsafe impl Trait for ::std::io::BufRead + Sync { } -unsafe impl Trait for ::std::io::BufRead + Send + Sync { } -unsafe impl Trait for ::std::io::Read + Send { } -unsafe impl Trait for ::std::io::Read + Sync { } -unsafe impl Trait for ::std::io::Read + Send + Sync { } -unsafe impl Trait for ::std::io::Seek + Send { } -unsafe impl Trait for ::std::io::Seek + Sync { } -unsafe impl Trait for ::std::io::Seek + Send + Sync { } -unsafe impl Trait for ::std::io::Write + Send { } -unsafe impl Trait for ::std::io::Write + Sync { } -unsafe impl Trait for ::std::io::Write + Send + Sync { } -unsafe impl Trait for ::std::iter::IntoIterator { } -unsafe impl Trait for ::std::iter::Iterator + Send { } -unsafe impl Trait for ::std::iter::Iterator + Sync { } -unsafe impl Trait for 
::std::iter::Iterator + Send + Sync { } -unsafe impl Trait for ::std::marker::Send + Send { } -unsafe impl Trait for ::std::marker::Send + Sync { } -unsafe impl Trait for ::std::marker::Send + Send + Sync { } -unsafe impl Trait for ::std::marker::Sync + Send { } -unsafe impl Trait for ::std::marker::Sync + Sync { } -unsafe impl Trait for ::std::marker::Sync + Send + Sync { } -unsafe impl Trait for ::std::ops::Drop + Send { } -unsafe impl Trait for ::std::ops::Drop + Sync { } -unsafe impl Trait for ::std::ops::Drop + Send + Sync { } -unsafe impl Trait for ::std::string::ToString + Send { } -unsafe impl Trait for ::std::string::ToString + Sync { } -unsafe impl Trait for ::std::string::ToString + Send + Sync { } - diff --git a/third_party/rust/traitobject/src/lib.rs b/third_party/rust/traitobject/src/lib.rs deleted file mode 100644 index 5a7dac804d04..000000000000 --- a/third_party/rust/traitobject/src/lib.rs +++ /dev/null @@ -1,36 +0,0 @@ -#![cfg_attr(test, deny(warnings))] -#![deny(missing_docs)] - -//! # traitobject -//! -//! Unsafe helpers for working with raw TraitObjects. - -use std::mem; - -/// Get the data pointer from this trait object. -/// -/// Highly unsafe, as there is no information about the type of the data. -pub unsafe fn data(val: *const T) -> *const () { - *mem::transmute::<*const *const T, *const *const ()>(&val) -} - -/// Get the data pointer from this trait object, mutably. -/// -/// Highly unsafe, as there is no information about the type of the data. -pub unsafe fn data_mut(mut val: *mut T) -> *mut () { - *mem::transmute::<*mut *mut T, *mut *mut ()>(&mut val) -} - -#[test] -fn test_simple() { - let x = &7 as &Send; - unsafe { assert!(&7 == mem::transmute::<_, &i32>(data(x))) }; -} - -/// A trait implemented for all trait objects. -/// -/// Implementations for all traits in std are provided. 
-pub unsafe trait Trait {} - -mod impls; - diff --git a/third_party/rust/try-lock/.cargo-checksum.json b/third_party/rust/try-lock/.cargo-checksum.json new file mode 100644 index 000000000000..c25ba91a23ef --- /dev/null +++ b/third_party/rust/try-lock/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"1f53870987474bcf5f692dc5affe4035d30e3aa7e281b8e30973b3bb80d9900e","LICENSE":"69127cd697ac8e4da8d4a206ae067bbeb2aef41618c4be521509772110c4f202","README.md":"976bf116dd8794213bd242a61f819811a5c5a0fd4372acdbd21308863c05cfb6","src/lib.rs":"182411436b16dab70e9f416be8d8ffc3bc5e663de0923d7463f1ec0963ecbe80"},"package":"e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382"} \ No newline at end of file diff --git a/third_party/rust/version_check/Cargo.toml b/third_party/rust/try-lock/Cargo.toml similarity index 63% rename from third_party/rust/version_check/Cargo.toml rename to third_party/rust/try-lock/Cargo.toml index 2afb45b7d809..5a2039b4d24c 100644 --- a/third_party/rust/version_check/Cargo.toml +++ b/third_party/rust/try-lock/Cargo.toml @@ -11,14 +11,16 @@ # will likely look very different (and much more reasonable) [package] -name = "version_check" -version = "0.1.3" -authors = ["Sergio Benitez "] -description = "Tiny crate to check the version of the installed/running rustc." -documentation = "https://docs.rs/version_check/" +name = "try-lock" +version = "0.2.2" +authors = ["Sean McArthur "] +description = "A lightweight atomic lock." 
+homepage = "https://github.com/seanmonstar/try-lock" +documentation = "https://docs.rs/try-lock" readme = "README.md" -keywords = ["version", "rustc", "minimum", "check"] +keywords = ["lock", "atomic"] +categories = ["concurrency"] license = "MIT" -repository = "https://github.com/SergioBenitez/version_check" +repository = "https://github.com/seanmonstar/try-lock" [dependencies] diff --git a/third_party/rust/unicase/LICENSE b/third_party/rust/try-lock/LICENSE similarity index 93% rename from third_party/rust/unicase/LICENSE rename to third_party/rust/try-lock/LICENSE index 44d199432578..5cddb267afe8 100644 --- a/third_party/rust/unicase/LICENSE +++ b/third_party/rust/try-lock/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2014-2015 Sean McArthur +Copyright (c) 2018 Sean McArthur +Copyright (c) 2016 Alex Crichton Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/third_party/rust/try-lock/README.md b/third_party/rust/try-lock/README.md new file mode 100644 index 000000000000..abbb0eac47aa --- /dev/null +++ b/third_party/rust/try-lock/README.md @@ -0,0 +1,44 @@ +# TryLock + +- [Crates.io](https://crates.io/crates/try-lock) +- [Docs](https://docs.rs/try-lock) + +A light-weight lock guarded by an atomic boolean. + +Most efficient when contention is low, acquiring the lock is a single atomic swap, and releasing it just 1 more atomic swap. + +## Example + +```rust +use std::sync::Arc; +use try_lock::TryLock; + +// a thing we want to share +struct Widget { + name: String, +} + +// lock it up! 
+let widget1 = Arc::new(TryLock::new(Widget { + name: "Spanner".into(), +})); + +let widget2 = widget1.clone(); + + +// mutate the widget +let mut locked = widget1.try_lock().expect("example isn't locked yet"); +locked.name.push_str(" Bundle"); + +// hands off, buddy +let not_locked = widget2.try_lock(); +assert!(not_locked.is_none(), "widget1 has the lock"); + +// ok, you can have it +drop(locked); + +let locked2 = widget2.try_lock().expect("widget1 lock is released"); + +assert_eq!(locked2.name, "Spanner Bundle"); +``` + diff --git a/third_party/rust/try-lock/src/lib.rs b/third_party/rust/try-lock/src/lib.rs new file mode 100644 index 000000000000..7c0feec676b9 --- /dev/null +++ b/third_party/rust/try-lock/src/lib.rs @@ -0,0 +1,204 @@ +#![doc(html_root_url = "https://docs.rs/try-lock/0.2.2")] +#![deny(missing_docs)] +#![deny(missing_debug_implementations)] +#![deny(warnings)] + +//! A light-weight lock guarded by an atomic boolean. +//! +//! Most efficient when contention is low, acquiring the lock is a single +//! atomic swap, and releasing it just 1 more atomic swap. +//! +//! # Example +//! +//! ``` +//! use std::sync::Arc; +//! use try_lock::TryLock; +//! +//! // a thing we want to share +//! struct Widget { +//! name: String, +//! } +//! +//! // lock it up! +//! let widget1 = Arc::new(TryLock::new(Widget { +//! name: "Spanner".into(), +//! })); +//! +//! let widget2 = widget1.clone(); +//! +//! +//! // mutate the widget +//! let mut locked = widget1.try_lock().expect("example isn't locked yet"); +//! locked.name.push_str(" Bundle"); +//! +//! // hands off, buddy +//! let not_locked = widget2.try_lock(); +//! assert!(not_locked.is_none(), "widget1 has the lock"); +//! +//! // ok, you can have it +//! drop(locked); +//! +//! let locked2 = widget2.try_lock().expect("widget1 lock is released"); +//! +//! assert_eq!(locked2.name, "Spanner Bundle"); +//! 
``` + +use std::cell::UnsafeCell; +use std::fmt; +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicBool, Ordering}; + +/// A light-weight lock guarded by an atomic boolean. +/// +/// Most efficient when contention is low, acquiring the lock is a single +/// atomic swap, and releasing it just 1 more atomic swap. +/// +/// It is only possible to try to acquire the lock, it is not possible to +/// wait for the lock to become ready, like with a `Mutex`. +#[derive(Default)] +pub struct TryLock { + is_locked: AtomicBool, + value: UnsafeCell, +} + +impl TryLock { + /// Create a `TryLock` around the value. + #[inline] + pub fn new(val: T) -> TryLock { + TryLock { + is_locked: AtomicBool::new(false), + value: UnsafeCell::new(val), + } + } + + /// Try to acquire the lock of this value. + /// + /// If the lock is already acquired by someone else, this returns + /// `None`. You can try to acquire again whenever you want, perhaps + /// by spinning a few times, or by using some other means of + /// notification. + /// + /// # Note + /// + /// The default memory ordering is to use `Acquire` to lock, and `Release` + /// to unlock. If different ordering is required, use + /// [`try_lock_order`](TryLock::try_lock_order). + #[inline] + pub fn try_lock(&self) -> Option> { + self.try_lock_order(Ordering::Acquire, Ordering::Release) + } + + /// Try to acquire the lock of this value using the lock and unlock orderings. + /// + /// If the lock is already acquired by someone else, this returns + /// `None`. You can try to acquire again whenever you want, perhaps + /// by spinning a few times, or by using some other means of + /// notification. + #[inline] + pub fn try_lock_order(&self, lock_order: Ordering, unlock_order: Ordering) -> Option> { + if !self.is_locked.swap(true, lock_order) { + Some(Locked { + lock: self, + order: unlock_order, + }) + } else { + None + } + } + + /// Take the value back out of the lock when this is the sole owner. 
+ #[inline] + pub fn into_inner(self) -> T { + debug_assert!(!self.is_locked.load(Ordering::Relaxed), "TryLock was mem::forgotten"); + // Since the compiler can statically determine this is the only owner, + // it's safe to take the value out. In fact, in newer versions of Rust, + // `UnsafeCell::into_inner` has been marked safe. + // + // To support older version (1.21), the unsafe block is still here. + #[allow(unused_unsafe)] + unsafe { + self.value.into_inner() + } + } +} + +unsafe impl Send for TryLock {} +unsafe impl Sync for TryLock {} + +impl fmt::Debug for TryLock { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + + // Used if the TryLock cannot acquire the lock. + struct LockedPlaceholder; + + impl fmt::Debug for LockedPlaceholder { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("") + } + } + + let mut builder = f.debug_struct("TryLock"); + if let Some(locked) = self.try_lock() { + builder.field("value", &*locked); + } else { + builder.field("value", &LockedPlaceholder); + } + builder.finish() + } +} + +/// A locked value acquired from a `TryLock`. +/// +/// The type represents an exclusive view at the underlying value. The lock is +/// released when this type is dropped. +/// +/// This type derefs to the underlying value. 
+#[must_use = "TryLock will immediately unlock if not used"] +pub struct Locked<'a, T: 'a> { + lock: &'a TryLock, + order: Ordering, +} + +impl<'a, T> Deref for Locked<'a, T> { + type Target = T; + #[inline] + fn deref(&self) -> &T { + unsafe { &*self.lock.value.get() } + } +} + +impl<'a, T> DerefMut for Locked<'a, T> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.lock.value.get() } + } +} + +impl<'a, T> Drop for Locked<'a, T> { + #[inline] + fn drop(&mut self) { + self.lock.is_locked.store(false, self.order); + } +} + +impl<'a, T: fmt::Debug> fmt::Debug for Locked<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[cfg(test)] +mod tests { + use super::TryLock; + + #[test] + fn fmt_debug() { + let lock = TryLock::new(5); + assert_eq!(format!("{:?}", lock), "TryLock { value: 5 }"); + + let locked = lock.try_lock().unwrap(); + assert_eq!(format!("{:?}", locked), "5"); + + assert_eq!(format!("{:?}", lock), "TryLock { value: }"); + } +} diff --git a/third_party/rust/typeable/.cargo-checksum.json b/third_party/rust/typeable/.cargo-checksum.json deleted file mode 100644 index 2928f0d3299b..000000000000 --- a/third_party/rust/typeable/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".travis.yml":"0d3e748c3b656d829a287f26360f7b860e0a579409828b4bdbe7ef843a79bb97","Cargo.toml":"76145a8c8efe636dc4b0a2939edf10798afe2529bbd1c95faab09330d2a3a6d9","src/lib.rs":"a3dc3caf75480c093cf4949b8287fef27a5e8f29ce20f7b7bea8f107b218c650"},"package":"1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887"} \ No newline at end of file diff --git a/third_party/rust/typeable/.travis.yml b/third_party/rust/typeable/.travis.yml deleted file mode 100644 index 69c22cbeaf76..000000000000 --- a/third_party/rust/typeable/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: c -env: - - LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib/rustlib/x86_64-unknown-linux-gnu/lib/ -install: - - curl 
https://static.rust-lang.org/rustup.sh | sudo bash -script: - - cargo build -v - - cargo test -v - - cargo doc -v -os: - - linux diff --git a/third_party/rust/typeable/Cargo.toml b/third_party/rust/typeable/Cargo.toml deleted file mode 100644 index 08ce135bbc07..000000000000 --- a/third_party/rust/typeable/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] - -name = "typeable" -version = "0.1.2" -authors = ["Jonathan Reem "] -description = "Exposes Typeable, for getting TypeIds at runtime." -keywords = ["type", "reflect", "typeid", "any", "downcast"] -repository = "https://github.com/reem/rust-typeable" -license = "MIT" - diff --git a/third_party/rust/typeable/src/lib.rs b/third_party/rust/typeable/src/lib.rs deleted file mode 100644 index f2ba33473d90..000000000000 --- a/third_party/rust/typeable/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -#![deny(missing_docs)] -#![deny(warnings)] - -//! Exposes `Typeable`, which exposes the `get_type` method, which gives -//! the `TypeId` of any 'static type. - -use std::any::{Any, TypeId}; - -/// Universal mixin trait for adding a `get_type` method. -/// -pub trait Typeable: Any { - /// Get the `TypeId` of this object. 
- #[inline(always)] - fn get_type(&self) -> TypeId { TypeId::of::() } -} - -impl Typeable for T {} - diff --git a/third_party/rust/unicase/.cargo-checksum.json b/third_party/rust/unicase/.cargo-checksum.json deleted file mode 100644 index b505bf630a3a..000000000000 --- a/third_party/rust/unicase/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".travis.yml":"2b8c34447a7f2e36ed670780610caaf95d2b6450406762a28e866d10d412b3a2","Cargo.toml":"c9e02b663403a7bbc6a72da2db4dd19fc107f003c70fc908309168cd01205abc","LICENSE":"a745ca7ae4a3c089cfbe4b0a6288fa95776244a65be2914b147878146f8c64da","README.md":"4c7e3e81e09029b9c8465241a6b673ba5f723ea279464fd0d44c3544ed0867c5","build.rs":"2c7412ba90a1181cdfaf5dbbe04076054cc565fd753a2c83e59c2f34208abf6c","src/lib.rs":"fd09b88d0aa3a0e8f51e060267d031678a86cb6496afaa5f33224c3d2e2deaf3"},"package":"7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33"} \ No newline at end of file diff --git a/third_party/rust/unicase/.travis.yml b/third_party/rust/unicase/.travis.yml deleted file mode 100644 index b42e8bbb74d0..000000000000 --- a/third_party/rust/unicase/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -language: rust - -sudo: false - -matrix: - fast_finish: true - include: - - rust: nightly - env: FEATURES="--features nightly" - - rust: beta - - rust: stable - - rust: 1.3.0 - -cache: - apt: true - directories: - - target/debug/deps - - target/debug/build - -after_success: | - [ $TRAVIS_BRANCH = master ] && - [ $TRAVIS_PULL_REQUEST = false ] && - cargo doc && - echo '' > target/doc/index.html && - sudo pip install ghp-import && - ghp-import -n target/doc && - git push -fq https://${TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages - -env: - global: - - secure: rkPwcgjGQobsH3/LAV3Ecyt5Q6BiqWhAITLUUCX/lrBjXDaUma+RUjnh7bEzCQ5sG1hFLCJUlLZbWuNx+95t7yRgP14T3asp/a4PdtPkwL8okqq7RLAt7tjI6optUZ8Kr7HNp3jQDHda4tpSLDaZvM3zmrHOLwsDpYLRNAbhWM8= diff --git a/third_party/rust/unicase/README.md b/third_party/rust/unicase/README.md deleted 
file mode 100644 index f77322522e41..000000000000 --- a/third_party/rust/unicase/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# unicase - -[![Build Status](https://travis-ci.org/seanmonstar/unicase.svg?branch=master)](https://travis-ci.org/seanmonstar/unicase) - -Compare strings when case is not important. - -```rust -if UniCase(method) == UniCase('GET') { - // GET request -} -``` - -## License - -[MIT](./LICENSE) diff --git a/third_party/rust/unicase/build.rs b/third_party/rust/unicase/build.rs deleted file mode 100644 index 4483ec8fbff6..000000000000 --- a/third_party/rust/unicase/build.rs +++ /dev/null @@ -1,10 +0,0 @@ -extern crate version_check as rustc; - -fn main() { - if rustc::is_min_version("1.5.0").map(|(is_min, _)| is_min).unwrap_or(true) { - println!("cargo:rustc-cfg=__unicase__iter_cmp"); - } - if rustc::is_min_version("1.13.0").map(|(is_min, _)| is_min).unwrap_or(true) { - println!("cargo:rustc-cfg=__unicase__default_hasher"); - } -} diff --git a/third_party/rust/unicase/src/lib.rs b/third_party/rust/unicase/src/lib.rs deleted file mode 100644 index 43a709cff991..000000000000 --- a/third_party/rust/unicase/src/lib.rs +++ /dev/null @@ -1,214 +0,0 @@ -#![cfg_attr(test, deny(missing_docs))] -#![cfg_attr(test, deny(warnings))] -#![cfg_attr(feature = "heap_size", feature(custom_derive, plugin))] -#![cfg_attr(feature = "heap_size", plugin(heapsize_plugin))] - -//! # Case -//! -//! Case provices a way of specifying strings that are case-insensitive. -//! -//! ## Example -//! -//! ```rust -//! use unicase::UniCase; -//! -//! let a = UniCase("foobar"); -//! let b = UniCase("FoObAr"); -//! -//! assert_eq!(a, b); -//! ``` - -#[cfg(feature = "heap_size")] -extern crate heapsize; - -use std::ascii::AsciiExt; -#[cfg(__unicase__iter_cmp)] -use std::cmp::Ordering; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; -use std::str::FromStr; - -/// Case Insensitive wrapper of strings. 
-#[derive(Copy, Clone, Debug)] -#[cfg_attr(feature = "heap_size", derive(HeapSizeOf))] -pub struct UniCase(pub S); - -impl Deref for UniCase { - type Target = S; - #[inline] - fn deref<'a>(&'a self) -> &'a S { - &self.0 - } -} - -impl DerefMut for UniCase { - #[inline] - fn deref_mut<'a>(&'a mut self) -> &'a mut S { - &mut self.0 - } -} - -#[cfg(__unicase__iter_cmp)] -impl> PartialOrd for UniCase { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -#[cfg(__unicase__iter_cmp)] -impl> Ord for UniCase { - fn cmp(&self, other: &Self) -> Ordering { - let self_chars = self.as_ref().chars().map(|c| c.to_ascii_lowercase()); - let other_chars = other.as_ref().chars().map(|c| c.to_ascii_lowercase()); - self_chars.cmp(other_chars) - } -} - -impl> AsRef for UniCase { - #[inline] - fn as_ref(&self) -> &str { - self.0.as_ref() - } - -} - -impl fmt::Display for UniCase { - #[inline] - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, fmt) - } -} - -impl, S2: AsRef> PartialEq for UniCase { - #[inline] - fn eq(&self, other: &S2) -> bool { - self.as_ref().eq_ignore_ascii_case(other.as_ref()) - } -} - - -impl> Eq for UniCase {} - -impl FromStr for UniCase { - type Err = ::Err; - fn from_str(s: &str) -> Result, ::Err> { - s.parse().map(UniCase) - } -} - -impl> Hash for UniCase { - #[inline] - fn hash(&self, hasher: &mut H) { - for byte in self.as_ref().bytes().map(|b| b.to_ascii_lowercase()) { - hasher.write(&[byte]); - } - } -} - -macro_rules! from_impl { - ($from:ty => $to:ty; $by:ident) => ( - impl<'a> From<$from> for UniCase<$to> { - fn from(s: $from) -> Self { - UniCase(s.$by()) - } - } - ); - ($from:ty => $to:ty) => ( from_impl!($from => $to; into); ) -} - -macro_rules! 
into_impl { - ($to:ty) => ( - impl<'a> Into<$to> for UniCase<$to> { - fn into(self) -> $to { - self.0 - } - } - ); -} - -from_impl!(&'a str => &'a str); -from_impl!(&'a str => String); -from_impl!(&'a String => &'a str; as_ref); -from_impl!(String => String); - -into_impl!(&'a str); -into_impl!(String); - -#[cfg(test)] -mod test { - use super::UniCase; - use std::hash::{Hash, Hasher}; - #[cfg(not(__unicase__default_hasher))] - use std::hash::SipHasher as DefaultHasher; - #[cfg(__unicase__default_hasher)] - use std::collections::hash_map::DefaultHasher; - - fn hash(t: &T) -> u64 { - let mut s = DefaultHasher::new(); - t.hash(&mut s); - s.finish() - } - - #[test] - fn test_copy_for_refs() { - fn foo(_: UniCase) {} - - let a = UniCase("foobar"); - foo(a); - foo(a); - } - - #[test] - fn test_case_insensitive() { - let a = UniCase("foobar"); - let b = UniCase("FOOBAR"); - - assert_eq!(a, b); - assert_eq!(hash(&a), hash(&b)); - } - - #[test] - fn test_different_string_types() { - let a = UniCase("foobar"); - let b = "FOOBAR".to_owned(); - assert_eq!(a, b); - assert_eq!(UniCase(b), a); - } - - #[cfg(__unicase__iter_cmp)] - #[test] - fn test_case_cmp() { - assert!(UniCase("foobar") == UniCase("FOOBAR")); - assert!(UniCase("a") < UniCase("B")); - - assert!(UniCase("A") < UniCase("b")); - assert!(UniCase("aa") > UniCase("a")); - - assert!(UniCase("a") < UniCase("aa")); - assert!(UniCase("a") < UniCase("AA")); - } - - #[test] - fn test_from_impls() { - let view: &'static str = "foobar"; - let _: UniCase<&'static str> = view.into(); - let _: UniCase<&str> = view.into(); - let _: UniCase = view.into(); - - let owned: String = view.to_owned(); - let _: UniCase<&str> = (&owned).into(); - let _: UniCase = owned.into(); - } - - #[test] - fn test_into_impls() { - let view: UniCase<&'static str> = UniCase("foobar"); - let _: &'static str = view.into(); - let _: &str = view.into(); - - let owned: UniCase = "foobar".into(); - let _: String = owned.clone().into(); - let _: &str = 
owned.as_ref(); - } -} diff --git a/third_party/rust/version_check/.cargo-checksum.json b/third_party/rust/version_check/.cargo-checksum.json deleted file mode 100644 index 6d4eff05674f..000000000000 --- a/third_party/rust/version_check/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"dad6a4a872650afdecb540fa006f1a62527578ae2c7460e41ea6fd553dd317d4","LICENSE":"ce1f970284c4ab06e90671a984086692dc44da3e2b8ee86444d79b6f77bcf9e7","README.md":"27659fafbe25cb1a91dcfe30c6fd8c001ef9faea08b241f644b0b701e5ff3835","src/lib.rs":"8d73765fea27c9171502c57707801b4afd3fd355cad087a84f523677d2444978"},"package":"6b772017e347561807c1aa192438c5fd74242a670a6cffacc40f2defd1dc069d"} \ No newline at end of file diff --git a/third_party/rust/version_check/LICENSE b/third_party/rust/version_check/LICENSE deleted file mode 100644 index b551a84c8f53..000000000000 --- a/third_party/rust/version_check/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2017 Sergio Benitez - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/version_check/README.md b/third_party/rust/version_check/README.md deleted file mode 100644 index 161d8c74d080..000000000000 --- a/third_party/rust/version_check/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# version\_check - -This tiny crate checks that the running or installed `rustc` meets some version -requirements. The version is queried by calling the Rust compiler with -`--version`. The path to the compiler is determined first via the `RUSTC` -environment variable. If it is not set, then `rustc` is used. If that fails, no -determination is made, and calls return `None`. - -## Usage - -Add to your `Cargo.toml` file: - -```toml -[dependencies] -version_check = "0.1" -``` - -## Examples - -Check that the running compiler is a nightly release: - -```rust -extern crate version_check; - -match version_check::is_nightly() { - Some(true) => "running a nightly", - Some(false) => "not nightly", - None => "couldn't figure it out" -}; -``` - -Check that the running compiler is at least version `1.13.0`: - -```rust -extern crate version_check; - -match version_check::is_min_version("1.13.0") { - Some((true, version)) => format!("Yes! It's: {}", version), - Some((false, version)) => format!("No! {} is too old!", version), - None => "couldn't figure it out".into() -}; -``` - -Check that the running compiler was released on or after `2016-12-18`: - -```rust -extern crate version_check; - -match version_check::is_min_date("2016-12-18") { - Some((true, date)) => format!("Yes! It's: {}", date), - Some((false, date)) => format!("No! {} is too long ago!", date), - None => "couldn't figure it out".into() -}; -``` - -## Alternatives - -This crate is dead simple with no dependencies. 
If you need something more and -don't care about panicking if the version cannot be obtained or adding -dependencies, see [rustc_version](https://crates.io/crates/rustc_version). - -## License - -MIT. Have fun! diff --git a/third_party/rust/version_check/src/lib.rs b/third_party/rust/version_check/src/lib.rs deleted file mode 100644 index 439b57bac9ae..000000000000 --- a/third_party/rust/version_check/src/lib.rs +++ /dev/null @@ -1,246 +0,0 @@ -//! This tiny crate checks that the running or installed `rustc` meets some -//! version requirements. The version is queried by calling the Rust compiler -//! with `--version`. The path to the compiler is determined first via the -//! `RUSTC` environment variable. If it is not set, then `rustc` is used. If -//! that fails, no determination is made, and calls return `None`. -//! -//! # Example -//! -//! Check that the running compiler is a nightly release: -//! -//! ```rust -//! extern crate version_check; -//! -//! match version_check::is_nightly() { -//! Some(true) => "running a nightly", -//! Some(false) => "not nightly", -//! None => "couldn't figure it out" -//! }; -//! ``` -//! -//! Check that the running compiler is at least version `1.13.0`: -//! -//! ```rust -//! extern crate version_check; -//! -//! match version_check::is_min_version("1.13.0") { -//! Some((true, version)) => format!("Yes! It's: {}", version), -//! Some((false, version)) => format!("No! {} is too old!", version), -//! None => "couldn't figure it out".into() -//! }; -//! ``` -//! -//! Check that the running compiler was released on or after `2016-12-18`: -//! -//! ```rust -//! extern crate version_check; -//! -//! match version_check::is_min_date("2016-12-18") { -//! Some((true, date)) => format!("Yes! It's: {}", date), -//! Some((false, date)) => format!("No! {} is too long ago!", date), -//! None => "couldn't figure it out".into() -//! }; -//! ``` -//! -//! # Alternatives -//! -//! This crate is dead simple with no dependencies. 
If you need something more -//! and don't care about panicking if the version cannot be obtained or adding -//! dependencies, see [rustc_version](https://crates.io/crates/rustc_version). - -use std::env; -use std::process::Command; - -// Convert a string of %Y-%m-%d to a single u32 maintaining ordering. -fn str_to_ymd(ymd: &str) -> Option { - let ymd: Vec = ymd.split("-").filter_map(|s| s.parse::().ok()).collect(); - if ymd.len() != 3 { - return None - } - - let (y, m, d) = (ymd[0], ymd[1], ymd[2]); - Some((y << 9) | (m << 5) | d) -} - -// Convert a string with prefix major-minor-patch to a single u64 maintaining -// ordering. Assumes none of the components are > 1048576. -fn str_to_mmp(mmp: &str) -> Option { - let mut mmp: Vec = mmp.split('-') - .nth(0) - .unwrap_or("") - .split('.') - .filter_map(|s| s.parse::().ok()) - .collect(); - - if mmp.is_empty() { - return None - } - - while mmp.len() < 3 { - mmp.push(0); - } - - let (maj, min, patch) = (mmp[0] as u64, mmp[1] as u64, mmp[2] as u64); - Some((maj << 32) | (min << 16) | patch) -} - -/// Returns (version, date) as available. -fn version_and_date_from_rustc_version(s: &str) -> (Option, Option) { - let mut components = s.split(" "); - let version = components.nth(1); - let date = components.nth(1).map(|s| s.trim_right().trim_right_matches(")")); - (version.map(|s| s.to_string()), date.map(|s| s.to_string())) -} - -/// Returns (version, date) as available. -fn get_version_and_date() -> Option<(Option, Option)> { - env::var("RUSTC").ok() - .and_then(|rustc| Command::new(rustc).arg("--version").output().ok()) - .or_else(|| Command::new("rustc").arg("--version").output().ok()) - .and_then(|output| String::from_utf8(output.stdout).ok()) - .map(|s| version_and_date_from_rustc_version(&s)) -} - -/// Checks that the running or installed `rustc` was released no earlier than -/// some date. -/// -/// The format of `min_date` must be YYYY-MM-DD. For instance: `2016-12-20` or -/// `2017-01-09`. 
-/// -/// If the date cannot be retrieved or parsed, or if `min_date` could not be -/// parsed, returns `None`. Otherwise returns a tuple where the first value is -/// `true` if the installed `rustc` is at least from `min_data` and the second -/// value is the date (in YYYY-MM-DD) of the installed `rustc`. -pub fn is_min_date(min_date: &str) -> Option<(bool, String)> { - if let Some((_, Some(actual_date_str))) = get_version_and_date() { - str_to_ymd(&actual_date_str) - .and_then(|actual| str_to_ymd(min_date).map(|min| (min, actual))) - .map(|(min, actual)| (actual >= min, actual_date_str)) - } else { - None - } -} - -/// Checks that the running or installed `rustc` is at least some minimum -/// version. -/// -/// The format of `min_version` is a semantic version: `1.3.0`, `1.15.0-beta`, -/// `1.14.0`, `1.16.0-nightly`, etc. -/// -/// If the version cannot be retrieved or parsed, or if `min_version` could not -/// be parsed, returns `None`. Otherwise returns a tuple where the first value -/// is `true` if the installed `rustc` is at least `min_version` and the second -/// value is the version (semantic) of the installed `rustc`. -pub fn is_min_version(min_version: &str) -> Option<(bool, String)> { - if let Some((Some(actual_version_str), _)) = get_version_and_date() { - str_to_mmp(&actual_version_str) - .and_then(|actual| str_to_mmp(min_version).map(|min| (min, actual))) - .map(|(min, actual)| (actual >= min, actual_version_str)) - } else { - None - } -} - -fn version_channel_is(channel: &str) -> Option { - get_version_and_date() - .and_then(|(version_str_opt, _)| version_str_opt) - .map(|version_str| version_str.contains(channel)) -} - -/// Determines whether the running or installed `rustc` is on the nightly -/// channel. -/// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version is a nightly release, and `Some(false)` -/// otherwise. 
-pub fn is_nightly() -> Option { - version_channel_is("nightly") -} - -/// Determines whether the running or installed `rustc` is on the beta channel. -/// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version is a beta release, and `Some(false)` -/// otherwise. -pub fn is_beta() -> Option { - version_channel_is("beta") -} - -/// Determines whether the running or installed `rustc` is on the dev channel. -/// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version is a dev release, and `Some(false)` -/// otherwise. -pub fn is_dev() -> Option { - version_channel_is("dev") -} - -/// Determines whether the running or installed `rustc` supports feature flags. -/// In other words, if the channel is either "nightly" or "dev". -/// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version supports features, and `Some(false)` -/// otherwise. -pub fn supports_features() -> Option { - match is_nightly() { - b@Some(true) => b, - _ => is_dev() - } -} - -#[cfg(test)] -mod tests { - use super::version_and_date_from_rustc_version; - use super::str_to_mmp; - - macro_rules! check_mmp { - ($string:expr => ($x:expr, $y:expr, $z:expr)) => ( - if let Some(mmp) = str_to_mmp($string) { - let expected = $x << 32 | $y << 16 | $z; - if mmp != expected { - panic!("{} didn't parse as {}.{}.{}.", $string, $x, $y, $z); - } - } else { - panic!("{} didn't parse for mmp testing.", $string); - } - ) - } - - macro_rules! 
check_version { - ($s:expr => ($x:expr, $y:expr, $z:expr)) => ( - if let (Some(version_str), _) = version_and_date_from_rustc_version($s) { - check_mmp!(&version_str => ($x, $y, $z)); - } else { - panic!("{} didn't parse for version testing.", $s); - } - ) - } - - #[test] - fn test_str_to_mmp() { - check_mmp!("1.18.0" => (1, 18, 0)); - check_mmp!("1.19.0" => (1, 19, 0)); - check_mmp!("1.19.0-nightly" => (1, 19, 0)); - check_mmp!("1.12.2349" => (1, 12, 2349)); - check_mmp!("0.12" => (0, 12, 0)); - check_mmp!("1.12.5" => (1, 12, 5)); - check_mmp!("1.12" => (1, 12, 0)); - check_mmp!("1" => (1, 0, 0)); - } - - #[test] - fn test_version_parse() { - check_version!("rustc 1.18.0" => (1, 18, 0)); - check_version!("rustc 1.8.0" => (1, 8, 0)); - check_version!("rustc 1.20.0-nightly" => (1, 20, 0)); - check_version!("rustc 1.20" => (1, 20, 0)); - check_version!("rustc 1.3" => (1, 3, 0)); - check_version!("rustc 1" => (1, 0, 0)); - check_version!("rustc 1.2.5.6" => (1, 2, 5)); - check_version!("rustc 1.5.1-beta" => (1, 5, 1)); - check_version!("rustc 1.20.0-nightly (d84693b93 2017-07-09)" => (1, 20, 0)); - check_version!("rustc 1.20.0 (d84693b93 2017-07-09)" => (1, 20, 0)); - check_version!("rustc 1.20.0 (2017-07-09)" => (1, 20, 0)); - check_version!("rustc 1.20.0-dev (2017-07-09)" => (1, 20, 0)); - } -} diff --git a/third_party/rust/want/.cargo-checksum.json b/third_party/rust/want/.cargo-checksum.json new file mode 100644 index 000000000000..73a3abd5e648 --- /dev/null +++ b/third_party/rust/want/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"01b02421032674e5352e0dd9625a0ff6920b9e4729eaf1478d3e39521ae039dd","LICENSE":"6dfa8def8b810ab8043bdacf09b4166b9c4651f93b4a19399ddc4c52055b1069","README.md":"7b86dab1b6a6fce64f67087020800d7b7223b6f94ea3d4eda76beac5fc8be6d8","src/lib.rs":"c3fbea2335d472872333c15699e3f3e7739d939c3a124309a19f83aef7e03547"},"package":"797464475f30ddb8830cc529aaaae648d581f99e2036a928877dfde027ddf6b3"} \ No newline at end of file diff --git 
a/third_party/rust/unicase/Cargo.toml b/third_party/rust/want/Cargo.toml similarity index 53% rename from third_party/rust/unicase/Cargo.toml rename to third_party/rust/want/Cargo.toml index ee82de2ccf74..adef99aa150c 100644 --- a/third_party/rust/unicase/Cargo.toml +++ b/third_party/rust/want/Cargo.toml @@ -11,23 +11,20 @@ # will likely look very different (and much more reasonable) [package] -name = "unicase" -version = "1.4.2" -authors = ["Sean McArthur "] -build = "build.rs" -description = "A case-insensitive wrapper around strings." -keywords = ["lowercase", "case", "case-insensitive"] +name = "want" +version = "0.0.6" +authors = ["Sean McArthur "] +description = "Detect when another Future wants a result." +homepage = "https://github.com/seanmonstar/want" +documentation = "https://docs.rs/want" +keywords = ["futures", "channel"] license = "MIT" -repository = "https://github.com/seanmonstar/unicase" -[dependencies.heapsize] -version = ">=0.2.0, <0.4" -optional = true - -[dependencies.heapsize_plugin] -version = "0.1.2" -optional = true -[build-dependencies.version_check] +repository = "https://github.com/seanmonstar/want" +[dependencies.futures] version = "0.1" -[features] -heap_size = ["heapsize", "heapsize_plugin"] +[dependencies.log] +version = "0.4" + +[dependencies.try-lock] +version = "0.2" diff --git a/third_party/rust/mime/LICENSE b/third_party/rust/want/LICENSE similarity index 96% rename from third_party/rust/mime/LICENSE rename to third_party/rust/want/LICENSE index 557b7e5fc97e..2c7783422144 100644 --- a/third_party/rust/mime/LICENSE +++ b/third_party/rust/want/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014 Sean McArthur +Copyright (c) 2018 Sean McArthur Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/third_party/rust/want/README.md b/third_party/rust/want/README.md new file mode 100644 index 000000000000..a8d169a2ceac --- /dev/null 
+++ b/third_party/rust/want/README.md @@ -0,0 +1,3 @@ +# Want + +A `Future`s channel-like utility to signal when a value is wanted. diff --git a/third_party/rust/want/src/lib.rs b/third_party/rust/want/src/lib.rs new file mode 100644 index 000000000000..fb6fe48da834 --- /dev/null +++ b/third_party/rust/want/src/lib.rs @@ -0,0 +1,501 @@ +#![doc(html_root_url = "https://docs.rs/want/0.0.6")] +#![deny(warnings)] +#![deny(missing_docs)] +#![deny(missing_debug_implementations)] + +//! A Futures channel-like utility to signal when a value is wanted. +//! +//! Futures are supposed to be lazy, and only starting work if `Future::poll` +//! is called. The same is true of `Stream`s, but when using a channel as +//! a `Stream`, it can be hard to know if the receiver is ready for the next +//! value. +//! +//! Put another way, given a `(tx, rx)` from `futures::sync::mpsc::channel()`, +//! how can the sender (`tx`) know when the receiver (`rx`) actually wants more +//! work to be produced? Just because there is room in the channel buffer +//! doesn't mean the work would be used by the receiver. +//! +//! This is where something like `want` comes in. Added to a channel, you can +//! make sure that the `tx` only creates the message and sends it when the `rx` +//! has `poll()` for it, and the buffer was empty. + +extern crate futures; +#[macro_use] +extern crate log; +extern crate try_lock; + +use std::fmt; +use std::mem; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +// SeqCst is the only ordering used to ensure accessing the state and +// TryLock are never re-ordered. +use std::sync::atomic::Ordering::SeqCst; + +use futures::{Async, Poll}; +use futures::task::{self, Task}; + +use try_lock::TryLock; + +/// Create a new `want` channel. 
+pub fn new() -> (Giver, Taker) { + let inner = Arc::new(Inner { + state: AtomicUsize::new(State::Idle.into()), + task: TryLock::new(None), + }); + let inner2 = inner.clone(); + ( + Giver { + inner: inner, + }, + Taker { + inner: inner2, + }, + ) +} + +/// An entity that gives a value when wanted. +pub struct Giver { + inner: Arc, +} + +/// An entity that wants a value. +pub struct Taker { + inner: Arc, +} + +/// A cloneable `Giver`. +/// +/// It differs from `Giver` in that you cannot poll for `want`. It's only +/// usable as a cancellation watcher. +#[derive(Clone)] +pub struct SharedGiver { + inner: Arc, +} + +/// The `Taker` has canceled its interest in a value. +pub struct Closed { + _inner: (), +} + +#[derive(Clone, Copy, Debug)] +enum State { + Idle, + Want, + Give, + Closed, +} + +impl From for usize { + fn from(s: State) -> usize { + match s { + State::Idle => 0, + State::Want => 1, + State::Give => 2, + State::Closed => 3, + } + } +} + +impl From for State { + fn from(num: usize) -> State { + match num { + 0 => State::Idle, + 1 => State::Want, + 2 => State::Give, + 3 => State::Closed, + _ => unreachable!("unknown state: {}", num), + } + } +} + +struct Inner { + state: AtomicUsize, + task: TryLock>, +} + +// ===== impl Giver ====== + +impl Giver { + /// Poll whether the `Taker` has registered interest in another value. + /// + /// - If the `Taker` has called `want()`, this returns `Async::Ready(())`. + /// - If the `Taker` has not called `want()` since last poll, this + /// returns `Async::NotReady`, and parks the current task to be notified + /// when the `Taker` does call `want()`. + /// - If the `Taker` has canceled (or dropped), this returns `Closed`. + /// + /// After knowing that the Taker is wanting, the state can be reset by + /// calling [`give`](Giver::give). 
+ pub fn poll_want(&mut self) -> Poll<(), Closed> { + loop { + let state = self.inner.state.load(SeqCst).into(); + match state { + State::Want => { + trace!("poll_want: taker wants!"); + return Ok(Async::Ready(())); + }, + State::Closed => { + trace!("poll_want: closed"); + return Err(Closed { _inner: () }); + }, + State::Idle | State::Give => { + // Taker doesn't want anything yet, so park. + if let Some(mut locked) = self.inner.task.try_lock_order(SeqCst, SeqCst) { + + // While we have the lock, try to set to GIVE. + let old = self.inner.state.compare_and_swap( + state.into(), + State::Give.into(), + SeqCst, + ); + // If it's still the first state (Idle or Give), park current task. + if old == state.into() { + let park = locked.as_ref() + .map(|t| !t.will_notify_current()) + .unwrap_or(true); + if park { + let old = mem::replace(&mut *locked, Some(task::current())); + drop(locked); + old.map(|prev_task| { + // there was an old task parked here. + // it might be waiting to be notified, + // so poke it before dropping. + prev_task.notify(); + }); + } + return Ok(Async::NotReady) + } + // Otherwise, something happened! Go around the loop again. + } else { + // if we couldn't take the lock, then a Taker has it. + // The *ONLY* reason is because it is in the process of notifying us + // of its want. + // + // We need to loop again to see what state it was changed to. + } + }, + } + } + } + + /// Mark the state as idle, if the Taker currently is wanting. + /// + /// Returns true if Taker was wanting, false otherwise. + #[inline] + pub fn give(&self) -> bool { + // only set to IDLE if it is still Want + self.inner.state.compare_and_swap( + State::Want.into(), + State::Idle.into(), + SeqCst, + ) == State::Want.into() + } + + /// Check if the `Taker` has called `want()` without parking a task. + /// + /// This is safe to call outside of a futures task context, but other + /// means of being notified is left to the user. 
+ #[inline] + pub fn is_wanting(&self) -> bool { + self.inner.state.load(SeqCst) == State::Want.into() + } + + + /// Check if the `Taker` has canceled interest without parking a task. + #[inline] + pub fn is_canceled(&self) -> bool { + self.inner.state.load(SeqCst) == State::Closed.into() + } + + /// Converts this into a `SharedGiver`. + #[inline] + pub fn shared(self) -> SharedGiver { + SharedGiver { + inner: self.inner, + } + } +} + +impl fmt::Debug for Giver { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Giver") + .field("state", &self.inner.state()) + .finish() + } +} + +// ===== impl SharedGiver ====== + +impl SharedGiver { + /// Check if the `Taker` has called `want()` without parking a task. + /// + /// This is safe to call outside of a futures task context, but other + /// means of being notified is left to the user. + #[inline] + pub fn is_wanting(&self) -> bool { + self.inner.state.load(SeqCst) == State::Want.into() + } + + + /// Check if the `Taker` has canceled interest without parking a task. + #[inline] + pub fn is_canceled(&self) -> bool { + self.inner.state.load(SeqCst) == State::Closed.into() + } +} + +impl fmt::Debug for SharedGiver { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SharedGiver") + .field("state", &self.inner.state()) + .finish() + } +} + +// ===== impl Taker ====== + +impl Taker { + /// Signal to the `Giver` that the want is canceled. + /// + /// This is useful to tell that the channel is closed if you cannot + /// drop the value yet. + #[inline] + pub fn cancel(&mut self) { + trace!("signal: {:?}", State::Closed); + self.signal(State::Closed) + } + + /// Signal to the `Giver` that a value is wanted. 
+ #[inline] + pub fn want(&mut self) { + debug_assert!( + self.inner.state.load(SeqCst) != State::Closed.into(), + "want called after cancel" + ); + trace!("signal: {:?}", State::Want); + self.signal(State::Want) + } + + #[inline] + fn signal(&mut self, state: State) { + let old_state = self.inner.state.swap(state.into(), SeqCst).into(); + match old_state { + State::Idle | State::Want | State::Closed => (), + State::Give => { + loop { + if let Some(mut locked) = self.inner.task.try_lock_order(SeqCst, SeqCst) { + if let Some(task) = locked.take() { + drop(locked); + trace!("signal found waiting giver, notifying"); + task.notify(); + } + return; + } else { + // if we couldn't take the lock, then a Giver has it. + // The *ONLY* reason is because it is in the process of parking. + // + // We need to loop and take the lock so we can notify this task. + } + } + }, + } + } +} + +impl Drop for Taker { + #[inline] + fn drop(&mut self) { + self.signal(State::Closed); + } +} + +impl fmt::Debug for Taker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Taker") + .field("state", &self.inner.state()) + .finish() + } +} + +// ===== impl Closed ====== + +impl fmt::Debug for Closed { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Closed") + .finish() + } +} + +// ===== impl Inner ====== + +impl Inner { + #[inline] + fn state(&self) -> State { + self.state.load(SeqCst).into() + } +} + +#[cfg(test)] +mod tests { + use std::thread; + use futures::{Async, Stream}; + use futures::future::{poll_fn, Future}; + use futures::sync::{mpsc, oneshot}; + use super::*; + + #[test] + fn want_ready() { + let (mut gv, mut tk) = new(); + tk.want(); + assert!(gv.poll_want().unwrap().is_ready()); + } + + #[test] + fn want_notify_0() { + let (mut gv, mut tk) = new(); + let (tx, rx) = oneshot::channel(); + + thread::spawn(move || { + tk.want(); + // use a oneshot to keep this thread alive + // until other thread was notified of want + 
rx.wait().expect("rx"); + }); + + poll_fn(|| { + gv.poll_want() + }).wait().expect("wait"); + + assert!(gv.is_wanting(), "still wanting after poll_want success"); + assert!(gv.give(), "give is true when wanting"); + + assert!(!gv.is_wanting(), "no longer wanting after give"); + assert!(!gv.is_canceled(), "give doesn't cancel"); + + assert!(!gv.give(), "give is false if not wanting"); + + tx.send(()).expect("tx"); + } + + /// This tests that if the Giver moves tasks after parking, + /// it will still wake up the correct task. + #[test] + fn want_notify_moving_tasks() { + use std::sync::Arc; + use futures::executor::{spawn, Notify, NotifyHandle}; + + struct WantNotify; + + impl Notify for WantNotify { + fn notify(&self, _id: usize) { + } + } + + fn n() -> NotifyHandle { + Arc::new(WantNotify).into() + } + + let (mut gv, mut tk) = new(); + + let mut s = spawn(poll_fn(move || { + gv.poll_want() + })); + + // Register with t1 as the task::current() + let t1 = n(); + assert!(s.poll_future_notify(&t1, 1).unwrap().is_not_ready()); + + thread::spawn(move || { + thread::sleep(::std::time::Duration::from_millis(100)); + tk.want(); + }); + + // And now, move to a ThreadNotify task. 
+ s.into_inner().wait().expect("poll_want"); + } + + #[test] + fn cancel() { + // explicit + let (mut gv, mut tk) = new(); + + assert!(!gv.is_canceled()); + + tk.cancel(); + + assert!(gv.is_canceled()); + assert!(gv.poll_want().is_err()); + + // implicit + let (mut gv, tk) = new(); + + assert!(!gv.is_canceled()); + + drop(tk); + + assert!(gv.is_canceled()); + assert!(gv.poll_want().is_err()); + + // notifies + let (mut gv, tk) = new(); + + thread::spawn(move || { + let _tk = tk; + // and dropped + }); + + poll_fn(move || { + gv.poll_want() + }).wait().expect_err("wait"); + } + + #[test] + fn stress() { + let nthreads = 5; + let nwants = 100; + + for _ in 0..nthreads { + let (mut gv, mut tk) = new(); + let (mut tx, mut rx) = mpsc::channel(0); + + // rx thread + thread::spawn(move || { + let mut cnt = 0; + poll_fn(move || { + while cnt < nwants { + let n = match rx.poll().expect("rx poll") { + Async::Ready(n) => n.expect("rx opt"), + Async::NotReady => { + tk.want(); + return Ok(Async::NotReady); + }, + }; + assert_eq!(cnt, n); + cnt += 1; + } + Ok::<_, ()>(Async::Ready(())) + }).wait().expect("rx wait"); + }); + + // tx thread + thread::spawn(move || { + let mut cnt = 0; + let nsent = poll_fn(move || { + loop { + while let Ok(()) = tx.try_send(cnt) { + cnt += 1; + } + match gv.poll_want() { + Ok(Async::Ready(_)) => (), + Ok(Async::NotReady) => return Ok::<_, ()>(Async::NotReady), + Err(_) => return Ok(Async::Ready(cnt)), + } + } + }).wait().expect("tx wait"); + + assert_eq!(nsent, nwants); + }).join().expect("thread join"); + } + } +} diff --git a/toolkit/components/search/nsSearchService.js b/toolkit/components/search/nsSearchService.js index 2c0982d11f7d..9d008ded66d1 100644 --- a/toolkit/components/search/nsSearchService.js +++ b/toolkit/components/search/nsSearchService.js @@ -779,18 +779,6 @@ function getLocalizedPref(aPrefName, aDefault) { return aDefault; } -/** - * Wrapper for nsIPrefBranch::getBoolPref. - * @param aPrefName - * The name of the pref to get. 
- * @returns aDefault if the requested pref doesn't exist. - */ -function getBoolPref(aName, aDefault) { - if (Services.prefs.getPrefType(aName) != Ci.nsIPrefBranch.PREF_BOOL) - return aDefault; - return Services.prefs.getBoolPref(aName); -} - /** * @return a sanitized name to be used as a filename, or a random name * if a sanitized name cannot be obtained (if aName contains @@ -1415,7 +1403,7 @@ Engine.prototype = { stringBundle.formatStringFromName("addEngineConfirmation", [this._name, this._uri.host], 2); var checkboxMessage = null; - if (!getBoolPref(BROWSER_SEARCH_PREF + "noCurrentEngine", false)) + if (!Services.prefs.getBoolPref(BROWSER_SEARCH_PREF + "noCurrentEngine", false)) checkboxMessage = stringBundle.GetStringFromName("addEngineAsCurrentText"); var addButtonLabel = @@ -3542,7 +3530,7 @@ SearchService.prototype = { // If the user has specified a custom engine order, read the order // information from the metadata instead of the default prefs. - if (getBoolPref(BROWSER_SEARCH_PREF + "useDBForOrder", false)) { + if (Services.prefs.getBoolPref(BROWSER_SEARCH_PREF + "useDBForOrder", false)) { LOG("_buildSortedEngineList: using db for order"); // Flag to keep track of whether or not we need to call _saveSortedEngineList. @@ -4468,7 +4456,7 @@ SearchService.prototype = { notify: function SRCH_SVC_notify(aTimer) { LOG("_notify: checking for updates"); - if (!getBoolPref(BROWSER_SEARCH_PREF + "update", true)) + if (!Services.prefs.getBoolPref(BROWSER_SEARCH_PREF + "update", true)) return; // Our timer has expired, but unfortunately, we can't get any data from it. @@ -4573,7 +4561,7 @@ const SEARCH_UPDATE_LOG_PREFIX = "*** Search update: "; * logging pref (browser.search.update.log) is set to true. 
*/ function ULOG(aText) { - if (getBoolPref(BROWSER_SEARCH_PREF + "update.log", false)) { + if (Services.prefs.getBoolPref(BROWSER_SEARCH_PREF + "update.log", false)) { dump(SEARCH_UPDATE_LOG_PREFIX + aText + "\n"); Services.console.logStringMessage(aText); } @@ -4589,7 +4577,8 @@ var engineUpdateService = { update: function eus_Update(aEngine) { let engine = aEngine.wrappedJSObject; ULOG("update called for " + aEngine._name); - if (!getBoolPref(BROWSER_SEARCH_PREF + "update", true) || !engine._hasUpdates) + if (!Services.prefs.getBoolPref(BROWSER_SEARCH_PREF + "update", true) || + !engine._hasUpdates) return; let testEngine = null; diff --git a/toolkit/xre/nsAppRunner.cpp b/toolkit/xre/nsAppRunner.cpp index 55b1e74a6675..4ec90673ca76 100644 --- a/toolkit/xre/nsAppRunner.cpp +++ b/toolkit/xre/nsAppRunner.cpp @@ -4384,6 +4384,9 @@ void AddSandboxAnnotations() sandboxCapable = true; #elif defined(XP_LINUX) sandboxCapable = SandboxInfo::Get().CanSandboxContent(); +#elif defined(__OpenBSD__) + sandboxCapable = true; + StartOpenBSDSandbox(GeckoProcessType_Default); #endif CrashReporter::AnnotateCrashReport( diff --git a/widget/nsShmImage.cpp b/widget/nsShmImage.cpp index 906409f1b387..f2cba3f20b94 100644 --- a/widget/nsShmImage.cpp +++ b/widget/nsShmImage.cpp @@ -67,6 +67,12 @@ nsShmImage::CreateShmSegment() { size_t size = SharedMemory::PageAlignedSize(mStride * mSize.height); +#if defined(__OpenBSD__) && defined(MOZ_SANDBOX) + static mozilla::LazyLogModule sPledgeLog("SandboxPledge"); + MOZ_LOG(sPledgeLog, mozilla::LogLevel::Debug, + ("%s called when pledged, returning false\n", __func__)); + return false; +#endif mShmId = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600); if (mShmId == -1) { return false; diff --git a/xpcom/base/nsDebugImpl.cpp b/xpcom/base/nsDebugImpl.cpp index f3d9af29607e..07ab28da831e 100644 --- a/xpcom/base/nsDebugImpl.cpp +++ b/xpcom/base/nsDebugImpl.cpp @@ -180,6 +180,10 @@ nsDebugImpl::GetIsDebuggerAttached(bool* aResult) { *aResult = false; +#if 
defined(__OpenBSD__) && defined(MOZ_SANDBOX) + // no access to KERN_PROC_PID sysctl when pledge'd + return NS_OK; +#endif #if defined(XP_WIN) *aResult = ::IsDebuggerPresent(); #elif defined(XP_MACOSX) || defined(__DragonFly__) || defined(__FreeBSD__) \ diff --git a/xpcom/base/nsMemoryReporterManager.cpp b/xpcom/base/nsMemoryReporterManager.cpp index 69272d7b747f..c56933c2bc05 100644 --- a/xpcom/base/nsMemoryReporterManager.cpp +++ b/xpcom/base/nsMemoryReporterManager.cpp @@ -193,6 +193,12 @@ SystemHeapSize(int64_t* aSizeOut) static MOZ_MUST_USE nsresult GetKinfoProcSelf(KINFO_PROC* aProc) { +#if defined(__OpenBSD__) && defined(MOZ_SANDBOX) + static LazyLogModule sPledgeLog("SandboxPledge"); + MOZ_LOG(sPledgeLog, LogLevel::Debug, + ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__)); + return NS_ERROR_FAILURE; +#endif int mib[] = { CTL_KERN, KERN_PROC,