Bug 1879989: build(webgpu): update wgpu to 23392c5228ce81ae3a9772cf93da933be2d5961c r=webgpu-reviewers,supply-chain-reviewers,nical

Differential Revision: https://phabricator.services.mozilla.com/D201659
This commit is contained in:
Erich Gubler 2024-02-27 19:24:42 +00:00
parent aa9c51926b
commit bbae4b3687
95 changed files with 8368 additions and 1285 deletions

View file

@ -25,9 +25,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
replace-with = "vendored-sources"
[source."git+https://github.com/gfx-rs/wgpu?rev=07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"]
[source."git+https://github.com/gfx-rs/wgpu?rev=23392c5228ce81ae3a9772cf93da933be2d5961c"]
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
replace-with = "vendored-sources"
[source."git+https://github.com/glandium/mio?rev=9a2ef335c366044ffe73b1c4acabe50a1daefe05"]

26
Cargo.lock generated
View file

@ -1150,7 +1150,7 @@ dependencies = [
[[package]]
name = "d3d12"
version = "0.19.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a#07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
source = "git+https://github.com/gfx-rs/wgpu?rev=23392c5228ce81ae3a9772cf93da933be2d5961c#23392c5228ce81ae3a9772cf93da933be2d5961c"
dependencies = [
"bitflags 2.4.1",
"libloading",
@ -1422,6 +1422,15 @@ dependencies = [
"quick-error",
]
[[package]]
name = "document-features"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95"
dependencies = [
"litrs",
]
[[package]]
name = "dogear"
version = "0.5.0"
@ -3249,6 +3258,12 @@ version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da"
[[package]]
name = "litrs"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5"
[[package]]
name = "lmdb-rkv"
version = "0.14.0"
@ -3847,7 +3862,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.19.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a#07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
source = "git+https://github.com/gfx-rs/wgpu?rev=23392c5228ce81ae3a9772cf93da933be2d5961c#23392c5228ce81ae3a9772cf93da933be2d5961c"
dependencies = [
"arrayvec",
"bit-set",
@ -6472,13 +6487,14 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.19.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a#07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
source = "git+https://github.com/gfx-rs/wgpu?rev=23392c5228ce81ae3a9772cf93da933be2d5961c#23392c5228ce81ae3a9772cf93da933be2d5961c"
dependencies = [
"arrayvec",
"bit-vec",
"bitflags 2.4.1",
"cfg_aliases",
"codespan-reporting",
"document-features",
"indexmap 2.999.999",
"log",
"naga",
@ -6498,7 +6514,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.19.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a#07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
source = "git+https://github.com/gfx-rs/wgpu?rev=23392c5228ce81ae3a9772cf93da933be2d5961c#23392c5228ce81ae3a9772cf93da933be2d5961c"
dependencies = [
"android_system_properties",
"arrayvec",
@ -6537,7 +6553,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "0.19.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a#07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
source = "git+https://github.com/gfx-rs/wgpu?rev=23392c5228ce81ae3a9772cf93da933be2d5961c#23392c5228ce81ae3a9772cf93da933be2d5961c"
dependencies = [
"bitflags 2.4.1",
"js-sys",

View file

@ -17,7 +17,7 @@ default = []
[dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
# TODO: remove the replay feature on the next update containing https://github.com/gfx-rs/wgpu/pull/5182
features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"]
@ -26,37 +26,37 @@ features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
features = ["metal"]
# We want the wgpu-core Direct3D backends on Windows.
[target.'cfg(windows)'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
features = ["dx12"]
# We want the wgpu-core Vulkan backend on Linux and Windows.
[target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
features = ["vulkan"]
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
features = ["windows_rs"]
[target.'cfg(windows)'.dependencies.d3d12]
git = "https://github.com/gfx-rs/wgpu"
rev = "07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
rev = "23392c5228ce81ae3a9772cf93da933be2d5961c"
[target.'cfg(windows)'.dependencies]
winapi = "0.3"

View file

@ -20,11 +20,11 @@ origin:
# Human-readable identifier for this version/release
# Generally "version NNN", "tag SSS", "bookmark SSS"
release: commit 07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a
release: 23392c5228ce81ae3a9772cf93da933be2d5961c (2024-02-27T17:15:13Z).
# Revision to pull in
# Must be a long or short commit SHA (long preferred)
revision: 07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a
revision: 23392c5228ce81ae3a9772cf93da933be2d5961c
license: ['MIT', 'Apache-2.0']

View file

@ -499,7 +499,8 @@ pub unsafe extern "C" fn wgpu_server_buffer_map(
// the returned value of buffer_map_async.
let result = gfx_select!(buffer_id => global.buffer_map_async(
buffer_id,
start .. start + size,
start,
Some(size),
operation
));

View file

@ -1324,12 +1324,12 @@ delta = "0.5.0 -> 0.7.0"
who = [
"Erich Gubler <egubler@mozilla.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Nicolas Silva <nical@fastmail.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.7.0 -> 0.19.0@git:07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
delta = "0.7.0 -> 0.19.0@git:23392c5228ce81ae3a9772cf93da933be2d5961c"
importable = false
[[audits.darling]]
@ -1496,6 +1496,11 @@ who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "0.2.3 -> 0.2.4"
[[audits.document-features]]
who = "Erich Gubler <erichdongubler@gmail.com>"
criteria = "safe-to-deploy"
version = "0.2.8"
[[audits.dogear]]
who = "Sammy Khamis <skhamis@mozilla.com>"
criteria = "safe-to-deploy"
@ -2397,6 +2402,11 @@ who = "Makoto Kato <m_kato@ga2.so-net.ne.jp>"
criteria = "safe-to-deploy"
delta = "0.7.0 -> 0.7.2"
[[audits.litrs]]
who = "Erich Gubler <erichdongubler@gmail.com>"
criteria = "safe-to-deploy"
version = "0.4.1"
[[audits.lmdb-rkv]]
who = "Bobby Holley <bobbyholley@gmail.com>"
criteria = "safe-to-deploy"
@ -2656,12 +2666,12 @@ delta = "0.13.0 -> 0.14.0"
[[audits.naga]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Nicolas Silva <nical@fastmail.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.14.0 -> 0.19.0@git:07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
delta = "0.14.0 -> 0.19.0@git:23392c5228ce81ae3a9772cf93da933be2d5961c"
importable = false
[[audits.net2]]
@ -4470,12 +4480,12 @@ delta = "0.17.0 -> 0.18.0"
[[audits.wgpu-core]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Nicolas Silva <nical@fastmail.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.18.0 -> 0.19.0@git:07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
delta = "0.18.0 -> 0.19.0@git:23392c5228ce81ae3a9772cf93da933be2d5961c"
importable = false
[[audits.wgpu-hal]]
@ -4524,12 +4534,12 @@ delta = "0.17.0 -> 0.18.0"
[[audits.wgpu-hal]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Nicolas Silva <nical@fastmail.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.18.0 -> 0.19.0@git:07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
delta = "0.18.0 -> 0.19.0@git:23392c5228ce81ae3a9772cf93da933be2d5961c"
importable = false
[[audits.wgpu-types]]
@ -4578,12 +4588,12 @@ delta = "0.17.0 -> 0.18.0"
[[audits.wgpu-types]]
who = [
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
"Erich Gubler <erichdongubler@gmail.com>",
"Jim Blandy <jimb@red-bean.com>",
"Nicolas Silva <nical@fastmail.com>",
"Erich Gubler <erichdongubler@gmail.com>",
]
criteria = "safe-to-deploy"
delta = "0.18.0 -> 0.19.0@git:07e59eb6fc7de3f682f1c401b9cf9f0da9ee4b4a"
delta = "0.18.0 -> 0.19.0@git:23392c5228ce81ae3a9772cf93da933be2d5961c"
importable = false
[[audits.whatsys]]

View file

@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"c1ccf4587ca168b3baa54580469c5dcc776decac0d996d3bb31d2341b47efa11","Cargo.toml":"390d32c2b791a6745c075c474e6d57c65d5f77f0e7190ff8a8c5342fbb40722a","LICENSE-APACHE":"074e6e32c86a4c0ef8b3ed25b721ca23aca83df277cd88106ef7177c354615ff","LICENSE-MIT":"aa893340d14b9844625be6a50ac644169a01b52f0211cbf81b09e1874c8cd81d","README.md":"89a83c4acc6891e5651772fc78a1d6362070774eaa6c5b5d4bfbe9e57a957be9","lib.rs":"2f4ede9d0619d85449891d9055605188db681d57b405e40e529831266e014ee5","rustfmt.toml":"f74204a6f92aa7422a16ecb2ffe2d5bae0f123b778d08b5db1a398a3c9ca4306","tests/self-doc.rs":"24bbda93f3b323c0b7c543c1df3bf45522b8026283103211805f070de66abadc"},"package":"ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95"}

View file

@ -0,0 +1,44 @@
# Changelog
## 0.2.8 - 2023-12-29
* Remove `\n` between features (#17)
* Don't throw an error when there is no features in Cargo.toml (#20)
## 0.2.7 - 2022-12-21
* Fix parsing of Cargo.toml with multi-line array of array (#16)
## 0.2.6 - 2022-09-24
* Fix parsing of escaped string literal in the macro arguments
## 0.2.5 - 2022-09-17
* Allow customization of the output with the `feature_label=` parameter
## 0.2.4 - 2022-09-14
* Fix dependencies or features written with quotes
## 0.2.3 - 2022-08-15
* Fix parsing of table with `#` within strings (#10)
## 0.2.2 - 2022-07-25
* Fix parsing of dependencies or feature spanning multiple lines (#9)
## 0.2.1 - 2022-02-12
* Fix indentation of multi-lines feature comments (#5)
## 0.2.0 - 2022-02-11
* Added ability to document optional features. (This is a breaking change in the
sense that previously ignored comments may now result in errors)
## 0.1.0 - 2022-02-01
Initial release

View file

@ -0,0 +1,40 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "document-features"
version = "0.2.8"
authors = ["Slint Developers <info@slint-ui.com>"]
description = "Extract documentation for the feature flags from comments in Cargo.toml"
homepage = "https://slint-ui.com"
readme = "README.md"
keywords = [
"documentation",
"features",
"rustdoc",
"macro",
]
categories = ["development-tools"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/slint-ui/document-features"
[lib]
path = "lib.rs"
proc-macro = true
[dependencies.litrs]
version = "0.4.1"
default-features = false
[features]
default = []
self-test = []

View file

@ -0,0 +1,73 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,19 @@
Copyright (c) 2020 Olivier Goffart <ogoffart@sixtyfps.io>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,43 @@
# Document your crate's feature flags
[![Crates.io](https://img.shields.io/crates/v/document-features)](https://crates.io/crates/document-features)
[![Documentation](https://docs.rs/document-features/badge.svg)](https://docs.rs/document-features/)
This crate provides a macro that extracts documentation comments from Cargo.toml
To use this crate, add `#![doc = document_features::document_features!()]` in your crate documentation.
The `document_features!()` macro reads your `Cargo.toml` file, extracts feature comments and generates
a markdown string for your documentation.
Use `## ` and `#! ` comments in your Cargo.toml to document features, for example:
```toml
[dependencies]
document-features = "0.2"
## ...
[features]
## The foo feature enables the `foo` functions
foo = []
## The bar feature enables the [`bar`] module
bar = []
#! ### Experimental features
#! The following features are experimental
## Activate the fusion reactor
fusion = []
```
These comments keep the feature definition and documentation next to each other, and they are then
rendered into your crate documentation.
Check out the [documentation](https://docs.rs/document-features/) for more details.
## Contributions
Contributions are welcome. We accept pull requests and bug reports.
## License
MIT OR Apache-2.0

View file

@ -0,0 +1,877 @@
// Copyright © SixtyFPS GmbH <info@sixtyfps.io>
// SPDX-License-Identifier: MIT OR Apache-2.0
/*!
Document your crate's feature flags.
This crate provides a macro that extracts "documentation" comments from Cargo.toml
To use this crate, add `#![doc = document_features::document_features!()]` in your crate documentation.
The `document_features!()` macro reads your `Cargo.toml` file, extracts feature comments and generates
a markdown string for your documentation.
Basic example:
```rust
//! Normal crate documentation goes here.
//!
//! ## Feature flags
#![doc = document_features::document_features!()]
// rest of the crate goes here.
```
## Documentation format:
The documentation of your crate features goes into `Cargo.toml`, where they are defined.
The `document_features!()` macro analyzes the contents of `Cargo.toml`.
Similar to Rust's documentation comments `///` and `//!`, the macro understands
comments that start with `## ` and `#! `. Note the required trailing space.
Lines starting with `###` will not be understood as doc comment.
`## ` comments are meant to be *above* the feature they document.
There can be several `## ` comments, but they must always be followed by a
feature name or an optional dependency.
There should not be `#! ` comments between the comment and the feature they document.
`#! ` comments are not associated with a particular feature, and will be printed
in where they occur. Use them to group features, for example.
## Examples:
*/
// Note: because rustdoc escapes the first `#` of a line starting with `#`,
// these docs comments have one more `#` ,
#![doc = self_test!(/**
[package]
name = "..."
## ...
[features]
default = ["foo"]
##! This comments goes on top
### The foo feature enables the `foo` functions
foo = []
### The bar feature enables the bar module
bar = []
##! ### Experimental features
##! The following features are experimental
### Enable the fusion reactor
###
### Can lead to explosions
fusion = []
[dependencies]
document-features = "0.2"
##! ### Optional dependencies
### Enable this feature to implement the trait for the types from the genial crate
genial = { version = "0.2", optional = true }
### This awesome dependency is specified in its own table
[dependencies.awesome]
version = "1.3.5"
optional = true
*/
=>
/**
This comments goes on top
* **`foo`** *(enabled by default)* The foo feature enables the `foo` functions
* **`bar`** The bar feature enables the bar module
#### Experimental features
The following features are experimental
* **`fusion`** Enable the fusion reactor
Can lead to explosions
#### Optional dependencies
* **`genial`** Enable this feature to implement the trait for the types from the genial crate
* **`awesome`** This awesome dependency is specified in its own table
*/
)]
/*!
## Customization
You can customize the formatting of the features in the generated documentation by setting
the key **`feature_label=`** to a given format string. This format string must be either
a [string literal](https://doc.rust-lang.org/reference/tokens.html#string-literals) or
a [raw string literal](https://doc.rust-lang.org/reference/tokens.html#raw-string-literals).
Every occurrence of `{feature}` inside the format string will be substituted with the name of the feature.
For instance, to emulate the HTML formatting used by `rustdoc` one can use the following:
```rust
#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
```
The default formatting is equivalent to:
```rust
#![doc = document_features::document_features!(feature_label = "**`{feature}`**")]
```
## Compatibility
The minimum Rust version required to use this crate is Rust 1.54 because of the
feature to have macro in doc comments. You can make this crate optional and use
`#[cfg_attr()]` statements to enable it only when building the documentation:
You need to have two levels of `cfg_attr` because Rust < 1.54 doesn't parse the attribute
otherwise.
```rust,ignore
#![cfg_attr(
feature = "document-features",
cfg_attr(doc, doc = ::document_features::document_features!())
)]
```
In your Cargo.toml, enable this feature while generating the documentation on docs.rs:
```toml
[dependencies]
document-features = { version = "0.2", optional = true }
[package.metadata.docs.rs]
features = ["document-features"]
## Alternative: enable all features so they are all documented
## all-features = true
```
*/
// Hard build error if the consumer disabled this crate's `default` feature:
// the (currently empty) `default` feature is reserved so future versions can
// gate new on-by-default behavior behind it without a semver break.
#[cfg(not(feature = "default"))]
compile_error!(
    "The feature `default` must be enabled to ensure \
    forward compatibility with future version of this crate"
);

// This is a proc-macro crate; the compiler-provided `proc_macro` crate must
// be declared explicitly on edition 2018.
extern crate proc_macro;

use proc_macro::{TokenStream, TokenTree};
use std::borrow::Cow;
use std::collections::HashSet;
use std::convert::TryFrom;
use std::fmt::Write;
use std::path::Path;
use std::str::FromStr;
/// Build a token stream that expands to `::core::compile_error!{"<e>"}`.
///
/// The message is passed through `escape_default` so quotes and backslashes
/// in `e` cannot break out of the generated string literal.
fn error(e: &str) -> TokenStream {
    let escaped = e.escape_default();
    let source = format!("::core::compile_error!{{\"{}\"}}", escaped);
    TokenStream::from_str(&source).unwrap()
}
/// Build a `compile_error! { "<msg>" }` invocation with every token spanned
/// at `tt`'s location (or the macro call site when no offending token is
/// available), so rustc points its diagnostic at the right place.
fn compile_error(msg: &str, tt: Option<TokenTree>) -> TokenStream {
    use proc_macro::{Delimiter, Group, Ident, Literal, Punct, Spacing};
    use std::iter::FromIterator;

    let span = match tt.as_ref() {
        Some(tree) => tree.span(),
        None => proc_macro::Span::call_site(),
    };

    let mut bang = Punct::new('!', Spacing::Alone);
    bang.set_span(span);

    let mut message = Literal::string(msg);
    message.set_span(span);

    let body = TokenStream::from_iter([TokenTree::Literal(message)]);
    let mut braces = Group::new(Delimiter::Brace, body);
    braces.set_span(span);

    TokenStream::from_iter(vec![
        TokenTree::Ident(Ident::new("compile_error", span)),
        TokenTree::Punct(bang),
        TokenTree::Group(braces),
    ])
}
/// Parsed arguments of a `document_features!()` invocation.
#[derive(Default)]
struct Args {
    /// Custom format string from the `feature_label = "..."` parameter; it is
    /// required (by `parse_args`) to contain the `{feature}` placeholder.
    /// `None` means no label was given and the default formatting is used.
    feature_label: Option<String>,
}
/// Parse the arguments passed to `document_features!`.
///
/// Accepted forms:
/// * empty input — yields `Args::default()`;
/// * `feature_label = "<fmt>"`, where the (possibly raw) string literal must
///   contain the `{feature}` placeholder.
///
/// Any malformed input yields an `Err` carrying a `compile_error!` token
/// stream spanned at the offending token.
fn parse_args(input: TokenStream) -> Result<Args, TokenStream> {
    let mut tokens = input.into_iter().fuse();

    // Key: must be the identifier `feature_label` (or nothing at all).
    match tokens.next() {
        None => return Ok(Args::default()),
        Some(TokenTree::Ident(ident)) if ident.to_string() == "feature_label" => {}
        other => return Err(compile_error("expected `feature_label`", other)),
    }

    // Separator: a single `=`.
    match tokens.next() {
        Some(TokenTree::Punct(p)) if p.as_char() == '=' => {}
        other => return Err(compile_error("expected `=`", other)),
    }

    // Value: a string literal mentioning `{feature}`.
    let value = match tokens.next() {
        Some(tt) => tt,
        None => {
            return Err(compile_error(
                "expected a string literal containing the substring \"{feature}\"",
                None,
            ))
        }
    };
    let feature_label = match litrs::StringLit::<String>::try_from(&value) {
        Ok(lit) if lit.value().contains("{feature}") => lit.value().to_string(),
        _ => {
            return Err(compile_error(
                "expected a string literal containing the substring \"{feature}\"",
                Some(value),
            ))
        }
    };

    // Nothing may follow the format string.
    match tokens.next() {
        None => Ok(Args { feature_label: Some(feature_label) }),
        trailing @ Some(_) => {
            Err(compile_error("unexpected token after the format string", trailing))
        }
    }
}
/// Produce a literal string containing documentation extracted from Cargo.toml
///
/// See the [crate] documentation for details
#[proc_macro]
pub fn document_features(tokens: TokenStream) -> TokenStream {
    // Both the success and the failure path already carry a ready-to-emit
    // token stream (the failure one being a `compile_error!` invocation).
    match parse_args(tokens).and_then(|args| document_features_impl(&args)) {
        Ok(stream) => stream,
        Err(stream) => stream,
    }
}
/// Load the manifest of the crate being documented, run it through
/// `process_toml`, and wrap the resulting Markdown in a string-literal token.
fn document_features_impl(args: &Args) -> Result<TokenStream, TokenStream> {
    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
    let manifest_path = Path::new(&manifest_dir).join("Cargo.toml");
    let mut cargo_toml = std::fs::read_to_string(manifest_path)
        .map_err(|e| error(&format!("Can't open Cargo.toml: {:?}", e)))?;
    let has_doc_comments = cargo_toml.contains("\n##") || cargo_toml.contains("\n#!");
    if !has_doc_comments {
        // On crates.io, Cargo.toml is usually "normalized" and stripped of all
        // comments; the original manifest is published alongside it as
        // Cargo.toml.orig, so fall back to that when it still has doc comments.
        let orig_path = Path::new(&manifest_dir).join("Cargo.toml.orig");
        if let Ok(orig) = std::fs::read_to_string(orig_path) {
            if orig.contains("##") || orig.contains("#!") {
                cargo_toml = orig;
            }
        }
    }
    let result = process_toml(&cargo_toml, args).map_err(|e| error(&e))?;
    let literal = proc_macro::Literal::string(&result);
    Ok(std::iter::once(proc_macro::TokenTree::from(literal)).collect())
}
/// Extract the feature documentation from the text of a `Cargo.toml` manifest
/// and render it as Markdown.
///
/// Recognized comment forms:
/// * `#!` — "top" comments inserted between groups of features;
/// * `##` — doc comments attached to the next feature, or to the next
///   optional dependency (which acts as an implicit feature).
///
/// Returns the rendered Markdown, or an error message when the two comment
/// kinds are mixed or a comment has no feature to attach to.
fn process_toml(cargo_toml: &str, args: &Args) -> Result<String, String> {
    // Get all lines between the "[features]" and the next block
    let mut lines = cargo_toml
        .lines()
        .map(str::trim)
        // and skip empty lines and comments that are not docs comments
        .filter(|l| {
            !l.is_empty() && (!l.starts_with('#') || l.starts_with("##") || l.starts_with("#!"))
        });
    let mut top_comment = String::new();
    let mut current_comment = String::new();
    // (feature name, preceding `#!` block, attached `##` block) triples.
    let mut features = vec![];
    let mut default_features = HashSet::new();
    let mut current_table = "";
    while let Some(line) = lines.next() {
        if let Some(x) = line.strip_prefix("#!") {
            if !x.is_empty() && !x.starts_with(' ') {
                continue; // it's not a doc comment
            }
            if !current_comment.is_empty() {
                return Err("Cannot mix ## and #! comments between features.".into());
            }
            // Separate successive `#!` blocks with a blank line in the output.
            if top_comment.is_empty() && !features.is_empty() {
                top_comment = "\n".into();
            }
            writeln!(top_comment, "{}", x).unwrap();
        } else if let Some(x) = line.strip_prefix("##") {
            if !x.is_empty() && !x.starts_with(' ') {
                continue; // it's not a doc comment
            }
            writeln!(current_comment, " {}", x).unwrap();
        } else if let Some(table) = line.strip_prefix('[') {
            current_table = table
                .split_once(']')
                .map(|(t, _)| t.trim())
                .ok_or_else(|| format!("Parse error while parsing line: {}", line))?;
            // A pending `##` comment right before a `[…dependencies.<dep>]`
            // table documents that dependency as a feature.
            if !current_comment.is_empty() {
                let dep = current_table
                    .rsplit_once('.')
                    .and_then(|(table, dep)| table.trim().ends_with("dependencies").then(|| dep))
                    .ok_or_else(|| format!("Not a feature: `{}`", line))?;
                features.push((
                    dep.trim(),
                    std::mem::take(&mut top_comment),
                    std::mem::take(&mut current_comment),
                ));
            }
        } else if let Some((dep, rest)) = line.split_once('=') {
            let dep = dep.trim().trim_matches('"');
            // Values may span several lines (arrays, inline tables); consume
            // them fully even when we don't use the value.
            let rest = get_balanced(rest, &mut lines)
                .map_err(|e| format!("Parse error while parsing value {}: {}", dep, e))?;
            // Remember the names listed in `default = [...]` so those features
            // can later be marked "(enabled by default)".
            if current_table == "features" && dep == "default" {
                let defaults = rest
                    .trim()
                    .strip_prefix('[')
                    .and_then(|r| r.strip_suffix(']'))
                    .ok_or_else(|| format!("Parse error while parsing dependency {}", dep))?
                    .split(',')
                    .map(|d| d.trim().trim_matches(|c| c == '"' || c == '\'').trim().to_string())
                    .filter(|d| !d.is_empty());
                default_features.extend(defaults);
            }
            if !current_comment.is_empty() {
                if current_table.ends_with("dependencies") {
                    // Only optional dependencies act as implicit features.
                    if !rest
                        .split_once("optional")
                        .and_then(|(_, r)| r.trim().strip_prefix('='))
                        .map_or(false, |r| r.trim().starts_with("true"))
                    {
                        return Err(format!("Dependency {} is not an optional dependency", dep));
                    }
                } else if current_table != "features" {
                    return Err(format!(
                        r#"Comment cannot be associated with a feature: "{}""#,
                        current_comment.trim()
                    ));
                }
                features.push((
                    dep,
                    std::mem::take(&mut top_comment),
                    std::mem::take(&mut current_comment),
                ));
            }
        }
    }
    if !current_comment.is_empty() {
        return Err("Found comment not associated with a feature".into());
    }
    if features.is_empty() {
        return Ok("*No documented features in Cargo.toml*".into());
    }
    // Render one Markdown bullet per documented feature, preceded by any
    // `#!` top comment collected before it.
    let mut result = String::new();
    for (f, top, comment) in features {
        let default = if default_features.contains(f) { " *(enabled by default)*" } else { "" };
        if !comment.trim().is_empty() {
            if let Some(feature_label) = &args.feature_label {
                writeln!(
                    result,
                    "{}* {}{} —{}",
                    top,
                    feature_label.replace("{feature}", f),
                    default,
                    comment.trim_end(),
                )
                .unwrap();
            } else {
                writeln!(result, "{}* **`{}`**{} —{}", top, f, default, comment.trim_end())
                    .unwrap();
            }
        } else if let Some(feature_label) = &args.feature_label {
            writeln!(result, "{}* {}{}", top, feature_label.replace("{feature}", f), default,)
                .unwrap();
        } else {
            writeln!(result, "{}* **`{}`**{}", top, f, default).unwrap();
        }
    }
    // A trailing `#!` comment (after the last feature) goes at the very end.
    result += &top_comment;
    Ok(result)
}
fn get_balanced<'a>(
first_line: &'a str,
lines: &mut impl Iterator<Item = &'a str>,
) -> Result<Cow<'a, str>, String> {
let mut line = first_line;
let mut result = Cow::from("");
let mut in_quote = false;
let mut level = 0;
loop {
let mut last_slash = false;
for (idx, b) in line.as_bytes().iter().enumerate() {
if last_slash {
last_slash = false
} else if in_quote {
match b {
b'\\' => last_slash = true,
b'"' | b'\'' => in_quote = false,
_ => (),
}
} else {
match b {
b'\\' => last_slash = true,
b'"' => in_quote = true,
b'{' | b'[' => level += 1,
b'}' | b']' if level == 0 => return Err("unbalanced source".into()),
b'}' | b']' => level -= 1,
b'#' => {
line = &line[..idx];
break;
}
_ => (),
}
}
}
if result.len() == 0 {
result = Cow::from(line);
} else {
*result.to_mut() += line;
}
if level == 0 {
return Ok(result);
}
line = if let Some(l) = lines.next() {
l
} else {
return Err("unbalanced source".into());
};
}
}
// Unit tests for `get_balanced`. The arrays are passed through
// `IntoIterator::into_iter` so they are iterated *by value* (`&'static str`
// items); the `.into_iter()` method-call form would auto-ref and yield
// `&&str` on pre-2021 editions.
#[test]
fn test_get_balanced() {
    // Multi-line value; comment tail on the second continuation line is dropped.
    assert_eq!(
        get_balanced(
            "{",
            &mut IntoIterator::into_iter(["a", "{ abc[], #ignore", " def }", "}", "xxx"])
        ),
        Ok("{a{ abc[], def }}".into())
    );
    // `#` inside a quoted string is not a comment.
    assert_eq!(
        get_balanced("{ foo = \"{#\" } #ignore", &mut IntoIterator::into_iter(["xxx"])),
        Ok("{ foo = \"{#\" } ".into())
    );
    // Closing bracket with no opener is rejected immediately.
    assert_eq!(
        get_balanced("]", &mut IntoIterator::into_iter(["["])),
        Err("unbalanced source".into())
    );
}
#[cfg(feature = "self-test")]
#[proc_macro]
#[doc(hidden)]
/// Helper macro for the tests. Do not use
pub fn self_test_helper(input: TokenStream) -> TokenStream {
    // The input arrives as a stringified doc attribute; strip the surrounding
    // quotes and raw-string hashes before handing it to the TOML processor.
    let raw = input.to_string();
    let toml = raw.trim_matches(|c| c == '"' || c == '#');
    match process_toml(toml, &Args::default()) {
        Err(e) => error(&e),
        Ok(r) => {
            let lit = proc_macro::Literal::string(&r);
            std::iter::once(proc_macro::TokenTree::from(lit)).collect()
        }
    }
}
// With the `self-test` feature enabled, each documentation example becomes a
// real doc-test: the TOML input is expanded through `self_test_helper!` and
// compared — after whitespace normalization — with the expected Markdown.
#[cfg(feature = "self-test")]
macro_rules! self_test {
    (#[doc = $toml:literal] => #[doc = $md:literal]) => {
        concat!(
            "\n`````rust\n\
fn normalize_md(md : &str) -> String {
md.lines().skip_while(|l| l.is_empty()).map(|l| l.trim())
.collect::<Vec<_>>().join(\"\\n\")
}
assert_eq!(normalize_md(document_features::self_test_helper!(",
            stringify!($toml),
            ")), normalize_md(",
            stringify!($md),
            "));\n`````\n\n"
        )
    };
}
// Without `self-test`, the same macro merely renders the example as
// documentation: the TOML input and the expected output side by side.
#[cfg(not(feature = "self-test"))]
macro_rules! self_test {
    (#[doc = $toml:literal] => #[doc = $md:literal]) => {
        concat!(
            "This contents in Cargo.toml:\n`````toml",
            $toml,
            "\n`````\n Generates the following:\n\
<table><tr><th>Preview</th></tr><tr><td>\n\n",
            $md,
            "\n</td></tr></table>\n\n&nbsp;\n",
        )
    };
}
// The following struct is inserted only during generation of the documentation in order to exploit doc-tests.
// These doc-tests are used to check that invalid arguments to the `document_features!` macro cause a compile time error.
// For a more principled way of testing compilation error, maybe investigate <https://docs.rs/trybuild>.
//
// The first code block must compile; each `compile_fail` block covers one
// rejected argument form (wrong separator, wrong key, missing `{feature}`
// placeholder, non-string value, missing value, trailing tokens).
//
/// ```rust
/// #![doc = document_features::document_features!()]
/// #![doc = document_features::document_features!(feature_label = "**`{feature}`**")]
/// #![doc = document_features::document_features!(feature_label = r"**`{feature}`**")]
/// #![doc = document_features::document_features!(feature_label = r#"**`{feature}`**"#)]
/// #![doc = document_features::document_features!(feature_label = "<span class=\"stab portability\"><code>{feature}</code></span>")]
/// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
/// ```
/// ```compile_fail
/// #![doc = document_features::document_features!(feature_label > "<span>{feature}</span>")]
/// ```
/// ```compile_fail
/// #![doc = document_features::document_features!(label = "<span>{feature}</span>")]
/// ```
/// ```compile_fail
/// #![doc = document_features::document_features!(feature_label = "{feat}")]
/// ```
/// ```compile_fail
/// #![doc = document_features::document_features!(feature_label = 3.14)]
/// ```
/// ```compile_fail
/// #![doc = document_features::document_features!(feature_label = )]
/// ```
/// ```compile_fail
/// #![doc = document_features::document_features!(feature_label = "**`{feature}`**" extra)]
/// ```
// Only built by rustdoc (`cfg(doc)`); never part of the compiled crate.
#[cfg(doc)]
struct FeatureLabelCompilationTest;
#[cfg(test)]
mod tests {
    use super::{process_toml, Args};
    /// Assert that `process_toml` rejects `toml` with an error message that
    /// contains `expected`.
    #[track_caller]
    fn test_error(toml: &str, expected: &str) {
        let err = process_toml(toml, &Args::default()).unwrap_err();
        assert!(err.contains(expected), "{:?} does not contain {:?}", err, expected)
    }
    // Multi-line values outside the features/dependencies tables must still be
    // consumed as balanced values, not mistaken for documented features.
    #[test]
    fn only_get_balanced_in_correct_table() {
        process_toml(
            r#"
[package.metadata.release]
pre-release-replacements = [
{test=\"\#\# \"},
]
[abcd]
[features]#xyz
#! abc
#
###
#! def
#!
## 123
## 456
feat1 = ["plop"]
#! ghi
no_doc = []
##
feat2 = ["momo"]
#! klm
default = ["feat1", "something_else"]
#! end
"#,
            &Args::default(),
        )
        .unwrap();
    }
    // No doc comments at all: placeholder text is produced.
    #[test]
    fn no_features() {
        let r = process_toml(
            r#"
[features]
[dependencies]
foo = 4;
"#,
            &Args::default(),
        )
        .unwrap();
        assert_eq!(r, "*No documented features in Cargo.toml*");
    }
    #[test]
    fn no_features2() {
        let r = process_toml(
            r#"
[packages]
[dependencies]
"#,
            &Args::default(),
        )
        .unwrap();
        assert_eq!(r, "*No documented features in Cargo.toml*");
    }
    // Unterminated `[table` header.
    #[test]
    fn parse_error3() {
        test_error(
            r#"
[features]
ff = []
[abcd
efgh
[dependencies]
"#,
            "Parse error while parsing line: [abcd",
        );
    }
    // `#!` appearing while a `##` comment is pending is rejected.
    #[test]
    fn parse_error4() {
        test_error(
            r#"
[features]
## dd
## ff
#! ee
## ff
"#,
            "Cannot mix",
        );
    }
    // `##` comment with nothing after it to attach to.
    #[test]
    fn parse_error5() {
        test_error(
            r#"
[features]
## dd
"#,
            "not associated with a feature",
        );
    }
    // Unterminated `default = [` array.
    #[test]
    fn parse_error6() {
        test_error(
            r#"
[features]
# ff
foo = []
default = [
#ffff
# ff
"#,
            "Parse error while parsing value default",
        );
    }
    // Mismatched brackets inside a value.
    #[test]
    fn parse_error7() {
        test_error(
            r#"
[features]
# f
foo = [ x = { ]
bar = []
"#,
            "Parse error while parsing value foo",
        );
    }
    // A `##` comment cannot document a table header that isn't a dependency.
    #[test]
    fn not_a_feature1() {
        test_error(
            r#"
## hallo
[features]
"#,
            "Not a feature: `[features]`",
        );
    }
    // A `##` comment in an unrelated table cannot document a key.
    #[test]
    fn not_a_feature2() {
        test_error(
            r#"
[package]
## hallo
foo = []
"#,
            "Comment cannot be associated with a feature: \"hallo\"",
        );
    }
    // Documented dependencies must be `optional = true` — explicit `false`…
    #[test]
    fn non_optional_dep1() {
        test_error(
            r#"
[dev-dependencies]
## Not optional
foo = { version = "1.2", optional = false }
"#,
            "Dependency foo is not an optional dependency",
        );
    }
    // …or missing `optional` are both rejected.
    #[test]
    fn non_optional_dep2() {
        test_error(
            r#"
[dev-dependencies]
## Not optional
foo = { version = "1.2" }
"#,
            "Dependency foo is not an optional dependency",
        );
    }
    // End-to-end rendering with the default and a custom feature label.
    #[test]
    fn basic() {
        let toml = r#"
[abcd]
[features]#xyz
#! abc
#
###
#! def
#!
## 123
## 456
feat1 = ["plop"]
#! ghi
no_doc = []
##
feat2 = ["momo"]
#! klm
default = ["feat1", "something_else"]
#! end
"#;
        let parsed = process_toml(toml, &Args::default()).unwrap();
        assert_eq!(
            parsed,
            " abc\n def\n\n* **`feat1`** *(enabled by default)* — 123\n 456\n\n ghi\n* **`feat2`**\n\n klm\n end\n"
        );
        let parsed = process_toml(
            toml,
            &Args {
                feature_label: Some(
                    "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
                ),
            },
        )
        .unwrap();
        assert_eq!(
            parsed,
            " abc\n def\n\n* <span class=\"stab portability\"><code>feat1</code></span> *(enabled by default)* — 123\n 456\n\n ghi\n* <span class=\"stab portability\"><code>feat2</code></span>\n\n klm\n end\n"
        );
    }
    // Optional dependencies (inline and in their own table) show up as
    // features; non-optional ones without a comment are skipped.
    #[test]
    fn dependencies() {
        let toml = r#"
#! top
[dev-dependencies] #yo
## dep1
dep1 = { version="1.2", optional=true}
#! yo
dep2 = "1.3"
## dep3
[target.'cfg(unix)'.build-dependencies.dep3]
version = "42"
optional = true
"#;
        let parsed = process_toml(toml, &Args::default()).unwrap();
        assert_eq!(parsed, " top\n* **`dep1`** — dep1\n\n yo\n* **`dep3`** — dep3\n");
        let parsed = process_toml(
            toml,
            &Args {
                feature_label: Some(
                    "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
                ),
            },
        )
        .unwrap();
        assert_eq!(parsed, " top\n* <span class=\"stab portability\"><code>dep1</code></span> — dep1\n\n yo\n* <span class=\"stab portability\"><code>dep3</code></span> — dep3\n");
    }
    // Values spanning several lines (arrays, inline tables, tricky quoting)
    // are consumed correctly.
    #[test]
    fn multi_lines() {
        let toml = r#"
[package.metadata.foo]
ixyz = [
["array"],
[
"of",
"arrays"
]
]
[dev-dependencies]
## dep1
dep1 = {
version="1.2-}",
optional=true
}
[features]
default = [
"goo",
"\"]",
"bar",
]
## foo
foo = [
"bar"
]
## bar
bar = [
]
"#;
        let parsed = process_toml(toml, &Args::default()).unwrap();
        assert_eq!(
            parsed,
            "* **`dep1`** — dep1\n* **`foo`** — foo\n* **`bar`** *(enabled by default)* — bar\n"
        );
        let parsed = process_toml(
            toml,
            &Args {
                feature_label: Some(
                    "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
                ),
            },
        )
        .unwrap();
        assert_eq!(
            parsed,
            "* <span class=\"stab portability\"><code>dep1</code></span> — dep1\n* <span class=\"stab portability\"><code>foo</code></span> — foo\n* <span class=\"stab portability\"><code>bar</code></span> *(enabled by default)* — bar\n"
        );
    }
    // Quoted feature names (with dots / non-ASCII) keep their exact spelling.
    #[test]
    fn dots_in_feature() {
        let toml = r#"
[features]
## This is a test
"teßt." = []
default = ["teßt."]
[dependencies]
## A dep
"dep" = { version = "123", optional = true }
"#;
        let parsed = process_toml(toml, &Args::default()).unwrap();
        assert_eq!(
            parsed,
            "* **`teßt.`** *(enabled by default)* — This is a test\n* **`dep`** — A dep\n"
        );
        let parsed = process_toml(
            toml,
            &Args {
                feature_label: Some(
                    "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
                ),
            },
        )
        .unwrap();
        assert_eq!(
            parsed,
            "* <span class=\"stab portability\"><code>teßt.</code></span> *(enabled by default)* — This is a test\n* <span class=\"stab portability\"><code>dep</code></span> — A dep\n"
        );
    }
}

View file

@ -0,0 +1 @@
use_small_heuristics = "Max"

View file

@ -0,0 +1,37 @@
// Smoke test: every supported spelling of the `feature_label` argument —
// plain, raw, and escaped string literals with various hash counts — must
// expand without a compile error.
#[test]
fn ensure_it_compiles() {
    document_features::document_features!();
    document_features::document_features!(feature_label = "**`{feature}`**");
    document_features::document_features!(feature_label = r"**`{feature}`**");
    document_features::document_features!(feature_label = r#"**`{feature}`**"#);
    document_features::document_features!(
        feature_label = "<span class=\"stab portability\"><code>{feature}</code></span>"
    );
    document_features::document_features!(
        feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#
    );
    document_features::document_features!(
        feature_label = r##"<span class="stab portability"><code>{feature}</code></span>"##
    );
}
// Running the macro against this crate's own Cargo.toml documents its single
// feature with the default `**`feature`**` rendering.
#[test]
fn self_doc() {
    let actual = document_features::document_features!();
    let expected = "* **`self-test`** — Internal feature used only for the tests, don't enable\n";
    assert_eq!(actual, expected);
}
// Custom `feature_label` rendering; the second invocation spells a space as
// `\u{0020}` to check that the literal's *value* (not its raw source text)
// is used as the format string.
#[test]
fn self_doc_with_custom_label() {
    let actual = document_features::document_features!(
        feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#
    );
    let expected =
        "* <span class=\"stab portability\"><code>self-test</code></span> — Internal feature used only for the tests, don't enable\n";
    assert_eq!(actual, expected);
    let actual2 = document_features::document_features!(
        feature_label = "<span class=\"stab\u{0020}portability\"><code>{feature}</code></span>"
    );
    assert_eq!(actual2, expected);
}

View file

@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"03cea7c394dd09087f6b2c7ba4b4641b5c2c50b32b7286cabd5be4850f62f170","Cargo.toml":"6ef884164a0139f0591a381ada2c99d850d38e5f3af3451efa12f808f8a799e0","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"7dc1552e88f49132cb358b1b962fc5e79fa42d70bcbb88c526d33e45b8e98036","README.md":"533d31adf3b4258b838cd6a1cdb58139e2cf761c3c38aa4654f66f34335c9073","src/bool/mod.rs":"53c6eedfd94552689e51233fffb8a99ce9321a32db0f08de8b18d48cda9b1877","src/bool/tests.rs":"a0e6d034036aa04aac6b847bb561bdba759d85c78d4cbb7fb93f4422efb83656","src/byte/mod.rs":"ff2a3e6108a9b32ae0d925ec34735d20194d5c6b27af060516a46d21397c75be","src/byte/tests.rs":"ac36dace42cd151ac9d26cc35701bc8b65f8f1ed6ee1cfef4eeb6caa9dd702bc","src/bytestr/mod.rs":"8fd951374f7edc2077465cd4f97001eece46358f2bb0c45fddb2942aac6ee13b","src/bytestr/tests.rs":"194b28f157196260b1c2a612dfb36fb1dace491db2ed2bbb39227771ed6baf60","src/char/mod.rs":"2bb6f25da83670f18ec40f8a38565aa2294a4cdf81c8bbaf081531a32b6c6d0c","src/char/tests.rs":"9de497c8c7d7a139ff81f3d7bf8b5c682316d983bebb58c58d2af97f4cd26c35","src/err.rs":"54d000c4f37258c6886dd5b7069e2f5282e51aec3731feb77935582ae8c18908","src/escape.rs":"a944e95344df54c16bf4cc6a8fb01a81e2eac2aacd4758b938d3339212fce60c","src/float/mod.rs":"defaf83526acdc8f9b34c7d1ac17d866a93409dc392eb608160778d6bb4a1e25","src/float/tests.rs":"5875403f1a72104973ed83d0cf29d766e7b2fa5c23615c85a5f2eeed02b115c9","src/impls.rs":"c5dd37dd3ecd29c40a0ed243b907765a27729a1b1f73fa2c6762105feb6527bc","src/integer/mod.rs":"2b9109ddd34faf76fc9ce9dfb04bcc6aed4834231c74bd8a774bd256cc57c18a","src/integer/tests.rs":"01147ce9b6742bb1614cf863090699c54bf660b9f2c6a5eb529d67ae92230c0d","src/lib.rs":"2e79c8035d0fb77db9414b5569eeef13b6db8cde48ef2a45ffcf5f2492d02a4a","src/parse.rs":"e1fa4a76331d52f711e1b06cdba853a4f815281366f4f4f68b4c0a109f8a1734","src/string/mod.rs":"52a9cda38f7cd5b025bc5ec7edb8106487ba3d141789f5bc239c4561490cdc29","src/string/tests.rs":"1e0150ddd921a74ed5ebf
6216708132d7768f3beb11a8c7bbfcf4ba01db40a5b","src/test_util.rs":"3badda83d7f256bb25b840820bc0d3a6523b4ded913555cbea5533b6ccad5654","src/tests.rs":"9f0dc2fe7a0eefb6575acd824767bb7d837a584dc7999ef59a457255a2cd7f3d"},"package":"b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5"}

103
third_party/rust/litrs/CHANGELOG.md vendored Normal file
View file

@ -0,0 +1,103 @@
# Changelog
All notable changes to this project will be documented in this file.
## [Unreleased]
## [0.4.1] - 2023-10-18
- Fixed incorrectly labeling `27f32` as a float literal in docs.
- Added hint to integer literal docs about parsing as `u128`.
## [0.4.0] - 2023-03-05
### Added
- Add ability to parse literals with arbitrary suffixes (e.g. `"foo"bla` or `23px`)
- Add `suffix()` method to all literal types except `BoolLit`
- Add `IntegerBase::value`
- Add `from_suffix` and `suffix` methods to `FloatType` and `IntegerType`
- Add `FromStr` and `Display` impls to `FloatType` and `IntegerType`
### Changed
- **Breaking**: Mark `FloatType` and `IntegerType` as `#[non_exhaustive]`
- **Breaking**: Fix integer parsing for cases like `27f32`. `Literal::parse`
and `IntegerLit::parse` will both identify this as an integer literal.
- **Breaking**: Fix float parsing by correctly rejecting inputs like `27f32`. A
float literal must have a period OR an exponent part, according to the spec.
Previously decimal integers were accepted in `FloatLit::parse`.
- Improved some parts of the docs
### Removed
- **Breaking**: Remove `OwnedLiteral` and `SharedLiteral`
## [0.3.0] - 2022-12-19
### Breaking
- Bump MSRV (minimal supported Rust version) to 1.54
### Added
- Add `raw_input` and `into_raw_input` to non-bool `*Lit` types
- Add `impl From<*Lit> for pm::Literal` (for non-bool literals)
- Add `impl From<BoolLit> for pm::Ident`
### Fixed
- Fix link to reference and clarify bool literals ([#7](https://github.com/LukasKalbertodt/litrs/pull/7))
### Internals
- Move lots of parsing code into non-generic functions (this hopefully reduces compile times)
- To implement `[into_]raw_input` for integer and float literals, their
internals were changed a bit so that they store the full input string now.
## [0.2.3] - 2021-06-09
### Changed
- Minor internal code change to bring MSRV from 1.52 to 1.42
## [0.2.2] - 2021-06-09
### Changed
- Fixed (byte) string literal parsing by:
- Correctly handling "string continue" sequences
- Correctly converting `\n\r` into `\n`
## [0.2.1] - 2021-06-04
### Changed
- Fixed the `expected` value of the error returned from `TryFrom<TokenTree>` impls in some cases
## [0.2.0] - 2021-05-28
### Changed
- **Breaking**: rename `Error` to `ParseError`. That describes its purpose more
closely and is particularly useful now that other error types exist in the library.
### Removed
- **Breaking**: remove `proc-macro` feature and instead offer the corresponding
`impl`s unconditionally. Since the feature didn't enable/disable a
dependency (`proc-macro` is a compiler provided crate) and since apparently
it works fine in `no_std` environments, I dropped this feature. I don't
currently see a reason why the corresponding impls should be conditional.
### Added
- `TryFrom<TokenTree> for litrs::Literal` impls
- `From<*Lit> for litrs::Literal` impls
- `TryFrom<proc_macro[2]::Literal> for *Lit`
- `TryFrom<TokenTree> for *Lit`
- `InvalidToken` error type for all new `TryFrom` impls
## [0.1.1] - 2021-05-25
### Added
- `From` impls to create a `Literal` from references to proc-macro literal types:
- `From<&proc_macro::Literal>`
- `From<&proc_macro2::Literal>`
- Better examples in README and repository
## 0.1.0 - 2021-05-24
### Added
- Everything
[Unreleased]: https://github.com/LukasKalbertodt/litrs/compare/v0.4.1...HEAD
[0.4.1]: https://github.com/LukasKalbertodt/litrs/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/LukasKalbertodt/litrs/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.3...v0.3.0
[0.2.3]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.2...v0.2.3
[0.2.2]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.1...v0.2.2
[0.2.1]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/LukasKalbertodt/litrs/compare/v0.1.1...v0.2.0
[0.1.1]: https://github.com/LukasKalbertodt/litrs/compare/v0.1.0...v0.1.1

51
third_party/rust/litrs/Cargo.toml vendored Normal file
View file

@ -0,0 +1,51 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.54"
name = "litrs"
version = "0.4.1"
authors = ["Lukas Kalbertodt <lukas.kalbertodt@gmail.com>"]
exclude = [".github"]
description = """
Parse and inspect Rust literals (i.e. tokens in the Rust programming language
representing fixed values). Particularly useful for proc macros, but can also
be used outside of a proc-macro context.
"""
documentation = "https://docs.rs/litrs/"
readme = "README.md"
keywords = [
"literal",
"parsing",
"proc-macro",
"type",
"procedural",
]
categories = [
"development-tools::procedural-macro-helpers",
"parser-implementations",
"development-tools::build-utils",
]
license = "MIT/Apache-2.0"
repository = "https://github.com/LukasKalbertodt/litrs/"
[dependencies.proc-macro2]
version = "1"
optional = true
[dependencies.unicode-xid]
version = "0.2.4"
optional = true
[features]
check_suffix = ["unicode-xid"]
default = ["proc-macro2"]

176
third_party/rust/litrs/LICENSE-APACHE vendored Normal file
View file

@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

25
third_party/rust/litrs/LICENSE-MIT vendored Normal file
View file

@ -0,0 +1,25 @@
Copyright (c) 2020 Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

88
third_party/rust/litrs/README.md vendored Normal file
View file

@ -0,0 +1,88 @@
# `litrs`: parsing and inspecting Rust literals
[<img alt="CI status of main" src="https://img.shields.io/github/actions/workflow/status/LukasKalbertodt/litrs/ci.yml?branch=main&label=CI&logo=github&logoColor=white&style=for-the-badge" height="23">](https://github.com/LukasKalbertodt/litrs/actions/workflows/ci.yml)
[<img alt="Crates.io Version" src="https://img.shields.io/crates/v/litrs?logo=rust&style=for-the-badge" height="23">](https://crates.io/crates/litrs)
[<img alt="docs.rs" src="https://img.shields.io/crates/v/litrs?color=blue&label=docs&style=for-the-badge" height="23">](https://docs.rs/litrs)
`litrs` offers functionality to parse Rust literals, i.e. tokens in the Rust programming language that represent fixed values.
For example: `27`, `"crab"`, `true`.
This is particularly useful for proc macros, but can also be used outside of a proc-macro context.
**Why this library?**
Unfortunately, the `proc_macro` API shipped with the compiler offers no easy way to inspect literals.
There are mainly two libraries for this purpose:
[`syn`](https://github.com/dtolnay/syn) and [`literalext`](https://github.com/mystor/literalext).
The latter is deprecated.
And `syn` is oftentimes overkill for the task at hand, especially when developing function-like proc-macros (e.g. `foo!(..)`).
This crate is a lightweight alternative.
Also, when it comes to literals, `litrs` offers a bit more flexibility and a few more features compared to `syn`.
I'm interested in community feedback!
If you consider using this, please speak your mind [in this issue](https://github.com/LukasKalbertodt/litrs/issues/1).
## Example
### In proc macro
```rust
use std::convert::TryFrom;
use proc_macro::TokenStream;
use litrs::Literal;
#[proc_macro]
pub fn foo(input: TokenStream) -> TokenStream {
// Please do proper error handling in your real code!
let first_token = input.into_iter().next().expect("no input");
// `try_from` will return an error if the token is not a literal.
match Literal::try_from(first_token) {
// Convenient methods to produce decent errors via `compile_error!`.
Err(e) => return e.to_compile_error(),
// You can now inspect your literal!
Ok(Literal::Integer(i)) => {
println!("Got an integer specified in base {:?}", i.base());
let value = i.value::<u64>().expect("integer literal too large");
println!("Is your integer even? {}", value % 2 == 0);
}
Ok(other) => {
println!("Got a non-integer literal");
}
}
TokenStream::new() // dummy output
}
```
If you are expecting a specific kind of literal, you can also use this, which will return an error if the token is not a float literal.
```rust
FloatLit::try_from(first_token)
```
### Parsing from a `&str`
Outside of a proc macro context you might want to parse a string directly.
```rust
use litrs::{FloatLit, Literal};
let lit = Literal::parse("'🦀'").expect("failed to parse literal");
let float_lit = FloatLit::parse("2.7e3").expect("failed to parse as float literal");
```
See [**the documentation**](https://docs.rs/litrs) or the `examples/` directory for more examples and information.
<br />
---
## License
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this project by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.

55
third_party/rust/litrs/src/bool/mod.rs vendored Normal file
View file

@ -0,0 +1,55 @@
use std::fmt;
use crate::{ParseError, err::{perr, ParseErrorKind::*}};
/// A bool literal: `true` or `false`. Also see [the reference][ref].
///
/// Note that, strictly speaking, from a Rust point of view "boolean literals"
/// are not actual literals but [keywords].
///
/// [ref]: https://doc.rust-lang.org/reference/expressions/literal-expr.html#boolean-literal-expressions
/// [keywords]: https://doc.rust-lang.org/reference/keywords.html#strict-keywords
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BoolLit {
    False,
    True,
}

impl BoolLit {
    /// Parses the input as a bool literal. Returns an error if the input is
    /// invalid or represents a different kind of literal.
    pub fn parse(s: &str) -> Result<Self, ParseError> {
        // Only the exact strings "true" and "false" are accepted; anything
        // else (prefixes, whitespace, different casing) is an error.
        if s == "true" {
            Ok(Self::True)
        } else if s == "false" {
            Ok(Self::False)
        } else {
            Err(perr(None, InvalidLiteral))
        }
    }

    /// Returns the actual Boolean value of this literal.
    pub fn value(self) -> bool {
        matches!(self, Self::True)
    }

    /// Returns the literal as string.
    pub fn as_str(&self) -> &'static str {
        if self.value() { "true" } else { "false" }
    }
}

impl fmt::Display for BoolLit {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to `pad` so width/alignment format options are honored.
        f.pad(self.as_str())
    }
}
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,48 @@
use crate::{
Literal, BoolLit,
test_util::assert_parse_ok_eq,
};
// Asserts that `$input` parses to `$expected` both through the generic
// `Literal::parse` entry point and through `BoolLit::parse` directly.
macro_rules! assert_bool_parse {
    ($input:literal, $expected:expr) => {
        assert_parse_ok_eq(
            $input, Literal::parse($input), Literal::Bool($expected), "Literal::parse");
        assert_parse_ok_eq($input, BoolLit::parse($input), $expected, "BoolLit::parse");
    };
}
#[test]
fn parse_ok() {
    // The only two valid bool literals.
    assert_bool_parse!("false", BoolLit::False);
    assert_bool_parse!("true", BoolLit::True);
}

#[test]
fn parse_err() {
    // Prefixes, surrounding whitespace, and different capitalization are all
    // rejected: parsing is exact.
    assert!(Literal::parse("fa").is_err());
    assert!(Literal::parse("fal").is_err());
    assert!(Literal::parse("fals").is_err());
    assert!(Literal::parse(" false").is_err());
    assert!(Literal::parse("false ").is_err());
    assert!(Literal::parse("False").is_err());
    assert!(Literal::parse("tr").is_err());
    assert!(Literal::parse("tru").is_err());
    assert!(Literal::parse(" true").is_err());
    assert!(Literal::parse("true ").is_err());
    assert!(Literal::parse("True").is_err());
}

#[test]
fn value() {
    assert!(!BoolLit::False.value());
    assert!(BoolLit::True.value());
}

#[test]
fn as_str() {
    assert_eq!(BoolLit::False.as_str(), "false");
    assert_eq!(BoolLit::True.as_str(), "true");
}

107
third_party/rust/litrs/src/byte/mod.rs vendored Normal file
View file

@ -0,0 +1,107 @@
use core::fmt;
use crate::{
Buffer, ParseError,
err::{perr, ParseErrorKind::*},
escape::unescape,
parse::check_suffix,
};
/// A (single) byte literal, e.g. `b'k'` or `b'!'`.
///
/// See [the reference][ref] for more information.
///
/// [ref]: https://doc.rust-lang.org/reference/tokens.html#byte-literals
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ByteLit<B: Buffer> {
    /// The raw input the literal was parsed from, e.g. `b'x'`.
    raw: B,
    /// Start index of the suffix or `raw.len()` if there is no suffix.
    start_suffix: usize,
    /// The decoded byte value this literal represents.
    value: u8,
}
impl<B: Buffer> ByteLit<B> {
    /// Parses the input as a byte literal. Returns an error if the input is
    /// invalid or represents a different kind of literal.
    pub fn parse(input: B) -> Result<Self, ParseError> {
        // A byte literal must begin with `b'`; reject everything else up
        // front before running the main parsing routine.
        if input.is_empty() {
            Err(perr(None, Empty))
        } else if !input.starts_with("b'") {
            Err(perr(None, InvalidByteLiteralStart))
        } else {
            let (value, start_suffix) = parse_impl(&input)?;
            Ok(Self { raw: input, value, start_suffix })
        }
    }

    /// Returns the byte value that this literal represents.
    pub fn value(&self) -> u8 {
        self.value
    }

    /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
    pub fn suffix(&self) -> &str {
        let raw: &str = &self.raw;
        &raw[self.start_suffix..]
    }

    /// Returns the raw input that was passed to `parse`.
    pub fn raw_input(&self) -> &str {
        &self.raw
    }

    /// Returns the raw input that was passed to `parse`, potentially owned.
    pub fn into_raw_input(self) -> B {
        self.raw
    }
}
impl ByteLit<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn to_owned(&self) -> ByteLit<String> {
        // `ByteLit<&str>` is `Copy`, so destructuring `*self` is free.
        let ByteLit { raw, start_suffix, value } = *self;
        ByteLit {
            raw: raw.to_owned(),
            start_suffix,
            value,
        }
    }
}

impl<B: Buffer> fmt::Display for ByteLit<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Display the literal exactly as it was written in the source.
        f.pad(&self.raw)
    }
}
/// Precondition: must start with `b'`.
///
/// Returns the decoded byte value and the start index of the suffix
/// (i.e. the index just past the closing quote).
#[inline(never)]
pub(crate) fn parse_impl(input: &str) -> Result<(u8, usize), ParseError> {
    let input_bytes = input.as_bytes();
    // Index 2 is the first byte after the `b'` prefix.
    let first = input_bytes.get(2).ok_or(perr(None, UnterminatedByteLiteral))?;
    // `c` is the decoded byte, `len` how many input bytes it occupied.
    let (c, len) = match first {
        // `b'''` contains an unescaped quote; a lone `b''` is empty.
        b'\'' if input_bytes.get(3) == Some(&b'\'') => return Err(perr(2, UnescapedSingleQuote)),
        b'\'' => return Err(perr(None, EmptyByteLiteral)),
        b'\n' | b'\t' | b'\r' => return Err(perr(2, UnescapedSpecialWhitespace)),
        b'\\' => unescape::<u8>(&input[2..], 2)?,
        other if other.is_ascii() => (*other, 1),
        _ => return Err(perr(2, NonAsciiInByteLiteral)),
    };
    // The closing quote must come directly after the (escaped) byte.
    match input[2 + len..].find('\'') {
        Some(0) => {}
        Some(_) => return Err(perr(None, OverlongByteLiteral)),
        None => return Err(perr(None, UnterminatedByteLiteral)),
    }
    // Everything after the closing quote is the (possibly empty) suffix.
    let start_suffix = 2 + len + 1;
    let suffix = &input[start_suffix..];
    check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;
    Ok((c, start_suffix))
}
#[cfg(test)]
mod tests;

188
third_party/rust/litrs/src/byte/tests.rs vendored Normal file
View file

@ -0,0 +1,188 @@
use crate::{ByteLit, Literal, test_util::{assert_parse_ok_eq, assert_roundtrip}};
// ===== Utility functions =======================================================================
// Parses `$input` as a byte literal via both `ByteLit::parse` and
// `Literal::parse`, then checks the decoded value, the suffix, and that the
// literal round-trips through `Display`.
macro_rules! check {
    ($lit:literal) => { check!($lit, stringify!($lit), "") };
    ($lit:literal, $input:expr, $suffix:literal) => {
        let input = $input;
        let expected = ByteLit {
            raw: input,
            start_suffix: input.len() - $suffix.len(),
            value: $lit,
        };

        assert_parse_ok_eq(input, ByteLit::parse(input), expected.clone(), "ByteLit::parse");
        assert_parse_ok_eq(input, Literal::parse(input), Literal::Byte(expected), "Literal::parse");
        let lit = ByteLit::parse(input).unwrap();
        assert_eq!(lit.value(), $lit);
        assert_eq!(lit.suffix(), $suffix);
        assert_roundtrip(expected.to_owned(), input);
    };
}
// ===== Actual tests ============================================================================
#[test]
fn alphanumeric() {
    // Unescaped ASCII letters and digits.
    check!(b'a');
    check!(b'b');
    check!(b'y');
    check!(b'z');
    check!(b'A');
    check!(b'B');
    check!(b'Y');
    check!(b'Z');
    check!(b'0');
    check!(b'1');
    check!(b'8');
    check!(b'9');
}

#[test]
fn special_chars() {
    // Printable ASCII punctuation that needs no escaping.
    check!(b' ');
    check!(b'!');
    check!(b'"');
    check!(b'#');
    check!(b'$');
    check!(b'%');
    check!(b'&');
    check!(b'(');
    check!(b')');
    check!(b'*');
    check!(b'+');
    check!(b',');
    check!(b'-');
    check!(b'.');
    check!(b'/');
    check!(b':');
    check!(b';');
    check!(b'<');
    check!(b'=');
    check!(b'>');
    check!(b'?');
    check!(b'@');
    check!(b'[');
    check!(b']');
    check!(b'^');
    check!(b'_');
    check!(b'`');
    check!(b'{');
    check!(b'|');
    check!(b'}');
    check!(b'~');
}

#[test]
fn quote_escapes() {
    check!(b'\'');
    check!(b'\"');
}

#[test]
fn ascii_escapes() {
    // Named escapes plus `\x` escapes in the ASCII range (<= 0x7F).
    check!(b'\n');
    check!(b'\r');
    check!(b'\t');
    check!(b'\\');
    check!(b'\0');
    check!(b'\x00');
    check!(b'\x01');
    check!(b'\x0c');
    check!(b'\x0D');
    check!(b'\x13');
    check!(b'\x30');
    check!(b'\x30');
    check!(b'\x4B');
    check!(b'\x6b');
    check!(b'\x7F');
    check!(b'\x7f');
}

#[test]
fn byte_escapes() {
    // `\x` escapes above 0x7F are allowed in byte literals (unlike chars).
    check!(b'\x80');
    check!(b'\x8a');
    check!(b'\x8C');
    check!(b'\x99');
    check!(b'\xa0');
    check!(b'\xAd');
    check!(b'\xfe');
    check!(b'\xFe');
    check!(b'\xfF');
    check!(b'\xFF');
}

#[test]
fn suffixes() {
    // Anything after the closing quote is parsed as a literal suffix.
    check!(b'a', r##"b'a'peter"##, "peter");
    check!(b'#', r##"b'#'peter"##, "peter");
    check!(b'\n', r##"b'\n'peter"##, "peter");
    check!(b'\'', r##"b'\''peter"##, "peter");
    check!(b'\"', r##"b'\"'peter"##, "peter");
    check!(b'\xFF', r##"b'\xFF'peter"##, "peter");
}
#[test]
fn invalid_escapes() {
    // Renamed from `invald_escapes` (typo). Covers unknown escape codes,
    // unterminated escapes, and malformed `\x` hex escapes.
    assert_err!(ByteLit, r"b'\a'", UnknownEscape, 2..4);
    assert_err!(ByteLit, r"b'\y'", UnknownEscape, 2..4);
    assert_err!(ByteLit, r"b'\", UnterminatedEscape, 2..3);
    assert_err!(ByteLit, r"b'\x'", UnterminatedEscape, 2..5);
    assert_err!(ByteLit, r"b'\x1'", InvalidXEscape, 2..6);
    assert_err!(ByteLit, r"b'\xaj'", InvalidXEscape, 2..6);
    assert_err!(ByteLit, r"b'\xjb'", InvalidXEscape, 2..6);
}
#[test]
fn unicode_escape_not_allowed() {
    // `\u{...}` escapes are rejected in byte literals regardless of value.
    assert_err!(ByteLit, r"b'\u{0}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{00}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{b}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{B}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{7e}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{E4}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{e4}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{fc}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{Fc}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{fC}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{FC}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{b10}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{B10}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{0b10}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{2764}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{1f602}'", UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteLit, r"b'\u{1F602}'", UnicodeEscapeInByteLiteral, 2..4);
}

#[test]
fn parse_err() {
    // Structural errors: empty/unterminated/overlong literals, stray quotes,
    // unescaped special whitespace, and non-ASCII content.
    assert_err!(ByteLit, r"b''", EmptyByteLiteral, None);
    assert_err!(ByteLit, r"b' ''", UnexpectedChar, 4..5);
    assert_err!(ByteLit, r"b'", UnterminatedByteLiteral, None);
    assert_err!(ByteLit, r"b'a", UnterminatedByteLiteral, None);
    assert_err!(ByteLit, r"b'\n", UnterminatedByteLiteral, None);
    assert_err!(ByteLit, r"b'\x35", UnterminatedByteLiteral, None);
    assert_err!(ByteLit, r"b'ab'", OverlongByteLiteral, None);
    assert_err!(ByteLit, r"b'a _'", OverlongByteLiteral, None);
    assert_err!(ByteLit, r"b'\n3'", OverlongByteLiteral, None);
    assert_err!(ByteLit, r"", Empty, None);
    assert_err!(ByteLit, r"b'''", UnescapedSingleQuote, 2);
    assert_err!(ByteLit, r"b''''", UnescapedSingleQuote, 2);
    assert_err!(ByteLit, "b'\n'", UnescapedSpecialWhitespace, 2);
    assert_err!(ByteLit, "b'\t'", UnescapedSpecialWhitespace, 2);
    assert_err!(ByteLit, "b'\r'", UnescapedSpecialWhitespace, 2);
    assert_err!(ByteLit, "b'న'", NonAsciiInByteLiteral, 2);
    assert_err!(ByteLit, "b'犬'", NonAsciiInByteLiteral, 2);
    assert_err!(ByteLit, "b'🦊'", NonAsciiInByteLiteral, 2);
}

View file

@ -0,0 +1,126 @@
use std::{fmt, ops::Range};
use crate::{
Buffer, ParseError,
err::{perr, ParseErrorKind::*},
escape::{scan_raw_string, unescape_string},
};
/// A byte string or raw byte string literal, e.g. `b"hello"` or `br#"abc"def"#`.
///
/// See [the reference][ref] for more information.
///
/// [ref]: https://doc.rust-lang.org/reference/tokens.html#byte-string-literals
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ByteStringLit<B: Buffer> {
    /// The raw input.
    raw: B,
    /// The string value (with all escapes resolved), or `None` if there were
    /// no escapes. In the latter case, `raw` contains the string value.
    value: Option<Vec<u8>>,
    /// The number of hash signs in case of a raw string literal, or `None` if
    /// it's not a raw string literal.
    num_hashes: Option<u32>,
    /// Start index of the suffix or `raw.len()` if there is no suffix.
    start_suffix: usize,
}
impl<B: Buffer> ByteStringLit<B> {
    /// Parses the input as a (raw) byte string literal. Returns an error if the
    /// input is invalid or represents a different kind of literal.
    pub fn parse(input: B) -> Result<Self, ParseError> {
        // Byte string literals start with either `b"` or `br`.
        if input.is_empty() {
            return Err(perr(None, Empty));
        }
        if !input.starts_with(r#"b""#) && !input.starts_with("br") {
            return Err(perr(None, InvalidByteStringLiteralStart));
        }
        let (value, num_hashes, start_suffix) = parse_impl(&input)?;
        Ok(Self { raw: input, value, num_hashes, start_suffix })
    }

    /// Returns the string value this literal represents (where all escapes have
    /// been turned into their respective values).
    pub fn value(&self) -> &[u8] {
        // `value` is only `Some` if the literal contained escapes; otherwise
        // the inner slice of the raw input already is the value.
        self.value.as_deref().unwrap_or(&self.raw.as_bytes()[self.inner_range()])
    }

    /// Like `value` but returns a potentially owned version of the value.
    ///
    /// The return value is either `Cow<'static, [u8]>` if `B = String`, or
    /// `Cow<'a, [u8]>` if `B = &'a str`.
    pub fn into_value(self) -> B::ByteCow {
        let inner_range = self.inner_range();
        let Self { raw, value, .. } = self;
        value.map(B::ByteCow::from).unwrap_or_else(|| raw.cut(inner_range).into_byte_cow())
    }

    /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
    pub fn suffix(&self) -> &str {
        &(*self.raw)[self.start_suffix..]
    }

    /// Returns whether this literal is a raw string literal (starting with
    /// `r`).
    pub fn is_raw_byte_string(&self) -> bool {
        self.num_hashes.is_some()
    }

    /// Returns the raw input that was passed to `parse`.
    pub fn raw_input(&self) -> &str {
        &self.raw
    }

    /// Returns the raw input that was passed to `parse`, potentially owned.
    pub fn into_raw_input(self) -> B {
        self.raw
    }

    /// The range within `self.raw` that excludes the quotes and potential `r#`.
    fn inner_range(&self) -> Range<usize> {
        // Normal literal: skip `b"` at the front and `"` before the suffix.
        // Raw literal with n hashes: skip `br` + hashes + `"` at the front and
        // `"` + hashes before the suffix.
        match self.num_hashes {
            None => 2..self.start_suffix - 1,
            Some(n) => 2 + n as usize + 1..self.start_suffix - n as usize - 1,
        }
    }
}
impl ByteStringLit<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn into_owned(self) -> ByteStringLit<String> {
        // Destructure first so every field is moved, not cloned, except for
        // the raw buffer which has to be copied into a `String`.
        let ByteStringLit { raw, value, num_hashes, start_suffix } = self;
        ByteStringLit {
            raw: raw.to_owned(),
            value,
            num_hashes,
            start_suffix,
        }
    }
}

impl<B: Buffer> fmt::Display for ByteStringLit<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Emit the literal exactly as written, including quotes and suffix.
        f.pad(&self.raw)
    }
}
/// Precondition: input has to start with either `b"` or `br`.
#[inline(never)]
fn parse_impl(input: &str) -> Result<(Option<Vec<u8>>, Option<u32>, usize), ParseError> {
    // Raw byte strings (`br...`) and escaped byte strings (`b"..."`) are
    // handled by two different scanning routines; both yield the value as a
    // `String` which is converted into raw bytes here.
    if input.starts_with("br") {
        let (value, num_hashes, start_suffix) = scan_raw_string::<u8>(input, 2)?;
        Ok((value.map(String::into_bytes), Some(num_hashes), start_suffix))
    } else {
        let (value, start_suffix) = unescape_string::<u8>(input, 2)?;
        Ok((value.map(String::into_bytes), None, start_suffix))
    }
}
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,224 @@
use crate::{Literal, ByteStringLit, test_util::{assert_parse_ok_eq, assert_roundtrip}};
// ===== Utility functions =======================================================================
// Parses `$input` as a byte string literal via both `ByteStringLit::parse`
// and `Literal::parse`, then checks the value, the suffix, `into_value`, and
// that the literal round-trips through `Display`.
macro_rules! check {
    ($lit:literal, $has_escapes:expr, $num_hashes:expr) => {
        check!($lit, stringify!($lit), $has_escapes, $num_hashes, "")
    };
    ($lit:literal, $input:expr, $has_escapes:expr, $num_hashes:expr, $suffix:literal) => {
        let input = $input;
        let expected = ByteStringLit {
            raw: input,
            value: if $has_escapes { Some($lit.to_vec()) } else { None },
            num_hashes: $num_hashes,
            start_suffix: input.len() - $suffix.len(),
        };

        assert_parse_ok_eq(
            input, ByteStringLit::parse(input), expected.clone(), "ByteStringLit::parse");
        assert_parse_ok_eq(
            input, Literal::parse(input), Literal::ByteString(expected.clone()), "Literal::parse");
        let lit = ByteStringLit::parse(input).unwrap();
        assert_eq!(lit.value(), $lit);
        assert_eq!(lit.suffix(), $suffix);
        assert_eq!(lit.into_value().as_ref(), $lit);
        assert_roundtrip(expected.into_owned(), input);
    };
}
// ===== Actual tests ============================================================================
#[test]
fn simple() {
    check!(b"", false, None);
    check!(b"a", false, None);
    check!(b"peter", false, None);
}

#[test]
fn special_whitespace() {
    // Tab and newline are allowed unescaped in (raw) byte strings.
    let strings = ["\n", "\t", "foo\tbar", "baz\n"];
    for &s in &strings {
        let input = format!(r#"b"{}""#, s);
        let input_raw = format!(r#"br"{}""#, s);
        for (input, num_hashes) in vec![(input, None), (input_raw, Some(0))] {
            let expected = ByteStringLit {
                raw: &*input,
                value: None,
                num_hashes,
                start_suffix: input.len(),
            };
            assert_parse_ok_eq(
                &input, ByteStringLit::parse(&*input), expected.clone(), "ByteStringLit::parse");
            assert_parse_ok_eq(
                &input, Literal::parse(&*input), Literal::ByteString(expected), "Literal::parse");
            assert_eq!(ByteStringLit::parse(&*input).unwrap().value(), s.as_bytes());
            assert_eq!(ByteStringLit::parse(&*input).unwrap().into_value(), s.as_bytes());
        }
    }

    // A lone `\r` parses fine in a raw byte string (see `parse_err` for the
    // non-raw case, where it is an `IsolatedCr` error).
    let res = ByteStringLit::parse("br\"\r\"").expect("failed to parse");
    assert_eq!(res.value(), b"\r");
}

#[test]
fn simple_escapes() {
    check!(b"a\nb", true, None);
    check!(b"\nb", true, None);
    check!(b"a\n", true, None);
    check!(b"\n", true, None);
    check!(b"\x60foo \t bar\rbaz\n banana \0kiwi", true, None);
    check!(b"foo \\ferris", true, None);
    check!(b"baz \\ferris\"box", true, None);
    check!(b"\\foo\\ banana\" baz\"", true, None);
    check!(b"\"foo \\ferris \" baz\\", true, None);
    check!(b"\x00", true, None);
    check!(b" \x01", true, None);
    check!(b"\x0c foo", true, None);
    check!(b" foo\x0D ", true, None);
    check!(b"\\x13", true, None);
    check!(b"\"x30", true, None);
}

#[test]
fn string_continue() {
    // A backslash before a newline skips the newline and all leading
    // whitespace on the next line ("string continue").
    check!(b"foo\
bar", true, None);
    check!(b"foo\
bar", true, None);
    check!(b"foo\
banana", true, None);
    // Weird whitespace characters
    let lit = ByteStringLit::parse("b\"foo\\\n\r\t\n \n\tbar\"").expect("failed to parse");
    assert_eq!(lit.value(), b"foobar");
    // Raw strings do not handle "string continues"
    check!(br"foo\
bar", false, Some(0));
}

#[test]
fn crlf_newlines() {
    // `\r\n` sequences are normalized to `\n` in both kinds of literal.
    let lit = ByteStringLit::parse("b\"foo\r\nbar\"").expect("failed to parse");
    assert_eq!(lit.value(), b"foo\nbar");
    let lit = ByteStringLit::parse("b\"\r\nbar\"").expect("failed to parse");
    assert_eq!(lit.value(), b"\nbar");
    let lit = ByteStringLit::parse("b\"foo\r\n\"").expect("failed to parse");
    assert_eq!(lit.value(), b"foo\n");
    let lit = ByteStringLit::parse("br\"foo\r\nbar\"").expect("failed to parse");
    assert_eq!(lit.value(), b"foo\nbar");
    let lit = ByteStringLit::parse("br#\"\r\nbar\"#").expect("failed to parse");
    assert_eq!(lit.value(), b"\nbar");
    let lit = ByteStringLit::parse("br##\"foo\r\n\"##").expect("failed to parse");
    assert_eq!(lit.value(), b"foo\n");
}

#[test]
fn raw_byte_string() {
    // Raw literals with varying numbers of hashes; escapes stay verbatim.
    check!(br"", false, Some(0));
    check!(br"a", false, Some(0));
    check!(br"peter", false, Some(0));
    check!(br"Greetings jason!", false, Some(0));
    check!(br#""#, false, Some(1));
    check!(br#"a"#, false, Some(1));
    check!(br##"peter"##, false, Some(2));
    check!(br###"Greetings # Jason!"###, false, Some(3));
    check!(br########"we ## need #### more ####### hashtags"########, false, Some(8));
    check!(br#"foo " bar"#, false, Some(1));
    check!(br##"foo " bar"##, false, Some(2));
    check!(br#"foo """" '"'" bar"#, false, Some(1));
    check!(br#""foo""#, false, Some(1));
    check!(br###""foo'"###, false, Some(3));
    check!(br#""x'#_#s'"#, false, Some(1));
    check!(br"#", false, Some(0));
    check!(br"foo#", false, Some(0));
    check!(br"##bar", false, Some(0));
    check!(br###""##foo"##bar'"###, false, Some(3));
    check!(br"foo\n\t\r\0\\x60\u{123}doggo", false, Some(0));
    check!(br#"cat\n\t\r\0\\x60\u{123}doggo"#, false, Some(1));
}

#[test]
fn suffixes() {
    check!(b"hello", r###"b"hello"suffix"###, false, None, "suffix");
    check!(b"fox", r#"b"fox"peter"#, false, None, "peter");
    check!(b"a\x0cb\\", r#"b"a\x0cb\\"_jürgen"#, true, None, "_jürgen");
    check!(br"a\x0cb\\", r###"br#"a\x0cb\\"#_jürgen"###, false, Some(1), "_jürgen");
}

#[test]
fn parse_err() {
    // Unterminated literals, bad suffixes, isolated `\r`, and malformed raw
    // string delimiters.
    assert_err!(ByteStringLit, r#"b""#, UnterminatedString, None);
    assert_err!(ByteStringLit, r#"b"cat"#, UnterminatedString, None);
    assert_err!(ByteStringLit, r#"b"Jurgen"#, UnterminatedString, None);
    assert_err!(ByteStringLit, r#"b"foo bar baz"#, UnterminatedString, None);
    assert_err!(ByteStringLit, r#"b"fox"peter""#, InvalidSuffix, 6);
    assert_err!(ByteStringLit, r###"br#"foo "# bar"#"###, UnexpectedChar, 10);
    assert_err!(ByteStringLit, "b\"\r\"", IsolatedCr, 2);
    assert_err!(ByteStringLit, "b\"fo\rx\"", IsolatedCr, 4);
    assert_err!(ByteStringLit, r##"br####""##, UnterminatedRawString, None);
    assert_err!(ByteStringLit, r#####"br##"foo"#bar"#####, UnterminatedRawString, None);
    assert_err!(ByteStringLit, r##"br####"##, InvalidLiteral, None);
    assert_err!(ByteStringLit, r##"br####x"##, InvalidLiteral, None);
}

#[test]
fn non_ascii() {
    // Byte strings must be pure ASCII, in both normal and raw form.
    assert_err!(ByteStringLit, r#"b"న""#, NonAsciiInByteLiteral, 2);
    assert_err!(ByteStringLit, r#"b"foo犬""#, NonAsciiInByteLiteral, 5);
    assert_err!(ByteStringLit, r#"b"x🦊baz""#, NonAsciiInByteLiteral, 3);
    assert_err!(ByteStringLit, r#"br"న""#, NonAsciiInByteLiteral, 3);
    assert_err!(ByteStringLit, r#"br"foo犬""#, NonAsciiInByteLiteral, 6);
    assert_err!(ByteStringLit, r#"br"x🦊baz""#, NonAsciiInByteLiteral, 4);
}

#[test]
fn invalid_escapes() {
    assert_err!(ByteStringLit, r#"b"\a""#, UnknownEscape, 2..4);
    assert_err!(ByteStringLit, r#"b"foo\y""#, UnknownEscape, 5..7);
    assert_err!(ByteStringLit, r#"b"\"#, UnterminatedEscape, 2);
    assert_err!(ByteStringLit, r#"b"\x""#, UnterminatedEscape, 2..4);
    assert_err!(ByteStringLit, r#"b"foo\x1""#, UnterminatedEscape, 5..8);
    assert_err!(ByteStringLit, r#"b" \xaj""#, InvalidXEscape, 3..7);
    assert_err!(ByteStringLit, r#"b"\xjbbaz""#, InvalidXEscape, 2..6);
}

#[test]
fn unicode_escape_not_allowed() {
    // `\u{...}` escapes are rejected in byte string literals.
    assert_err!(ByteStringLit, r#"b"\u{0}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{00}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{b}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{B}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{7e}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{E4}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{e4}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{fc}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{Fc}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{fC}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{FC}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{b10}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{B10}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{0b10}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{2764}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{1f602}""#, UnicodeEscapeInByteLiteral, 2..4);
    assert_err!(ByteStringLit, r#"b"\u{1F602}""#, UnicodeEscapeInByteLiteral, 2..4);
}

105
third_party/rust/litrs/src/char/mod.rs vendored Normal file
View file

@ -0,0 +1,105 @@
use std::fmt;
use crate::{
Buffer, ParseError,
err::{perr, ParseErrorKind::*},
escape::unescape,
parse::{first_byte_or_empty, check_suffix},
};
/// A character literal, e.g. `'g'` or `'🦊'`.
///
/// See [the reference][ref] for more information.
///
/// [ref]: https://doc.rust-lang.org/reference/tokens.html#character-literals
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CharLit<B: Buffer> {
    /// The raw input the literal was parsed from, e.g. `'x'`.
    raw: B,
    /// Start index of the suffix or `raw.len()` if there is no suffix.
    start_suffix: usize,
    /// The decoded character this literal represents.
    value: char,
}
impl<B: Buffer> CharLit<B> {
    /// Parses the input as a character literal. Returns an error if the input
    /// is invalid or represents a different kind of literal.
    pub fn parse(input: B) -> Result<Self, ParseError> {
        // A char literal must begin with a single quote; reject everything
        // else up front before running the main parsing routine.
        if first_byte_or_empty(&input)? != b'\'' {
            return Err(perr(0, DoesNotStartWithQuote));
        }
        let (value, start_suffix) = parse_impl(&input)?;
        Ok(Self { raw: input, value, start_suffix })
    }

    /// Returns the character value that this literal represents.
    pub fn value(&self) -> char {
        self.value
    }

    /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
    pub fn suffix(&self) -> &str {
        let raw: &str = &self.raw;
        &raw[self.start_suffix..]
    }

    /// Returns the raw input that was passed to `parse`.
    pub fn raw_input(&self) -> &str {
        &self.raw
    }

    /// Returns the raw input that was passed to `parse`, potentially owned.
    pub fn into_raw_input(self) -> B {
        self.raw
    }
}
impl CharLit<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn to_owned(&self) -> CharLit<String> {
        // `CharLit<&str>` is `Copy`, so destructuring `*self` is free.
        let CharLit { raw, start_suffix, value } = *self;
        CharLit {
            raw: raw.to_owned(),
            start_suffix,
            value,
        }
    }
}

impl<B: Buffer> fmt::Display for CharLit<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Show the literal exactly as it appeared in the source.
        f.pad(&self.raw)
    }
}
/// Precondition: first character in input must be `'`.
///
/// Returns the decoded character and the start index of the suffix
/// (i.e. the index just past the closing quote).
#[inline(never)]
pub(crate) fn parse_impl(input: &str) -> Result<(char, usize), ParseError> {
    // The char at index 1 is the first one after the opening quote.
    let first = input.chars().nth(1).ok_or(perr(None, UnterminatedCharLiteral))?;
    // `c` is the decoded char, `len` how many input bytes it occupied.
    let (c, len) = match first {
        // `'''` contains an unescaped quote; a lone `''` is empty.
        '\'' if input.chars().nth(2) == Some('\'') => return Err(perr(1, UnescapedSingleQuote)),
        '\'' => return Err(perr(None, EmptyCharLiteral)),
        '\n' | '\t' | '\r'
            => return Err(perr(1, UnescapedSpecialWhitespace)),
        '\\' => unescape::<char>(&input[1..], 1)?,
        other => (other, other.len_utf8()),
    };
    // The closing quote must come directly after the (escaped) character.
    match input[1 + len..].find('\'') {
        Some(0) => {}
        Some(_) => return Err(perr(None, OverlongCharLiteral)),
        None => return Err(perr(None, UnterminatedCharLiteral)),
    }
    // Everything after the closing quote is the (possibly empty) suffix.
    let start_suffix = 1 + len + 1;
    let suffix = &input[start_suffix..];
    check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;
    Ok((c, start_suffix))
}
#[cfg(test)]
mod tests;

227
third_party/rust/litrs/src/char/tests.rs vendored Normal file
View file

@ -0,0 +1,227 @@
use crate::{Literal, test_util::{assert_parse_ok_eq, assert_roundtrip}};
use super::CharLit;
// ===== Utility functions =======================================================================
// Parses `$input` as a character literal via both `CharLit::parse` and
// `Literal::parse`, then checks the decoded value, the suffix, and that the
// literal round-trips through `Display`.
macro_rules! check {
    ($lit:literal) => { check!($lit, stringify!($lit), "") };
    ($lit:literal, $input:expr, $suffix:literal) => {
        let input = $input;
        let expected = CharLit {
            raw: input,
            start_suffix: input.len() - $suffix.len(),
            value: $lit,
        };

        assert_parse_ok_eq(input, CharLit::parse(input), expected.clone(), "CharLit::parse");
        assert_parse_ok_eq(input, Literal::parse(input), Literal::Char(expected), "Literal::parse");
        let lit = CharLit::parse(input).unwrap();
        assert_eq!(lit.value(), $lit);
        assert_eq!(lit.suffix(), $suffix);
        assert_roundtrip(expected.to_owned(), input);
    };
}
// ===== Actual tests ============================================================================
#[test]
fn alphanumeric() {
    // Unescaped ASCII letters and digits.
    check!('a');
    check!('b');
    check!('y');
    check!('z');
    check!('A');
    check!('B');
    check!('Y');
    check!('Z');
    check!('0');
    check!('1');
    check!('8');
    check!('9');
}

#[test]
fn special_chars() {
    // Printable ASCII punctuation that needs no escaping.
    check!(' ');
    check!('!');
    check!('"');
    check!('#');
    check!('$');
    check!('%');
    check!('&');
    check!('(');
    check!(')');
    check!('*');
    check!('+');
    check!(',');
    check!('-');
    check!('.');
    check!('/');
    check!(':');
    check!(';');
    check!('<');
    check!('=');
    check!('>');
    check!('?');
    check!('@');
    check!('[');
    check!(']');
    check!('^');
    check!('_');
    check!('`');
    check!('{');
    check!('|');
    check!('}');
    check!('~');
}

#[test]
fn unicode() {
    // Unlike byte literals, char literals may contain non-ASCII characters.
    check!('న');
    check!('犬');
    check!('🦊');
}

#[test]
fn quote_escapes() {
    check!('\'');
    check!('\"');
}

#[test]
fn ascii_escapes() {
    // Named escapes plus `\x` escapes in the ASCII range (<= 0x7F).
    check!('\n');
    check!('\r');
    check!('\t');
    check!('\\');
    check!('\0');
    check!('\x00');
    check!('\x01');
    check!('\x0c');
    check!('\x0D');
    check!('\x13');
    check!('\x30');
    check!('\x30');
    check!('\x4B');
    check!('\x6b');
    check!('\x7F');
    check!('\x7f');
}

#[test]
fn unicode_escapes() {
    // `\u{...}` escapes, including underscore digit separators.
    check!('\u{0}');
    check!('\u{00}');
    check!('\u{b}');
    check!('\u{B}');
    check!('\u{7e}');
    check!('\u{E4}');
    check!('\u{e4}');
    check!('\u{fc}');
    check!('\u{Fc}');
    check!('\u{fC}');
    check!('\u{FC}');
    check!('\u{b10}');
    check!('\u{B10}');
    check!('\u{0b10}');
    check!('\u{2764}');
    check!('\u{1f602}');
    check!('\u{1F602}');
    check!('\u{0}');
    check!('\u{0__}');
    check!('\u{3_b}');
    check!('\u{1_F_6_0_2}');
    check!('\u{1_F6_02_____}');
}

#[test]
fn suffixes() {
    // Anything after the closing quote is parsed as a literal suffix.
    check!('a', r##"'a'peter"##, "peter");
    check!('#', r##"'#'peter"##, "peter");
    check!('\n', r##"'\n'peter"##, "peter");
    check!('\'', r##"'\''peter"##, "peter");
    check!('\"', r##"'\"'peter"##, "peter");
}
/// `\xNN` escapes above 0x7F are rejected in char literals.
///
/// Fix: the test name was misspelled (`invald_ascii_escapes`).
#[test]
fn invalid_ascii_escapes() {
    assert_err!(CharLit, r"'\x80'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\x81'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\x8a'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\x8F'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xa0'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xB0'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xc3'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xDf'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xff'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xfF'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xFf'", NonAsciiXEscape, 1..5);
    assert_err!(CharLit, r"'\xFF'", NonAsciiXEscape, 1..5);
}
/// Unknown escape codes and unterminated or malformed `\x` escapes.
#[test]
fn invalid_escapes() {
    assert_err!(CharLit, r"'\a'", UnknownEscape, 1..3);
    assert_err!(CharLit, r"'\y'", UnknownEscape, 1..3);
    assert_err!(CharLit, r"'\", UnterminatedEscape, 1);
    assert_err!(CharLit, r"'\x'", UnterminatedEscape, 1..4);
    assert_err!(CharLit, r"'\x1'", InvalidXEscape, 1..5);
    assert_err!(CharLit, r"'\xaj'", InvalidXEscape, 1..5);
    assert_err!(CharLit, r"'\xjb'", InvalidXEscape, 1..5);
}
/// Malformed `\u{...}` escapes: missing braces, bad digits, too many digits
/// and out-of-range code points.
///
/// Fix: the `'\u{1234567}'` assertion appeared twice; the exact duplicate
/// added no coverage and has been removed.
#[test]
fn invalid_unicode_escapes() {
    assert_err!(CharLit, r"'\u'", UnicodeEscapeWithoutBrace, 1..3);
    assert_err!(CharLit, r"'\u '", UnicodeEscapeWithoutBrace, 1..3);
    assert_err!(CharLit, r"'\u3'", UnicodeEscapeWithoutBrace, 1..3);
    assert_err!(CharLit, r"'\u{'", UnterminatedUnicodeEscape, 1..5);
    assert_err!(CharLit, r"'\u{12'", UnterminatedUnicodeEscape, 1..7);
    assert_err!(CharLit, r"'\u{a0b'", UnterminatedUnicodeEscape, 1..8);
    assert_err!(CharLit, r"'\u{a0_b '", UnterminatedUnicodeEscape, 1..11);
    assert_err!(CharLit, r"'\u{_}'", InvalidStartOfUnicodeEscape, 4);
    assert_err!(CharLit, r"'\u{_5f}'", InvalidStartOfUnicodeEscape, 4);
    assert_err!(CharLit, r"'\u{x}'", NonHexDigitInUnicodeEscape, 4);
    assert_err!(CharLit, r"'\u{0x}'", NonHexDigitInUnicodeEscape, 5);
    assert_err!(CharLit, r"'\u{3bx}'", NonHexDigitInUnicodeEscape, 6);
    assert_err!(CharLit, r"'\u{3b_x}'", NonHexDigitInUnicodeEscape, 7);
    assert_err!(CharLit, r"'\u{4x_}'", NonHexDigitInUnicodeEscape, 5);
    assert_err!(CharLit, r"'\u{1234567}'", TooManyDigitInUnicodeEscape, 10);
    assert_err!(CharLit, r"'\u{1_23_4_56_7}'", TooManyDigitInUnicodeEscape, 14);
    assert_err!(CharLit, r"'\u{abcdef123}'", TooManyDigitInUnicodeEscape, 10);
    assert_err!(CharLit, r"'\u{110000}'", InvalidUnicodeEscapeChar, 1..10);
}
/// Structural errors: empty input, missing or overlong content, unescaped
/// quotes and unescaped special whitespace.
#[test]
fn parse_err() {
    assert_err!(CharLit, r"''", EmptyCharLiteral, None);
    assert_err!(CharLit, r"' ''", UnexpectedChar, 3);
    assert_err!(CharLit, r"'", UnterminatedCharLiteral, None);
    assert_err!(CharLit, r"'a", UnterminatedCharLiteral, None);
    assert_err!(CharLit, r"'\n", UnterminatedCharLiteral, None);
    assert_err!(CharLit, r"'\x35", UnterminatedCharLiteral, None);
    assert_err!(CharLit, r"'ab'", OverlongCharLiteral, None);
    assert_err!(CharLit, r"'a _'", OverlongCharLiteral, None);
    assert_err!(CharLit, r"'\n3'", OverlongCharLiteral, None);
    assert_err!(CharLit, r"", Empty, None);
    assert_err!(CharLit, r"'''", UnescapedSingleQuote, 1);
    assert_err!(CharLit, r"''''", UnescapedSingleQuote, 1);
    assert_err!(CharLit, "'\n'", UnescapedSpecialWhitespace, 1);
    assert_err!(CharLit, "'\t'", UnescapedSpecialWhitespace, 1);
    assert_err!(CharLit, "'\r'", UnescapedSpecialWhitespace, 1);
}

371
third_party/rust/litrs/src/err.rs vendored Normal file
View file

@ -0,0 +1,371 @@
use std::{fmt, ops::Range};
/// An error signaling that a different kind of token was expected. Returned by
/// the various `TryFrom` impls.
#[derive(Debug, Clone, Copy)]
pub struct InvalidToken {
    /// The token kind the `TryFrom` impl wanted to convert into.
    pub(crate) expected: TokenKind,
    /// The token kind actually found in the input.
    pub(crate) actual: TokenKind,
    /// Location of the offending token (proc-macro or proc-macro2 span).
    pub(crate) span: Span,
}
impl InvalidToken {
    /// Returns a token stream representing `compile_error!("msg");` where
    /// `"msg"` is the output of `self.to_string()`. **Panics if called outside
    /// of a proc-macro context!**
    pub fn to_compile_error(&self) -> proc_macro::TokenStream {
        use proc_macro::{Delimiter, Ident, Group, Punct, Spacing, TokenTree};

        // Obtain a compiler (`proc_macro`) span; converting a `proc-macro2`
        // span back via `unwrap()` is what panics outside a proc-macro context.
        let span = match self.span {
            Span::One(s) => s,
            #[cfg(feature = "proc-macro2")]
            Span::Two(s) => s.unwrap(),
        };
        let msg = self.to_string();

        // Assemble the tokens of `compile_error!("msg")`.
        let tokens = vec![
            TokenTree::from(Ident::new("compile_error", span)),
            TokenTree::from(Punct::new('!', Spacing::Alone)),
            TokenTree::from(Group::new(
                Delimiter::Parenthesis,
                TokenTree::from(proc_macro::Literal::string(&msg)).into(),
            )),
        ];

        // Give every token the offending token's span so the compiler error
        // points at the right location.
        tokens.into_iter().map(|mut t| { t.set_span(span); t }).collect()
    }

    /// Like [`to_compile_error`][Self::to_compile_error], but returns a token
    /// stream from `proc_macro2` and does not panic outside of a proc-macro
    /// context.
    #[cfg(feature = "proc-macro2")]
    pub fn to_compile_error2(&self) -> proc_macro2::TokenStream {
        use proc_macro2::{Delimiter, Ident, Group, Punct, Spacing, TokenTree};

        // Both span variants convert into a `proc_macro2::Span` without
        // touching the compiler, so this works outside proc-macro contexts.
        let span = match self.span {
            Span::One(s) => proc_macro2::Span::from(s),
            Span::Two(s) => s,
        };
        let msg = self.to_string();
        let tokens = vec![
            TokenTree::from(Ident::new("compile_error", span)),
            TokenTree::from(Punct::new('!', Spacing::Alone)),
            TokenTree::from(Group::new(
                Delimiter::Parenthesis,
                TokenTree::from(proc_macro2::Literal::string(&msg)).into(),
            )),
        ];
        tokens.into_iter().map(|mut t| { t.set_span(span); t }).collect()
    }
}
impl std::error::Error for InvalidToken {}

impl fmt::Display for InvalidToken {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Human-readable name for each token kind, used in the message below.
        let describe = |kind: TokenKind| -> &'static str {
            match kind {
                TokenKind::Punct => "a punctuation character",
                TokenKind::Ident => "an identifier",
                TokenKind::Group => "a group",
                TokenKind::Literal => "a literal",
                TokenKind::BoolLit => "a bool literal (`true` or `false`)",
                TokenKind::ByteLit => "a byte literal (e.g. `b'r')",
                TokenKind::ByteStringLit => r#"a byte string literal (e.g. `b"fox"`)"#,
                TokenKind::CharLit => "a character literal (e.g. `'P'`)",
                TokenKind::FloatLit => "a float literal (e.g. `3.14`)",
                TokenKind::IntegerLit => "an integer literal (e.g. `27`)",
                TokenKind::StringLit => r#"a string literal (e.g. "Ferris")"#,
            }
        };

        write!(
            f,
            "expected {}, but found {}",
            describe(self.expected),
            describe(self.actual),
        )
    }
}
/// The token and literal kinds this crate distinguishes between; used purely
/// for error reporting in [`InvalidToken`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum TokenKind {
    Punct,
    Ident,
    Group,
    Literal,
    BoolLit,
    ByteLit,
    ByteStringLit,
    CharLit,
    FloatLit,
    IntegerLit,
    StringLit,
}
/// A source span originating from either the compiler's `proc_macro` crate or
/// the `proc-macro2` shim. Unfortunately, we have to deal with both cases.
#[derive(Debug, Clone, Copy)]
pub(crate) enum Span {
    One(proc_macro::Span),
    #[cfg(feature = "proc-macro2")]
    Two(proc_macro2::Span),
}

impl From<proc_macro::Span> for Span {
    fn from(src: proc_macro::Span) -> Self {
        Self::One(src)
    }
}

#[cfg(feature = "proc-macro2")]
impl From<proc_macro2::Span> for Span {
    fn from(src: proc_macro2::Span) -> Self {
        Self::Two(src)
    }
}
/// Errors during parsing.
///
/// This type should be seen primarily for error reporting and not for catching
/// specific cases. The span and error kind are not guaranteed to be stable
/// over different versions of this library, meaning that a returned error can
/// change from one version to the next. There are simply too many fringe cases
/// that are not easy to classify as a specific error kind. It depends entirely
/// on the specific parser code how an invalid input is categorized.
///
/// Consider these examples:
/// - `'\` can be seen as
///     - invalid escape in character literal, or
///     - unterminated character literal.
/// - `'''` can be seen as
///     - empty character literal, or
///     - unescaped quote character in character literal.
/// - `0b64` can be seen as
///     - binary integer literal with invalid digit 6, or
///     - binary integer literal with invalid digit 4, or
///     - decimal integer literal with invalid digit b, or
///     - decimal integer literal 0 with unknown type suffix `b64`.
///
/// If you want to see more of these examples, feel free to check out the unit
/// tests of this library.
///
/// While this library does its best to emit sensible and precise errors, and to
/// keep the returned errors as stable as possible, full stability cannot be
/// guaranteed.
#[derive(Debug, Clone)]
pub struct ParseError {
    /// Byte range of the offending part of the input, if one can be pinpointed.
    pub(crate) span: Option<Range<usize>>,
    /// What went wrong; see [`ParseErrorKind`].
    pub(crate) kind: ParseErrorKind,
}
impl ParseError {
    /// Returns a span of this error, if available. **Note**: the returned span
    /// might change in future versions of this library. See [the documentation
    /// of this type][ParseError] for more information.
    pub fn span(&self) -> Option<Range<usize>> {
        self.span.as_ref().cloned()
    }
}
/// Builds a `ParseError` from any span-like value and an error kind.
///
/// This is a free-standing function instead of an associated one to reduce
/// noise around parsing code: there are lots of places that create errors,
/// and we want to keep those call sites as short as possible.
pub(crate) fn perr(span: impl SpanLike, kind: ParseErrorKind) -> ParseError {
    let span = span.into_span();
    ParseError { span, kind }
}
/// Converts assorted span-ish values into the `Option<Range<usize>>` form
/// stored inside [`ParseError`]. Implemented for an optional range, a plain
/// range and a single index.
pub(crate) trait SpanLike {
    fn into_span(self) -> Option<Range<usize>>;
}

impl SpanLike for Option<Range<usize>> {
    /// Already in the stored form; passed through unchanged.
    #[inline(always)]
    fn into_span(self) -> Option<Range<usize>> {
        self
    }
}

impl SpanLike for Range<usize> {
    /// A concrete range is always a present span.
    #[inline(always)]
    fn into_span(self) -> Option<Range<usize>> {
        Some(self)
    }
}

impl SpanLike for usize {
    /// A single index denotes the one-byte span starting at that index.
    #[inline(always)]
    fn into_span(self) -> Option<Range<usize>> {
        let start = self;
        Some(start..start + 1)
    }
}
/// Kinds of errors.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub(crate) enum ParseErrorKind {
    /// The input was an empty string
    Empty,
    /// An unexpected char was encountered.
    UnexpectedChar,
    /// Literal was not recognized.
    InvalidLiteral,
    /// Input does not start with decimal digit when trying to parse an integer.
    DoesNotStartWithDigit,
    /// A digit invalid for the specified integer base was found.
    InvalidDigit,
    /// Integer literal does not contain any valid digits.
    NoDigits,
    /// Exponent of a float literal does not contain any digits.
    NoExponentDigits,
    /// An unknown escape code, e.g. `\b`.
    UnknownEscape,
    /// A started escape sequence where the input ended before the escape was
    /// finished.
    UnterminatedEscape,
    /// An `\x` escape where the two digits are not valid hex digits.
    InvalidXEscape,
    /// A string or character literal using the `\xNN` escape where `NN > 0x7F`.
    NonAsciiXEscape,
    /// A `\u{...}` escape in a byte or byte string literal.
    UnicodeEscapeInByteLiteral,
    /// A Unicode escape that does not start with a hex digit.
    InvalidStartOfUnicodeEscape,
    /// A `\u{...}` escape that lacks the opening brace.
    UnicodeEscapeWithoutBrace,
    /// In a `\u{...}` escape, a non-hex digit and non-underscore character was
    /// found.
    NonHexDigitInUnicodeEscape,
    /// More than 6 digits found in unicode escape.
    TooManyDigitInUnicodeEscape,
    /// The value from a unicode escape does not represent a valid character.
    InvalidUnicodeEscapeChar,
    /// A `\u{..` escape that is not terminated (lacks the closing brace).
    UnterminatedUnicodeEscape,
    /// A character literal that's not terminated.
    UnterminatedCharLiteral,
    /// A character literal that contains more than one character.
    OverlongCharLiteral,
    /// An empty character literal, i.e. `''`.
    EmptyCharLiteral,
    /// A byte literal that's not terminated.
    UnterminatedByteLiteral,
    /// A byte literal that contains more than one byte.
    OverlongByteLiteral,
    /// An empty byte literal.
    EmptyByteLiteral,
    /// A non-ASCII character in a byte or byte string literal.
    NonAsciiInByteLiteral,
    /// A `'` character was not escaped in a character or byte literal, or a `"`
    /// character was not escaped in a string or byte string literal.
    UnescapedSingleQuote,
    /// A \n, \t or \r raw character in a char or byte literal.
    UnescapedSpecialWhitespace,
    /// When parsing a character, byte, string or byte string literal directly
    /// and the input does not start with the corresponding quote character
    /// (plus optional raw string prefix).
    DoesNotStartWithQuote,
    /// Unterminated raw string literal.
    UnterminatedRawString,
    /// String literal without a `"` at the end.
    UnterminatedString,
    /// Invalid start for a string literal.
    InvalidStringLiteralStart,
    /// Invalid start for a byte literal.
    InvalidByteLiteralStart,
    /// Invalid start for a byte string literal.
    InvalidByteStringLiteralStart,
    /// An literal `\r` character not followed by a `\n` character in a
    /// (raw) string or byte string literal.
    IsolatedCr,
    /// Literal suffix is not a valid identifier.
    InvalidSuffix,
    /// Returned by `Float::parse` if an integer literal (no fractional nor
    /// exponent part) is passed.
    UnexpectedIntegerLit,
    /// Integer suffixes cannot start with `e` or `E` as this conflicts with the
    /// grammar for float literals.
    IntegerSuffixStartingWithE,
}
impl std::error::Error for ParseError {}

impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use ParseErrorKind::*;

        // Static, human-readable description for each error kind.
        let description = match self.kind {
            Empty => "input is empty",
            UnexpectedChar => "unexpected character",
            InvalidLiteral => "invalid literal",
            DoesNotStartWithDigit => "number literal does not start with decimal digit",
            InvalidDigit => "integer literal contains a digit invalid for its base",
            NoDigits => "integer literal does not contain any digits",
            NoExponentDigits => "exponent of floating point literal does not contain any digits",
            UnknownEscape => "unknown escape",
            UnterminatedEscape => "unterminated escape: input ended too soon",
            InvalidXEscape => r"invalid `\x` escape: not followed by two hex digits",
            NonAsciiXEscape => r"`\x` escape in char/string literal exceed ASCII range",
            UnicodeEscapeInByteLiteral => r"`\u{...}` escape in byte (string) literal not allowed",
            InvalidStartOfUnicodeEscape => r"invalid start of `\u{...}` escape",
            UnicodeEscapeWithoutBrace => r"`Unicode \u{...}` escape without opening brace",
            NonHexDigitInUnicodeEscape => r"non-hex digit found in `\u{...}` escape",
            TooManyDigitInUnicodeEscape => r"more than six digits in `\u{...}` escape",
            InvalidUnicodeEscapeChar => r"value specified in `\u{...}` escape is not a valid char",
            UnterminatedUnicodeEscape => r"unterminated `\u{...}` escape",
            UnterminatedCharLiteral => "character literal is not terminated",
            OverlongCharLiteral => "character literal contains more than one character",
            EmptyCharLiteral => "empty character literal",
            UnterminatedByteLiteral => "byte literal is not terminated",
            OverlongByteLiteral => "byte literal contains more than one byte",
            EmptyByteLiteral => "empty byte literal",
            NonAsciiInByteLiteral => "non ASCII character in byte (string) literal",
            UnescapedSingleQuote => "character literal contains unescaped ' character",
            UnescapedSpecialWhitespace => r"unescaped newline (\n), tab (\t) or cr (\r) character",
            DoesNotStartWithQuote => "invalid start for char/byte/string literal",
            UnterminatedRawString => "unterminated raw (byte) string literal",
            UnterminatedString => "unterminated (byte) string literal",
            InvalidStringLiteralStart => "invalid start for string literal",
            InvalidByteLiteralStart => "invalid start for byte literal",
            InvalidByteStringLiteralStart => "invalid start for byte string literal",
            IsolatedCr => r"`\r` not immediately followed by `\n` in string",
            InvalidSuffix => "literal suffix is not a valid identifier",
            UnexpectedIntegerLit => "expected float literal, but found integer",
            IntegerSuffixStartingWithE => "integer literal suffix must not start with 'e' or 'E'",
        };

        description.fmt(f)?;
        // Append the byte range when one is known.
        if let Some(span) = &self.span {
            write!(f, " (at {}..{})", span.start, span.end)?;
        }

        Ok(())
    }
}

262
third_party/rust/litrs/src/escape.rs vendored Normal file
View file

@ -0,0 +1,262 @@
use crate::{ParseError, err::{perr, ParseErrorKind::*}, parse::{hex_digit_value, check_suffix}};
/// Unescapes a single escape sequence. `input` must start with `\`.
///
/// On success, returns the unescaped value plus the number of input bytes the
/// escape consumed. `offset` is the position of `input` within the overall
/// literal and is used only for building error spans.
pub(crate) fn unescape<E: Escapee>(input: &str, offset: usize) -> Result<(E, usize), ParseError> {
    let first = input.as_bytes().get(1)
        .ok_or(perr(offset, UnterminatedEscape))?;
    let out = match first {
        // Quote escapes
        b'\'' => (E::from_byte(b'\''), 2),
        b'"' => (E::from_byte(b'"'), 2),

        // Ascii escapes
        b'n' => (E::from_byte(b'\n'), 2),
        b'r' => (E::from_byte(b'\r'), 2),
        b't' => (E::from_byte(b'\t'), 2),
        b'\\' => (E::from_byte(b'\\'), 2),
        b'0' => (E::from_byte(b'\0'), 2),
        b'x' => {
            // `\xNN`: exactly two hex digits must follow.
            let hex_string = input.get(2..4)
                .ok_or(perr(offset..offset + input.len(), UnterminatedEscape))?
                .as_bytes();
            let first = hex_digit_value(hex_string[0])
                .ok_or(perr(offset..offset + 4, InvalidXEscape))?;
            let second = hex_digit_value(hex_string[1])
                .ok_or(perr(offset..offset + 4, InvalidXEscape))?;
            let value = second + 16 * first;

            // In char/string literals (the unicode-capable targets), `\xNN`
            // is restricted to the ASCII range.
            if E::SUPPORTS_UNICODE && value > 0x7F {
                return Err(perr(offset..offset + 4, NonAsciiXEscape));
            }

            (E::from_byte(value), 4)
        },

        // Unicode escape
        b'u' => {
            if !E::SUPPORTS_UNICODE {
                return Err(perr(offset..offset + 2, UnicodeEscapeInByteLiteral));
            }

            if input.as_bytes().get(2) != Some(&b'{') {
                return Err(perr(offset..offset + 2, UnicodeEscapeWithoutBrace));
            }

            let closing_pos = input.bytes().position(|b| b == b'}')
                .ok_or(perr(offset..offset + input.len(), UnterminatedUnicodeEscape))?;

            let inner = &input[3..closing_pos];
            if inner.as_bytes().first() == Some(&b'_') {
                // NOTE(review): hardcoded `4` rather than `offset + 3` — this
                // only matches when the escape starts at offset 1 (char
                // literals, see the unit tests); confirm against upstream
                // before changing.
                return Err(perr(4, InvalidStartOfUnicodeEscape));
            }

            // Accumulate the hex digits, skipping `_` separators; at most six
            // digits are allowed.
            let mut v: u32 = 0;
            let mut digit_count = 0;
            for (i, b) in inner.bytes().enumerate() {
                if b == b'_' {
                    continue;
                }

                let digit = hex_digit_value(b)
                    .ok_or(perr(offset + 3 + i, NonHexDigitInUnicodeEscape))?;

                if digit_count == 6 {
                    return Err(perr(offset + 3 + i, TooManyDigitInUnicodeEscape));
                }
                digit_count += 1;
                v = 16 * v + digit as u32;
            }

            // `from_u32` rejects values that are not valid `char`s (surrogates
            // and anything above 0x10FFFF).
            let c = std::char::from_u32(v)
                .ok_or(perr(offset..closing_pos + 1, InvalidUnicodeEscapeChar))?;

            (E::from_char(c), closing_pos + 1)
        }

        _ => return Err(perr(offset..offset + 2, UnknownEscape)),
    };

    Ok(out)
}
/// The output of an escape sequence: a raw byte for byte/byte-string
/// literals, or a full `char` for char/string literals.
pub(crate) trait Escapee: Into<char> {
    /// Whether `\u{...}` escapes and values above 0x7F are permitted.
    const SUPPORTS_UNICODE: bool;
    fn from_byte(b: u8) -> Self;
    fn from_char(c: char) -> Self;
}

impl Escapee for u8 {
    const SUPPORTS_UNICODE: bool = false;

    fn from_byte(b: u8) -> Self {
        b
    }

    fn from_char(_: char) -> Self {
        // `unescape` rejects `\u{...}` escapes before ever reaching this
        // point when `SUPPORTS_UNICODE` is false.
        panic!("bug: `<u8 as Escapee>::from_char` was called");
    }
}

impl Escapee for char {
    const SUPPORTS_UNICODE: bool = true;

    fn from_byte(b: u8) -> Self {
        char::from(b)
    }

    fn from_char(c: char) -> Self {
        c
    }
}
/// Returns whether `b` is one of the whitespace bytes that are skipped after
/// a "string continue" start (unescaped backslash followed by `\n`).
fn is_string_continue_skipable_whitespace(b: u8) -> bool {
    matches!(b, b' ' | b'\t' | b'\n' | b'\r')
}
/// Unescapes a whole string or byte string.
///
/// Scanning starts at `offset` (presumably just past the opening quote —
/// confirm at the call sites). Returns the unescaped value — `None` when no
/// escape or `\r\n` was found, i.e. the raw input can be used as-is — and
/// the index at which the suffix starts.
#[inline(never)]
pub(crate) fn unescape_string<E: Escapee>(
    input: &str,
    offset: usize,
) -> Result<(Option<String>, usize), ParseError> {
    let mut closing_quote_pos = None;
    let mut i = offset;
    // Start of the chunk that has not yet been copied into `value`.
    let mut end_last_escape = offset;
    let mut value = String::new();
    while i < input.len() {
        match input.as_bytes()[i] {
            // Handle "string continue".
            b'\\' if input.as_bytes().get(i + 1) == Some(&b'\n') => {
                value.push_str(&input[end_last_escape..i]);

                // Find the first non-whitespace character.
                let end_escape = input[i + 2..].bytes()
                    .position(|b| !is_string_continue_skipable_whitespace(b))
                    .ok_or(perr(None, UnterminatedString))?;

                i += 2 + end_escape;
                end_last_escape = i;
            }
            b'\\' => {
                // NOTE(review): the slice ends at `input.len() - 1` —
                // presumably to stop the escape from consuming the final
                // byte; confirm this is still correct when a suffix follows
                // the closing quote.
                let (c, len) = unescape::<E>(&input[i..input.len() - 1], i)?;
                value.push_str(&input[end_last_escape..i]);
                value.push(c.into());
                i += len;
                end_last_escape = i;
            }
            b'\r' => {
                // A raw `\r` is only allowed as part of `\r\n`, which is
                // normalized to a plain `\n`.
                if input.as_bytes().get(i + 1) == Some(&b'\n') {
                    value.push_str(&input[end_last_escape..i]);
                    value.push('\n');
                    i += 2;
                    end_last_escape = i;
                } else {
                    return Err(perr(i, IsolatedCr))
                }
            }
            b'"' => {
                closing_quote_pos = Some(i);
                break;
            },
            // Byte (string) literals must stay within ASCII.
            b if !E::SUPPORTS_UNICODE && !b.is_ascii()
                => return Err(perr(i, NonAsciiInByteLiteral)),
            _ => i += 1,
        }
    }

    let closing_quote_pos = closing_quote_pos.ok_or(perr(None, UnterminatedString))?;

    // Everything after the closing quote must be a valid suffix.
    let start_suffix = closing_quote_pos + 1;
    let suffix = &input[start_suffix..];
    check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;

    // `value` is only empty if there was no escape in the input string
    // (with the special case of the input being empty). This means the
    // string value basically equals the input, so we store `None`.
    let value = if value.is_empty() {
        None
    } else {
        // There was an escape in the string, so we need to push the
        // remaining unescaped part of the string still.
        value.push_str(&input[end_last_escape..closing_quote_pos]);
        Some(value)
    };

    Ok((value, start_suffix))
}
/// Reads and checks a raw (byte) string literal, converting `\r\n` sequences to
/// just `\n` sequences. Returns an optional new string (if the input contained
/// any `\r\n`) and the number of hashes used by the literal.
#[inline(never)]
pub(crate) fn scan_raw_string<E: Escapee>(
    input: &str,
    offset: usize,
) -> Result<(Option<String>, u32, usize), ParseError> {
    // Raw string literal: count the `#` characters starting at `offset`.
    let num_hashes = input[offset..].bytes().position(|b| b != b'#')
        .ok_or(perr(None, InvalidLiteral))?;
    if input.as_bytes().get(offset + num_hashes) != Some(&b'"') {
        return Err(perr(None, InvalidLiteral));
    }
    let start_inner = offset + num_hashes + 1;
    // The exact `#` run that must follow the closing quote.
    let hashes = &input[offset..num_hashes + offset];

    let mut closing_quote_pos = None;
    let mut i = start_inner;
    // Start of the chunk not yet copied into `value` (only relevant when the
    // input contains `\r\n` and therefore needs rewriting).
    let mut end_last_escape = start_inner;
    let mut value = String::new();
    while i < input.len() {
        let b = input.as_bytes()[i];
        // The literal only ends at a `"` followed by the same `#` run.
        if b == b'"' && input[i + 1..].starts_with(hashes) {
            closing_quote_pos = Some(i);
            break;
        }

        if b == b'\r' {
            // Convert `\r\n` into `\n`. This is currently not well documented
            // in the Rust reference, but is done even for raw strings. That's
            // because rustc simply converts all line endings when reading
            // source files.
            if input.as_bytes().get(i + 1) == Some(&b'\n') {
                value.push_str(&input[end_last_escape..i]);
                value.push('\n');
                i += 2;
                end_last_escape = i;
                continue;
            } else if E::SUPPORTS_UNICODE {
                // If no \n follows the \r and we are scanning a raw string
                // (not raw byte string), we error.
                return Err(perr(i, IsolatedCr))
            }
        }

        if !E::SUPPORTS_UNICODE {
            if !b.is_ascii() {
                return Err(perr(i, NonAsciiInByteLiteral));
            }
        }

        i += 1;
    }

    let closing_quote_pos = closing_quote_pos.ok_or(perr(None, UnterminatedRawString))?;

    // Everything after the closing quote and its `#` run must be a valid suffix.
    let start_suffix = closing_quote_pos + num_hashes + 1;
    let suffix = &input[start_suffix..];
    check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;

    // `value` is only empty if there was no \r\n in the input string (with the
    // special case of the input being empty). This means the string value
    // equals the input, so we store `None`.
    let value = if value.is_empty() {
        None
    } else {
        // There was an \r\n in the string, so we need to push the remaining
        // unescaped part of the string still.
        value.push_str(&input[end_last_escape..closing_quote_pos]);
        Some(value)
    };

    Ok((value, num_hashes as u32, start_suffix))
}

257
third_party/rust/litrs/src/float/mod.rs vendored Normal file
View file

@ -0,0 +1,257 @@
use std::{fmt, str::FromStr};
use crate::{
Buffer, ParseError,
err::{perr, ParseErrorKind::*},
parse::{end_dec_digits, first_byte_or_empty, check_suffix},
};
/// A floating point literal, e.g. `3.14`, `8.`, `135e12`, or `1.956e2f64`.
///
/// This kind of literal has several forms, but generally consists of a main
/// number part, an optional exponent and an optional type suffix. See
/// [the reference][ref] for more information.
///
/// A leading minus sign `-` is not part of the literal grammar! `-3.14` are two
/// tokens in the Rust grammar. Further, `27` and `27f32` are both not float,
/// but integer literals! Consequently `FloatLit::parse` will reject them.
///
///
/// [ref]: https://doc.rust-lang.org/reference/tokens.html#floating-point-literals
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FloatLit<B: Buffer> {
    /// The whole raw input. The `usize` fields in this struct partition this
    /// string. Always true:
    /// `end_integer_part <= end_fractional_part <= end_number_part`.
    ///
    /// ```text
    ///    12_3.4_56e789f32
    ///        ╷    ╷   ╷
    ///        |    |   └ end_number_part = 13
    ///        |    └ end_fractional_part = 9
    ///        └ end_integer_part = 4
    ///
    ///    246.
    ///       ╷╷
    ///       |└ end_fractional_part = end_number_part = 4
    ///       └ end_integer_part = 3
    ///
    ///    1234e89
    ///        ╷  ╷
    ///        |  └ end_number_part = 7
    ///        └ end_integer_part = end_fractional_part = 4
    /// ```
    raw: B,

    /// The first index not part of the integer part anymore. Since the integer
    /// part is at the start, this is also the length of that part.
    end_integer_part: usize,

    /// The first index after the fractional part.
    end_fractional_part: usize,

    /// The first index after the whole number part (everything except type suffix).
    end_number_part: usize,
}
impl<B: Buffer> FloatLit<B> {
    /// Parses the input as a floating point literal. Returns an error if the
    /// input is invalid or represents a different kind of literal. Will also
    /// reject decimal integer literals like `23` or `17f32`, in accordance
    /// with the spec.
    pub fn parse(s: B) -> Result<Self, ParseError> {
        // A float literal must start with a decimal digit.
        if !matches!(first_byte_or_empty(&s)?, b'0'..=b'9') {
            return Err(perr(0, DoesNotStartWithDigit));
        }

        // TODO: simplify once RFC 2528 is stabilized
        let FloatLit {
            end_integer_part,
            end_fractional_part,
            end_number_part,
            ..
        } = parse_impl(&s)?;

        Ok(Self { raw: s, end_integer_part, end_fractional_part, end_number_part })
    }

    /// Returns the number part (including integer part, fractional part and
    /// exponent), but without the suffix. If you want an actual floating
    /// point value, you need to parse this string, e.g. with `f32::from_str`
    /// or an external crate.
    pub fn number_part(&self) -> &str {
        &(*self.raw)[..self.end_number_part]
    }

    /// Returns the non-empty integer part of this literal.
    pub fn integer_part(&self) -> &str {
        &(*self.raw)[..self.end_integer_part]
    }

    /// Returns the optional fractional part of this literal. Does not include
    /// the period. If a period exists in the input, `Some` is returned, `None`
    /// otherwise. Note that `Some("")` might be returned, e.g. for `3.`.
    pub fn fractional_part(&self) -> Option<&str> {
        match self.end_integer_part == self.end_fractional_part {
            // No period in the input at all.
            true => None,
            // `+ 1` skips the period itself.
            false => Some(&(*self.raw)[self.end_integer_part + 1..self.end_fractional_part]),
        }
    }

    /// Optional exponent part. Might be empty if there was no exponent part in
    /// the input. Includes the `e` or `E` at the beginning.
    pub fn exponent_part(&self) -> &str {
        &(*self.raw)[self.end_fractional_part..self.end_number_part]
    }

    /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
    pub fn suffix(&self) -> &str {
        &(*self.raw)[self.end_number_part..]
    }

    /// Returns the raw input that was passed to `parse`.
    pub fn raw_input(&self) -> &str {
        &self.raw
    }

    /// Returns the raw input that was passed to `parse`, potentially owned.
    pub fn into_raw_input(self) -> B {
        self.raw
    }
}
impl FloatLit<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn to_owned(&self) -> FloatLit<String> {
        // `FloatLit<&str>` is `Copy`, so destructuring a copy is free.
        let FloatLit { raw, end_integer_part, end_fractional_part, end_number_part } = *self;
        FloatLit {
            raw: raw.to_owned(),
            end_integer_part,
            end_fractional_part,
            end_number_part,
        }
    }
}
impl<B: Buffer> fmt::Display for FloatLit<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The buffer derefs to `str`, so the raw literal is written verbatim.
        f.write_str(&self.raw)
    }
}
/// Parses a float literal into its partition indices.
///
/// Precondition: first byte of string has to be in `b'0'..=b'9'`.
#[inline(never)]
pub(crate) fn parse_impl(input: &str) -> Result<FloatLit<&str>, ParseError> {
    // Integer part.
    let end_integer_part = end_dec_digits(input.as_bytes());
    let rest = &input[end_integer_part..];

    // Fractional part.
    let end_fractional_part = if rest.as_bytes().get(0) == Some(&b'.') {
        // The fractional part must not start with `_`.
        if rest.as_bytes().get(1) == Some(&b'_') {
            return Err(perr(end_integer_part + 1, UnexpectedChar));
        }

        // `+ 1` accounts for the period itself.
        end_dec_digits(rest[1..].as_bytes()) + 1 + end_integer_part
    } else {
        end_integer_part
    };
    let rest = &input[end_fractional_part..];

    // If we have a period that is not followed by decimal digits, the
    // literal must end now.
    if end_integer_part + 1 == end_fractional_part && !rest.is_empty() {
        return Err(perr(end_integer_part + 1, UnexpectedChar));
    }

    // Optional exponent.
    let end_number_part = if rest.starts_with('e') || rest.starts_with('E') {
        // Strip single - or + sign at the beginning.
        let exp_number_start = match rest.as_bytes().get(1) {
            Some(b'-') | Some(b'+') => 2,
            _ => 1,
        };

        // Find end of exponent and make sure there is at least one digit.
        let end_exponent = end_dec_digits(rest[exp_number_start..].as_bytes()) + exp_number_start;
        if !rest[exp_number_start..end_exponent].bytes().any(|b| matches!(b, b'0'..=b'9')) {
            return Err(perr(
                end_fractional_part..end_fractional_part + end_exponent,
                NoExponentDigits,
            ));
        }

        end_exponent + end_fractional_part
    } else {
        end_fractional_part
    };

    // Make sure the suffix is valid.
    let suffix = &input[end_number_part..];
    check_suffix(suffix).map_err(|kind| perr(end_number_part..input.len(), kind))?;

    // A float literal needs either a fractional or exponent part, otherwise its
    // an integer literal.
    if end_integer_part == end_number_part {
        return Err(perr(None, UnexpectedIntegerLit));
    }

    Ok(FloatLit {
        raw: input,
        end_integer_part,
        end_fractional_part,
        end_number_part,
    })
}
/// All possible float type suffixes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum FloatType {
    F32,
    F64,
}

impl FloatType {
    /// Returns the type corresponding to the given suffix (e.g. `"f32"` is
    /// mapped to `Self::F32`). If the suffix is not a valid float type, `None`
    /// is returned.
    pub fn from_suffix(suffix: &str) -> Option<Self> {
        if suffix == "f32" {
            Some(FloatType::F32)
        } else if suffix == "f64" {
            Some(FloatType::F64)
        } else {
            None
        }
    }

    /// Returns the suffix for this type, e.g. `"f32"` for `Self::F32`.
    pub fn suffix(self) -> &'static str {
        match self {
            Self::F32 => "f32",
            Self::F64 => "f64",
        }
    }
}

impl FromStr for FloatType {
    type Err = ();

    /// Parses a suffix string; any unrecognized suffix yields `Err(())`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match Self::from_suffix(s) {
            Some(ty) => Ok(ty),
            None => Err(()),
        }
    }
}

impl fmt::Display for FloatType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Displaying a float type is the same as printing its suffix.
        f.write_str(self.suffix())
    }
}
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,253 @@
use crate::{
Literal, ParseError,
test_util::{assert_parse_ok_eq, assert_roundtrip},
};
use super::{FloatLit, FloatType};
// ===== Utility functions =======================================================================
/// Helper macro to check parsing a float.
///
/// This macro contains quite a bit of logic itself (which can be buggy of
/// course), so we have a few test functions below to test a bunch of cases
/// manually.
macro_rules! check {
    // Main entry: integer part, fractional part (including the `.`), exponent
    // part (including `e`/`E`) and a suffix token (`-` meaning "no suffix").
    ($intpart:literal $fracpart:literal $exppart:literal $suffix:tt) => {
        let input = concat!($intpart, $fracpart, $exppart, check!(@stringify_suffix $suffix));
        // The partition indices follow directly from the part lengths.
        let expected_float = FloatLit {
            raw: input,
            end_integer_part: $intpart.len(),
            end_fractional_part: $intpart.len() + $fracpart.len(),
            end_number_part: $intpart.len() + $fracpart.len() + $exppart.len(),
        };

        assert_parse_ok_eq(
            input, FloatLit::parse(input), expected_float.clone(), "FloatLit::parse");
        assert_parse_ok_eq(
            input, Literal::parse(input), Literal::Float(expected_float), "Literal::parse");
        assert_eq!(FloatLit::parse(input).unwrap().suffix(), check!(@ty $suffix));
        assert_roundtrip(expected_float.to_owned(), input);
    };

    // Maps the suffix token to the string `suffix()` is expected to return.
    (@ty f32) => { "f32" };
    (@ty f64) => { "f64" };
    (@ty -) => { "" };

    // Turns the suffix token into the string appended to the input.
    (@stringify_suffix -) => { "" };
    (@stringify_suffix $suffix:ident) => { stringify!($suffix) };
}
// ===== Actual tests ===========================================================================
/// Hand-written checks of the accessor methods for literals without a suffix.
#[test]
fn manual_without_suffix() -> Result<(), ParseError> {
    let f = FloatLit::parse("3.14")?;
    assert_eq!(f.number_part(), "3.14");
    assert_eq!(f.integer_part(), "3");
    assert_eq!(f.fractional_part(), Some("14"));
    assert_eq!(f.exponent_part(), "");
    assert_eq!(f.suffix(), "");

    let f = FloatLit::parse("9.")?;
    assert_eq!(f.number_part(), "9.");
    assert_eq!(f.integer_part(), "9");
    assert_eq!(f.fractional_part(), Some(""));
    assert_eq!(f.exponent_part(), "");
    assert_eq!(f.suffix(), "");

    let f = FloatLit::parse("8e1")?;
    assert_eq!(f.number_part(), "8e1");
    assert_eq!(f.integer_part(), "8");
    assert_eq!(f.fractional_part(), None);
    assert_eq!(f.exponent_part(), "e1");
    assert_eq!(f.suffix(), "");

    let f = FloatLit::parse("8E3")?;
    assert_eq!(f.number_part(), "8E3");
    assert_eq!(f.integer_part(), "8");
    assert_eq!(f.fractional_part(), None);
    assert_eq!(f.exponent_part(), "E3");
    assert_eq!(f.suffix(), "");

    let f = FloatLit::parse("8_7_6.1_23e15")?;
    assert_eq!(f.number_part(), "8_7_6.1_23e15");
    assert_eq!(f.integer_part(), "8_7_6");
    assert_eq!(f.fractional_part(), Some("1_23"));
    assert_eq!(f.exponent_part(), "e15");
    assert_eq!(f.suffix(), "");

    let f = FloatLit::parse("8.2e-_04_9")?;
    assert_eq!(f.number_part(), "8.2e-_04_9");
    assert_eq!(f.integer_part(), "8");
    assert_eq!(f.fractional_part(), Some("2"));
    assert_eq!(f.exponent_part(), "e-_04_9");
    assert_eq!(f.suffix(), "");

    Ok(())
}
#[test]
fn manual_with_suffix() -> Result<(), ParseError> {
    // `f32` suffix after integer and fractional part.
    let lit = FloatLit::parse("3.14f32")?;
    assert_eq!(lit.number_part(), "3.14");
    assert_eq!(lit.integer_part(), "3");
    assert_eq!(lit.fractional_part(), Some("14"));
    assert_eq!(lit.exponent_part(), "");
    assert_eq!(FloatType::from_suffix(lit.suffix()), Some(FloatType::F32));
    // `f64` suffix directly after an exponent.
    let lit = FloatLit::parse("8e1f64")?;
    assert_eq!(lit.number_part(), "8e1");
    assert_eq!(lit.integer_part(), "8");
    assert_eq!(lit.fractional_part(), None);
    assert_eq!(lit.exponent_part(), "e1");
    assert_eq!(FloatType::from_suffix(lit.suffix()), Some(FloatType::F64));
    // Underscores in every part, plus a suffix.
    let lit = FloatLit::parse("8_7_6.1_23e15f32")?;
    assert_eq!(lit.number_part(), "8_7_6.1_23e15");
    assert_eq!(lit.integer_part(), "8_7_6");
    assert_eq!(lit.fractional_part(), Some("1_23"));
    assert_eq!(lit.exponent_part(), "e15");
    assert_eq!(FloatType::from_suffix(lit.suffix()), Some(FloatType::F32));
    // Negative exponent with underscores, plus a suffix.
    let lit = FloatLit::parse("8.2e-_04_9f64")?;
    assert_eq!(lit.number_part(), "8.2e-_04_9");
    assert_eq!(lit.integer_part(), "8");
    assert_eq!(lit.fractional_part(), Some("2"));
    assert_eq!(lit.exponent_part(), "e-_04_9");
    assert_eq!(FloatType::from_suffix(lit.suffix()), Some(FloatType::F64));
    Ok(())
}
#[test]
fn simple() {
    // Each `check!` invocation lists the literal as three string pieces --
    // integer part, fractional part (with its leading dot) and exponent
    // part -- plus a suffix token, where `-` means "no suffix". The macro
    // `concat!`s the pieces into one input, parses it and verifies that all
    // parts round-trip (see the macro definition above).
    check!("3" ".14" "" -);
    check!("3" ".14" "" f32);
    check!("3" ".14" "" f64);
    check!("3" "" "e987654321" -);
    check!("3" "" "e987654321" f64);
    check!("42_888" ".05" "" -);
    check!("42_888" ".05" "E5___" f32);
    check!("123456789" "" "e_1" f64);
    check!("123456789" ".99" "e_1" f64);
    check!("123456789" ".99" "" f64);
    check!("123456789" ".99" "" -);
    check!("147" ".3_33" "" -);
    check!("147" ".3_33__" "E3" f64);
    check!("147" ".3_33__" "" f32);
    check!("147" ".333" "e-10" -);
    check!("147" ".333" "e-_7" f32);
    check!("147" ".333" "e+10" -);
    check!("147" ".333" "e+_7" f32);
    // A trailing dot with no fractional digits is a valid float literal.
    check!("86" "." "" -);
    check!("0" "." "" -);
    check!("0_" "." "" -);
    check!("0" ".0000001" "" -);
    check!("0" ".000_0001" "" -);
    check!("0" ".0" "e+0" -);
    check!("0" "" "E+0" -);
    check!("34" "" "e+0" -);
    check!("0" ".9182" "E+0" f32);
}
#[test]
fn non_standard_suffixes() {
    // Asserts that `input` parses as a float literal with the given parts,
    // both via `FloatLit::parse` and via the generic `Literal::parse`.
    #[track_caller]
    fn check_suffix(
        input: &str,
        integer_part: &str,
        fractional_part: Option<&str>,
        exponent_part: &str,
        suffix: &str,
    ) {
        let direct = FloatLit::parse(input)
            .unwrap_or_else(|e| panic!("expected to parse '{}' but got {}", input, e));
        let via_literal = match Literal::parse(input) {
            Ok(Literal::Float(f)) => f,
            other => panic!("Expected float literal, but got {:?} for '{}'", other, input),
        };
        // Both parse paths must agree on every part of the literal.
        for lit in [&direct, &via_literal].iter() {
            assert_eq!(lit.integer_part(), integer_part);
            assert_eq!(lit.fractional_part(), fractional_part);
            assert_eq!(lit.exponent_part(), exponent_part);
            assert_eq!(lit.suffix(), suffix);
        }
    }
    // Suffixes that merely *look* like `f32`/`f64` but are not.
    check_suffix("7.1f23", "7", Some("1"), "", "f23");
    check_suffix("7.1f320", "7", Some("1"), "", "f320");
    check_suffix("7.1f64_", "7", Some("1"), "", "f64_");
    check_suffix("8.1f649", "8", Some("1"), "", "f649");
    check_suffix("8.1f64f32", "8", Some("1"), "", "f64f32");
    // Arbitrary identifier suffixes, including non-ASCII ones.
    check_suffix("23e2_banana", "23", None, "e2_", "banana");
    check_suffix("23.2_banana", "23", Some("2_"), "", "banana");
    check_suffix("23e2pe55ter", "23", None, "e2", "pe55ter");
    check_suffix("23e2p_e55ter", "23", None, "e2", "p_e55ter");
    check_suffix("3.15Jürgen", "3", Some("15"), "", "Jürgen");
    // A second `e` after the exponent digits starts the suffix.
    check_suffix("3e2e5", "3", None, "e2", "e5");
    check_suffix("3e2e5f", "3", None, "e2", "e5f");
}
#[test]
fn parse_err() {
    // NOTE(review): `assert_err!`/`assert_err_single!` are test-util macros
    // defined elsewhere in this crate; they appear to check the error kind
    // and span -- confirm against the test-util module.
    assert_err!(FloatLit, "", Empty, None);
    // Inputs that do not start with an ASCII digit at all.
    assert_err_single!(FloatLit::parse("."), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("+"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("-"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("e"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("e8"), DoesNotStartWithDigit, 0);
    assert_err!(FloatLit, "0e", NoExponentDigits, 1..2);
    assert_err_single!(FloatLit::parse("f32"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("foo"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("inf"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("nan"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("NaN"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("NAN"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse("_2.7"), DoesNotStartWithDigit, 0);
    assert_err_single!(FloatLit::parse(".5"), DoesNotStartWithDigit, 0);
    // Missing exponent digits and misplaced characters after the dot.
    assert_err!(FloatLit, "1e", NoExponentDigits, 1..2);
    assert_err!(FloatLit, "1.e4", UnexpectedChar, 2);
    assert_err!(FloatLit, "3._4", UnexpectedChar, 2);
    assert_err!(FloatLit, "3.f32", UnexpectedChar, 2);
    assert_err!(FloatLit, "3.e5", UnexpectedChar, 2);
    assert_err!(FloatLit, "12345._987", UnexpectedChar, 6);
    assert_err!(FloatLit, "46._", UnexpectedChar, 3);
    assert_err!(FloatLit, "46.f32", UnexpectedChar, 3);
    assert_err!(FloatLit, "46.e3", UnexpectedChar, 3);
    assert_err!(FloatLit, "46._e3", UnexpectedChar, 3);
    assert_err!(FloatLit, "46.e3f64", UnexpectedChar, 3);
    assert_err!(FloatLit, "23.4e_", NoExponentDigits, 4..6);
    assert_err!(FloatLit, "23E___f32", NoExponentDigits, 2..6);
    assert_err!(FloatLit, "55e3.1", UnexpectedChar, 4..6);
    // Signs in front of a literal are not part of the literal grammar.
    assert_err!(FloatLit, "3.7+", UnexpectedChar, 3..4);
    assert_err!(FloatLit, "3.7+2", UnexpectedChar, 3..5);
    assert_err!(FloatLit, "3.7-", UnexpectedChar, 3..4);
    assert_err!(FloatLit, "3.7-2", UnexpectedChar, 3..5);
    assert_err!(FloatLit, "3.7e+", NoExponentDigits, 3..5);
    assert_err!(FloatLit, "3.7e-", NoExponentDigits, 3..5);
    assert_err!(FloatLit, "3.7e-+3", NoExponentDigits, 3..5); // suboptimal error
    assert_err!(FloatLit, "3.7e+-3", NoExponentDigits, 3..5); // suboptimal error
    assert_err_single!(FloatLit::parse("0x44.5"), InvalidSuffix, 1..6);
    // Inputs that are valid *integer* literals are rejected explicitly.
    assert_err_single!(FloatLit::parse("3"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("35_389"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("9_8_7f32"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("9_8_7banana"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("7f23"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("7f320"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("7f64_"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("8f649"), UnexpectedIntegerLit, None);
    assert_err_single!(FloatLit::parse("8f64f32"), UnexpectedIntegerLit, None);
}

401
third_party/rust/litrs/src/impls.rs vendored Normal file
View file

@ -0,0 +1,401 @@
use std::convert::TryFrom;
use crate::{Literal, err::{InvalidToken, TokenKind}};
/// Helper macro to call a `callback` macro four times for all combinations of
/// `proc_macro`/`proc_macro2` and `&`/owned.
macro_rules! helper {
    ($callback:ident, $($input:tt)*) => {
        // The callback receives the bracketed type prefix (e.g.
        // `[&proc_macro2::]`) followed by the caller-supplied tokens.
        $callback!([proc_macro::] => $($input)*);
        $callback!([&proc_macro::] => $($input)*);
        // `proc_macro2` impls are only generated with the optional feature.
        #[cfg(feature = "proc-macro2")]
        $callback!([proc_macro2::] => $($input)*);
        #[cfg(feature = "proc-macro2")]
        $callback!([&proc_macro2::] => $($input)*);
    };
}
/// Like `helper!` but without reference types.
macro_rules! helper_no_refs {
    ($callback:ident, $($input:tt)*) => {
        // Only the owned `proc_macro`/`proc_macro2` prefixes are passed on.
        $callback!([proc_macro::] => $($input)*);
        #[cfg(feature = "proc-macro2")]
        $callback!([proc_macro2::] => $($input)*);
    };
}
// ==============================================================================================
// ===== `From<*Lit> for Literal`
// ==============================================================================================
macro_rules! impl_specific_lit_to_lit {
    // Generates `From<$ty> for Literal<B>`: wraps the specific literal type
    // in the matching `Literal::$variant`.
    ($ty:ty, $variant:ident) => {
        impl<B: crate::Buffer> From<$ty> for Literal<B> {
            fn from(src: $ty) -> Self {
                Literal::$variant(src)
            }
        }
    };
}
// `BoolLit` is the only literal type here that is not generic over a buffer
// (note the missing `<B>`).
impl_specific_lit_to_lit!(crate::BoolLit, Bool);
impl_specific_lit_to_lit!(crate::IntegerLit<B>, Integer);
impl_specific_lit_to_lit!(crate::FloatLit<B>, Float);
impl_specific_lit_to_lit!(crate::CharLit<B>, Char);
impl_specific_lit_to_lit!(crate::StringLit<B>, String);
impl_specific_lit_to_lit!(crate::ByteLit<B>, Byte);
impl_specific_lit_to_lit!(crate::ByteStringLit<B>, ByteString);
// ==============================================================================================
// ===== `From<pm::Literal> for Literal`
// ==============================================================================================
// NOTE: there are two `macro_rules! impl_tt_to_lit` definitions in this
// file. `macro_rules!` macros are textually scoped, so the `helper!` call
// below uses this definition; the later one shadows it afterwards.
macro_rules! impl_tt_to_lit {
    ([$($prefix:tt)*] => ) => {
        impl From<$($prefix)* Literal> for Literal<String> {
            fn from(src: $($prefix)* Literal) -> Self {
                // We call `expect` in all these impls: this library aims to implement exactly
                // the Rust grammar, so if we have a valid Rust literal, we should always be
                // able to parse it.
                Self::parse(src.to_string())
                    .expect("bug: failed to parse output of `Literal::to_string`")
            }
        }
    }
}
helper!(impl_tt_to_lit, );
// ==============================================================================================
// ===== `TryFrom<pm::TokenTree> for Literal`
// ==============================================================================================
macro_rules! impl_tt_to_lit {
    // Shadows the earlier macro of the same name: generates
    // `TryFrom<TokenTree> for Literal<String>`.
    ([$($prefix:tt)*] => ) => {
        impl TryFrom<$($prefix)* TokenTree> for Literal<String> {
            type Error = InvalidToken;
            fn try_from(tt: $($prefix)* TokenTree) -> Result<Self, Self::Error> {
                // Remember the span before `tt` is matched/moved below.
                let span = tt.span();
                let res = match tt {
                    $($prefix)* TokenTree::Group(_) => Err(TokenKind::Group),
                    $($prefix)* TokenTree::Punct(_) => Err(TokenKind::Punct),
                    // `true`/`false` are lexed as identifiers, not literals,
                    // so they need these special cases.
                    $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "true"
                        => return Ok(Literal::Bool(crate::BoolLit::True)),
                    $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "false"
                        => return Ok(Literal::Bool(crate::BoolLit::False)),
                    $($prefix)* TokenTree::Ident(_) => Err(TokenKind::Ident),
                    $($prefix)* TokenTree::Literal(ref lit) => Ok(lit),
                };
                match res {
                    Ok(lit) => Ok(From::from(lit)),
                    Err(actual) => Err(InvalidToken {
                        actual,
                        expected: TokenKind::Literal,
                        span: span.into(),
                    }),
                }
            }
        }
    }
}
helper!(impl_tt_to_lit, );
// ==============================================================================================
// ===== `TryFrom<pm::Literal>`, `TryFrom<pm::TokenTree>` for non-bool `*Lit`
// ==============================================================================================
/// Maps a generic `Literal` to the `TokenKind` describing its variant. Used
/// below to report an accurate `actual` kind in `InvalidToken` errors.
fn kind_of(lit: &Literal<String>) -> TokenKind {
    match lit {
        Literal::Bool(_) => TokenKind::BoolLit,
        Literal::Integer(_) => TokenKind::IntegerLit,
        Literal::Float(_) => TokenKind::FloatLit,
        Literal::Char(_) => TokenKind::CharLit,
        Literal::String(_) => TokenKind::StringLit,
        Literal::Byte(_) => TokenKind::ByteLit,
        Literal::ByteString(_) => TokenKind::ByteStringLit,
    }
}
macro_rules! impl_for_specific_lit {
    // Generates `TryFrom<pm::Literal>` and `TryFrom<pm::TokenTree>` for one
    // specific literal type `$ty` (stored in `Literal::$variant`, described
    // by `TokenKind::$kind`).
    ([$($prefix:tt)*] => $ty:ty, $variant:ident, $kind:ident) => {
        impl TryFrom<$($prefix)* Literal> for $ty {
            type Error = InvalidToken;
            fn try_from(src: $($prefix)* Literal) -> Result<Self, Self::Error> {
                // Remember the span before `src` is consumed below.
                let span = src.span();
                let lit: Literal<String> = src.into();
                match lit {
                    Literal::$variant(s) => Ok(s),
                    other => Err(InvalidToken {
                        expected: TokenKind::$kind,
                        actual: kind_of(&other),
                        span: span.into(),
                    }),
                }
            }
        }
        impl TryFrom<$($prefix)* TokenTree> for $ty {
            type Error = InvalidToken;
            fn try_from(tt: $($prefix)* TokenTree) -> Result<Self, Self::Error> {
                let span = tt.span();
                // First narrow the token tree down to a literal token, then
                // defer to the `TryFrom<Literal>` impl generated above.
                let res = match tt {
                    $($prefix)* TokenTree::Group(_) => Err(TokenKind::Group),
                    $($prefix)* TokenTree::Punct(_) => Err(TokenKind::Punct),
                    $($prefix)* TokenTree::Ident(_) => Err(TokenKind::Ident),
                    $($prefix)* TokenTree::Literal(ref lit) => Ok(lit),
                };
                match res {
                    Ok(lit) => <$ty>::try_from(lit),
                    Err(actual) => Err(InvalidToken {
                        actual,
                        expected: TokenKind::$kind,
                        span: span.into(),
                    }),
                }
            }
        }
    };
}
helper!(impl_for_specific_lit, crate::IntegerLit<String>, Integer, IntegerLit);
helper!(impl_for_specific_lit, crate::FloatLit<String>, Float, FloatLit);
helper!(impl_for_specific_lit, crate::CharLit<String>, Char, CharLit);
helper!(impl_for_specific_lit, crate::StringLit<String>, String, StringLit);
helper!(impl_for_specific_lit, crate::ByteLit<String>, Byte, ByteLit);
helper!(impl_for_specific_lit, crate::ByteStringLit<String>, ByteString, ByteStringLit);
// ==============================================================================================
// ===== `From<*Lit> for pm::Literal`
// ==============================================================================================
macro_rules! impl_specific_lit_to_pm_lit {
    // Generates `From<crate::$ty<B>> for pm::Literal` by re-parsing the raw
    // input string with the proc-macro lexer.
    ([$($prefix:tt)*] => $ty:ident, $variant:ident, $kind:ident) => {
        impl<B: crate::Buffer> From<crate::$ty<B>> for $($prefix)* Literal {
            fn from(l: crate::$ty<B>) -> Self {
                // This should never fail: an input that is parsed successfully
                // as one of our literal types should always parse as a
                // proc_macro literal as well!
                l.raw_input().parse().unwrap_or_else(|e| {
                    panic!(
                        "failed to parse `{}` as `{}`: {}",
                        l.raw_input(),
                        std::any::type_name::<Self>(),
                        e,
                    )
                })
            }
        }
    };
}
helper_no_refs!(impl_specific_lit_to_pm_lit, IntegerLit, Integer, IntegerLit);
helper_no_refs!(impl_specific_lit_to_pm_lit, FloatLit, Float, FloatLit);
helper_no_refs!(impl_specific_lit_to_pm_lit, CharLit, Char, CharLit);
helper_no_refs!(impl_specific_lit_to_pm_lit, StringLit, String, StringLit);
helper_no_refs!(impl_specific_lit_to_pm_lit, ByteLit, Byte, ByteLit);
helper_no_refs!(impl_specific_lit_to_pm_lit, ByteStringLit, ByteString, ByteStringLit);
// ==============================================================================================
// ===== `TryFrom<pm::TokenTree> for BoolLit`
// ==============================================================================================
macro_rules! impl_from_tt_for_bool {
    // Generates `TryFrom<pm::TokenTree> for BoolLit`. Unlike the other
    // literal kinds, `true`/`false` are lexed as *identifier* tokens, so
    // this matches `Ident` tokens instead of `Literal` ones.
    ([$($prefix:tt)*] => ) => {
        impl TryFrom<$($prefix)* TokenTree> for crate::BoolLit {
            type Error = InvalidToken;
            fn try_from(tt: $($prefix)* TokenTree) -> Result<Self, Self::Error> {
                let span = tt.span();
                let actual = match tt {
                    $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "true"
                        => return Ok(crate::BoolLit::True),
                    $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "false"
                        => return Ok(crate::BoolLit::False),
                    $($prefix)* TokenTree::Group(_) => TokenKind::Group,
                    $($prefix)* TokenTree::Punct(_) => TokenKind::Punct,
                    $($prefix)* TokenTree::Ident(_) => TokenKind::Ident,
                    // A literal token is never a bool, but report its exact
                    // kind in the error.
                    $($prefix)* TokenTree::Literal(ref lit) => kind_of(&Literal::from(lit)),
                };
                Err(InvalidToken {
                    actual,
                    expected: TokenKind::BoolLit,
                    span: span.into(),
                })
            }
        }
    };
}
helper!(impl_from_tt_for_bool, );
// ==============================================================================================
// ===== `From<BoolLit> for pm::Ident`
// ==============================================================================================
macro_rules! impl_bool_lit_to_pm_lit {
    // Generates `From<BoolLit> for pm::Ident`: bools are identifier tokens,
    // not literal tokens, hence the `Ident` target with a call-site span.
    ([$($prefix:tt)*] => ) => {
        impl From<crate::BoolLit> for $($prefix)* Ident {
            fn from(l: crate::BoolLit) -> Self {
                Self::new(l.as_str(), $($prefix)* Span::call_site())
            }
        }
    };
}
helper_no_refs!(impl_bool_lit_to_pm_lit, );
// This module contains no `#[test]` functions: the tests are the `no_run`
// doctest in the doc comment below, which is only *compiled* (never run).
// It asserts that all the `From`/`TryFrom` impls generated above exist.
mod tests {
    //! # Tests
    //!
    //! ```no_run
    //! extern crate proc_macro;
    //!
    //! use std::convert::TryFrom;
    //! use litrs::Literal;
    //!
    //! fn give<T>() -> T {
    //! panic!()
    //! }
    //!
    //! let _ = litrs::Literal::<String>::from(give::<litrs::BoolLit>());
    //! let _ = litrs::Literal::<String>::from(give::<litrs::IntegerLit<String>>());
    //! let _ = litrs::Literal::<String>::from(give::<litrs::FloatLit<String>>());
    //! let _ = litrs::Literal::<String>::from(give::<litrs::CharLit<String>>());
    //! let _ = litrs::Literal::<String>::from(give::<litrs::StringLit<String>>());
    //! let _ = litrs::Literal::<String>::from(give::<litrs::ByteLit<String>>());
    //! let _ = litrs::Literal::<String>::from(give::<litrs::ByteStringLit<String>>());
    //!
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::BoolLit>());
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::IntegerLit<&'static str>>());
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::FloatLit<&'static str>>());
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::CharLit<&'static str>>());
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::StringLit<&'static str>>());
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::ByteLit<&'static str>>());
    //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::ByteStringLit<&'static str>>());
    //!
    //!
    //! let _ = litrs::Literal::from(give::<proc_macro::Literal>());
    //! let _ = litrs::Literal::from(give::<&proc_macro::Literal>());
    //!
    //! let _ = litrs::Literal::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::Literal::try_from(give::<&proc_macro::TokenTree>());
    //!
    //!
    //! let _ = litrs::IntegerLit::try_from(give::<proc_macro::Literal>());
    //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro::Literal>());
    //!
    //! let _ = litrs::FloatLit::try_from(give::<proc_macro::Literal>());
    //! let _ = litrs::FloatLit::try_from(give::<&proc_macro::Literal>());
    //!
    //! let _ = litrs::CharLit::try_from(give::<proc_macro::Literal>());
    //! let _ = litrs::CharLit::try_from(give::<&proc_macro::Literal>());
    //!
    //! let _ = litrs::StringLit::try_from(give::<proc_macro::Literal>());
    //! let _ = litrs::StringLit::try_from(give::<&proc_macro::Literal>());
    //!
    //! let _ = litrs::ByteLit::try_from(give::<proc_macro::Literal>());
    //! let _ = litrs::ByteLit::try_from(give::<&proc_macro::Literal>());
    //!
    //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro::Literal>());
    //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro::Literal>());
    //!
    //!
    //! let _ = litrs::BoolLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::BoolLit::try_from(give::<&proc_macro::TokenTree>());
    //!
    //! let _ = litrs::IntegerLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro::TokenTree>());
    //!
    //! let _ = litrs::FloatLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::FloatLit::try_from(give::<&proc_macro::TokenTree>());
    //!
    //! let _ = litrs::CharLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::CharLit::try_from(give::<&proc_macro::TokenTree>());
    //!
    //! let _ = litrs::StringLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::StringLit::try_from(give::<&proc_macro::TokenTree>());
    //!
    //! let _ = litrs::ByteLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::ByteLit::try_from(give::<&proc_macro::TokenTree>());
    //!
    //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro::TokenTree>());
    //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro::TokenTree>());
    //! ```
}
// Same compile-only doctest as `tests` above, but for the `proc_macro2`
// impls (only generated with the `proc-macro2` feature).
#[cfg(feature = "proc-macro2")]
mod tests_proc_macro2 {
    //! # Tests
    //!
    //! ```no_run
    //! extern crate proc_macro;
    //!
    //! use std::convert::TryFrom;
    //! use litrs::Literal;
    //!
    //! fn give<T>() -> T {
    //! panic!()
    //! }
    //!
    //! let _ = litrs::Literal::from(give::<proc_macro2::Literal>());
    //! let _ = litrs::Literal::from(give::<&proc_macro2::Literal>());
    //!
    //! let _ = litrs::Literal::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::Literal::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //!
    //! let _ = litrs::IntegerLit::try_from(give::<proc_macro2::Literal>());
    //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro2::Literal>());
    //!
    //! let _ = litrs::FloatLit::try_from(give::<proc_macro2::Literal>());
    //! let _ = litrs::FloatLit::try_from(give::<&proc_macro2::Literal>());
    //!
    //! let _ = litrs::CharLit::try_from(give::<proc_macro2::Literal>());
    //! let _ = litrs::CharLit::try_from(give::<&proc_macro2::Literal>());
    //!
    //! let _ = litrs::StringLit::try_from(give::<proc_macro2::Literal>());
    //! let _ = litrs::StringLit::try_from(give::<&proc_macro2::Literal>());
    //!
    //! let _ = litrs::ByteLit::try_from(give::<proc_macro2::Literal>());
    //! let _ = litrs::ByteLit::try_from(give::<&proc_macro2::Literal>());
    //!
    //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro2::Literal>());
    //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro2::Literal>());
    //!
    //!
    //! let _ = litrs::BoolLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::BoolLit::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //! let _ = litrs::IntegerLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //! let _ = litrs::FloatLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::FloatLit::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //! let _ = litrs::CharLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::CharLit::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //! let _ = litrs::StringLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::StringLit::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //! let _ = litrs::ByteLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::ByteLit::try_from(give::<&proc_macro2::TokenTree>());
    //!
    //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro2::TokenTree>());
    //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro2::TokenTree>());
    //! ```
}

View file

@ -0,0 +1,356 @@
use std::{fmt, str::FromStr};
use crate::{
Buffer, ParseError,
err::{perr, ParseErrorKind::*},
parse::{first_byte_or_empty, hex_digit_value, check_suffix},
};
/// An integer literal, e.g. `27`, `0x7F`, `0b101010u8` or `5_000_000i64`.
///
/// An integer literal consists of an optional base prefix (`0b`, `0o`, `0x`),
/// the main part (digits and underscores), and an optional type suffix
/// (e.g. `u64` or `i8`). See [the reference][ref] for more information.
///
/// Note that integer literals are always positive: the grammar does not contain
/// the minus sign at all. The minus sign is just the unary negate operator,
/// not part of the literal. Which is interesting for cases like `- 128i8`:
/// here, the literal itself would overflow the specified type (`i8` cannot
/// represent 128). That's why in rustc, the literal overflow check is
/// performed as a lint after parsing, not during the lexing stage. Similarly,
/// [`IntegerLit::parse`] does not perform an overflow check.
///
/// [ref]: https://doc.rust-lang.org/reference/tokens.html#integer-literals
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub struct IntegerLit<B: Buffer> {
    /// The raw literal. Grammar: `<prefix?><main part><suffix?>`.
    raw: B,
    /// First index of the main number part (after the base prefix).
    ///
    /// Invariant (established by `parse_impl` below):
    /// `start_main_part <= end_main_part <= raw.len()`.
    start_main_part: usize,
    /// First index not part of the main number part (i.e. where the suffix,
    /// if any, begins).
    end_main_part: usize,
    /// Parsed `raw[..start_main_part]`.
    base: IntegerBase,
}
impl<B: Buffer> IntegerLit<B> {
    /// Parses the input as an integer literal. Returns an error if the input is
    /// invalid or represents a different kind of literal.
    pub fn parse(input: B) -> Result<Self, ParseError> {
        match first_byte_or_empty(&input)? {
            digit @ b'0'..=b'9' => {
                // Parse on a borrowed view first, then move `input` into the
                // returned value.
                // TODO: simplify once RFC 2528 is stabilized
                let IntegerLit {
                    start_main_part,
                    end_main_part,
                    base,
                    ..
                } = parse_impl(&input, digit)?;
                Ok(Self { raw: input, start_main_part, end_main_part, base })
            },
            _ => Err(perr(0, DoesNotStartWithDigit)),
        }
    }
    /// Performs the actual string to int conversion to obtain the integer
    /// value. The optional type suffix of the literal **is ignored by this
    /// method**. This means `N` does not need to match the type suffix!
    ///
    /// Returns `None` if the literal overflows `N`.
    ///
    /// Hint: `u128` can represent all possible integer literal values,
    /// as there are no negative literals (see type docs). Thus you can, for
    /// example, safely use `lit.value::<u128>().to_string()` to get a decimal
    /// string. (Technically, Rust integer literals can represent arbitrarily
    /// large numbers, but those would be rejected at a later stage by the Rust
    /// compiler).
    pub fn value<N: FromIntegerLiteral>(&self) -> Option<N> {
        let base = N::from_small_number(self.base.value());
        // Horner-style accumulation: `acc = acc * base + digit` per digit,
        // using checked arithmetic so any overflow yields `None`.
        let mut acc = N::from_small_number(0);
        for digit in self.raw_main_part().bytes() {
            if digit == b'_' {
                continue;
            }
            // We don't actually need the base here: we already know this main
            // part only contains digits valid for the specified base.
            let digit = hex_digit_value(digit)
                .unwrap_or_else(|| unreachable!("bug: integer main part contains non-digit"));
            acc = acc.checked_mul(base)?;
            acc = acc.checked_add(N::from_small_number(digit))?;
        }
        Some(acc)
    }
    /// The base of this integer literal.
    pub fn base(&self) -> IntegerBase {
        self.base
    }
    /// The main part containing the digits and potentially `_`. Do not try to
    /// parse this directly as that would ignore the base!
    pub fn raw_main_part(&self) -> &str {
        &(*self.raw)[self.start_main_part..self.end_main_part]
    }
    /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
    ///
    /// If you want the type, try `IntegerType::from_suffix(lit.suffix())`.
    pub fn suffix(&self) -> &str {
        &(*self.raw)[self.end_main_part..]
    }
    /// Returns the raw input that was passed to `parse`.
    pub fn raw_input(&self) -> &str {
        &self.raw
    }
    /// Returns the raw input that was passed to `parse`, potentially owned.
    pub fn into_raw_input(self) -> B {
        self.raw
    }
}
impl IntegerLit<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn to_owned(&self) -> IntegerLit<String> {
        IntegerLit {
            // Only the buffer needs an actual copy; the indices and base
            // are `Copy`.
            raw: self.raw.to_owned(),
            start_main_part: self.start_main_part,
            end_main_part: self.end_main_part,
            base: self.base,
        }
    }
}
impl<B: Buffer> fmt::Display for IntegerLit<B> {
    /// Writes the raw literal exactly as it was passed to `parse`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to `str`'s `Display` impl; this honors formatter flags
        // (width, fill, ...) exactly like `write!(f, "{}", ...)` would.
        fmt::Display::fmt(&*self.raw, f)
    }
}
/// Integer literal types. *Implementation detail*.
///
/// Implemented for all integer literal types. This trait is sealed and cannot
/// be implemented outside of this crate. The trait's methods are implementation
/// detail of this library and are not subject to semver.
pub trait FromIntegerLiteral: self::sealed::Sealed + Copy {
    /// Creates itself from the given number. `n` is guaranteed to be `<= 16`.
    #[doc(hidden)]
    fn from_small_number(n: u8) -> Self;
    /// Overflow-checked addition (forwards to the primitive's inherent method).
    #[doc(hidden)]
    fn checked_add(self, rhs: Self) -> Option<Self>;
    /// Overflow-checked multiplication (forwards to the primitive's inherent method).
    #[doc(hidden)]
    fn checked_mul(self, rhs: Self) -> Option<Self>;
    /// The `IntegerType` corresponding to this primitive type.
    #[doc(hidden)]
    fn ty() -> IntegerType;
}
macro_rules! impl_from_int_literal {
    // Implements the sealing trait and `FromIntegerLiteral` for each
    // `$ty => $variant` pair. The `checked_*` trait methods call the
    // primitive's inherent methods (inherent methods take precedence during
    // method resolution, so this does not recurse).
    ($( $ty:ty => $variant:ident ,)* ) => {
        $(
            impl self::sealed::Sealed for $ty {}
            impl FromIntegerLiteral for $ty {
                fn from_small_number(n: u8) -> Self {
                    n as Self
                }
                fn checked_add(self, rhs: Self) -> Option<Self> {
                    self.checked_add(rhs)
                }
                fn checked_mul(self, rhs: Self) -> Option<Self> {
                    self.checked_mul(rhs)
                }
                fn ty() -> IntegerType {
                    IntegerType::$variant
                }
            }
        )*
    };
}
impl_from_int_literal!(
    u8 => U8, u16 => U16, u32 => U32, u64 => U64, u128 => U128, usize => Usize,
    i8 => I8, i16 => I16, i32 => I32, i64 => I64, i128 => I128, isize => Isize,
);
/// Private module so `Sealed` cannot be named (and thus not implemented)
/// outside of this crate.
mod sealed {
    pub trait Sealed {}
}
/// Precondition: first byte of string has to be in `b'0'..=b'9'`.
///
/// Determines the base from an optional `0b`/`0o`/`0x` prefix, finds the end
/// of the main digit part, validates the suffix and returns a borrowed
/// `IntegerLit` describing the split.
#[inline(never)]
pub(crate) fn parse_impl(input: &str, first: u8) -> Result<IntegerLit<&str>, ParseError> {
    // Figure out base and strip prefix base, if it exists.
    let (end_prefix, base) = match (first, input.as_bytes().get(1)) {
        (b'0', Some(b'b')) => (2, IntegerBase::Binary),
        (b'0', Some(b'o')) => (2, IntegerBase::Octal),
        (b'0', Some(b'x')) => (2, IntegerBase::Hexadecimal),
        // Everything else is treated as decimal. Several cases are caught
        // by this:
        // - "123"
        // - "0"
        // - "0u8"
        // - "0r" -> this will error later
        _ => (0, IntegerBase::Decimal),
    };
    let without_prefix = &input[end_prefix..];
    // Scan input to find the first character that's not a valid digit.
    let is_valid_digit = match base {
        IntegerBase::Binary => |b| matches!(b, b'0' | b'1' | b'_'),
        IntegerBase::Octal => |b| matches!(b, b'0'..=b'7' | b'_'),
        IntegerBase::Decimal => |b| matches!(b, b'0'..=b'9' | b'_'),
        IntegerBase::Hexadecimal => |b| matches!(b, b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F' | b'_'),
    };
    let end_main = without_prefix.bytes()
        .position(|b| !is_valid_digit(b))
        .unwrap_or(without_prefix.len());
    let (main_part, suffix) = without_prefix.split_at(end_main);
    check_suffix(suffix).map_err(|kind| {
        // This is just to have a nicer error kind for this special case. If the
        // suffix is invalid, it is non-empty -> unwrap ok.
        let first = suffix.as_bytes()[0];
        if !is_valid_digit(first) && first.is_ascii_digit() {
            perr(end_main + end_prefix, InvalidDigit)
        } else {
            perr(end_main + end_prefix..input.len(), kind)
        }
    })?;
    // A suffix starting with `e`/`E` would be ambiguous with a float exponent.
    // NOTE(review): unlike the spans above, this one does not add
    // `end_prefix`, so for prefixed literals (e.g. `0b1e1`) the reported
    // position may be off by the prefix length -- TODO: confirm upstream.
    if suffix.starts_with('e') || suffix.starts_with('E') {
        return Err(perr(end_main, IntegerSuffixStartingWithE));
    }
    // Make sure main number part is not empty.
    if main_part.bytes().filter(|&b| b != b'_').count() == 0 {
        return Err(perr(end_prefix..end_prefix + end_main, NoDigits));
    }
    Ok(IntegerLit {
        raw: input,
        start_main_part: end_prefix,
        end_main_part: end_main + end_prefix,
        base,
    })
}
/// The bases in which an integer can be specified.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IntegerBase {
    /// Prefix `0b`, base value 2.
    Binary,
    /// Prefix `0o`, base value 8.
    Octal,
    /// No prefix, base value 10.
    Decimal,
    /// Prefix `0x`, base value 16.
    Hexadecimal,
}
impl IntegerBase {
    /// Returns the literal prefix indicating this base: `""` for decimal,
    /// `"0b"`, `"0o"` or `"0x"` otherwise.
    pub fn prefix(self) -> &'static str {
        match self {
            Self::Decimal => "",
            Self::Binary => "0b",
            Self::Octal => "0o",
            Self::Hexadecimal => "0x",
        }
    }
    /// Returns the numeric base value: 2, 8, 10 or 16.
    pub fn value(self) -> u8 {
        match self {
            Self::Decimal => 10,
            Self::Binary => 2,
            Self::Octal => 8,
            Self::Hexadecimal => 16,
        }
    }
}
/// All possible integer type suffixes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// `non_exhaustive`: more suffixes could be added without a breaking change.
#[non_exhaustive]
pub enum IntegerType {
    U8,
    U16,
    U32,
    U64,
    U128,
    Usize,
    I8,
    I16,
    I32,
    I64,
    I128,
    Isize,
}
impl IntegerType {
    /// Returns the type corresponding to the given suffix string, e.g.
    /// `Self::U8` for `"u8"`. Returns `None` for anything that is not a
    /// valid integer type suffix.
    pub fn from_suffix(suffix: &str) -> Option<Self> {
        match suffix {
            "i8" => Some(Self::I8),
            "i16" => Some(Self::I16),
            "i32" => Some(Self::I32),
            "i64" => Some(Self::I64),
            "i128" => Some(Self::I128),
            "isize" => Some(Self::Isize),
            "u8" => Some(Self::U8),
            "u16" => Some(Self::U16),
            "u32" => Some(Self::U32),
            "u64" => Some(Self::U64),
            "u128" => Some(Self::U128),
            "usize" => Some(Self::Usize),
            _ => None,
        }
    }
    /// Returns the suffix string for this type, e.g. `"u8"` for `Self::U8`.
    /// Inverse of [`Self::from_suffix`].
    pub fn suffix(self) -> &'static str {
        match self {
            Self::I8 => "i8",
            Self::I16 => "i16",
            Self::I32 => "i32",
            Self::I64 => "i64",
            Self::I128 => "i128",
            Self::Isize => "isize",
            Self::U8 => "u8",
            Self::U16 => "u16",
            Self::U32 => "u32",
            Self::U64 => "u64",
            Self::U128 => "u128",
            Self::Usize => "usize",
        }
    }
}
impl FromStr for IntegerType {
    type Err = ();
    /// Parses a type suffix string (e.g. `"u8"`); thin wrapper around
    /// [`IntegerType::from_suffix`].
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match Self::from_suffix(s) {
            Some(ty) => Ok(ty),
            None => Err(()),
        }
    }
}
impl fmt::Display for IntegerType {
    /// Formats the type as its suffix string (e.g. `u8`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.suffix())
    }
}
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,357 @@
use std::fmt::{Debug, Display};
use crate::{
FromIntegerLiteral, Literal, IntegerLit, IntegerType as Ty, IntegerBase, IntegerBase::*,
test_util::{assert_parse_ok_eq, assert_roundtrip},
};
// ===== Utility functions =======================================================================
#[track_caller]
fn check<T: FromIntegerLiteral + PartialEq + Debug + Display>(
    input: &str,
    value: T,
    base: IntegerBase,
    main_part: &str,
    type_suffix: Option<Ty>,
) {
    // The literal both parsers are expected to produce.
    let expected = IntegerLit {
        raw: input,
        start_main_part: base.prefix().len(),
        end_main_part: base.prefix().len() + main_part.len(),
        base,
    };
    // The specific and the generic parser must both succeed and agree,
    // and printing the literal must reproduce the input.
    assert_parse_ok_eq(
        input, IntegerLit::parse(input), expected.clone(), "IntegerLit::parse");
    assert_parse_ok_eq(
        input, Literal::parse(input), Literal::Integer(expected), "Literal::parse");
    assert_roundtrip(expected.to_owned(), input);
    // The suffix must map to the expected type suffix (or `None`).
    assert_eq!(Ty::from_suffix(IntegerLit::parse(input).unwrap().suffix()), type_suffix);
    // Finally, converting to a value must neither overflow nor disagree.
    let parsed = IntegerLit::parse(input)
        .unwrap()
        .value::<T>()
        .unwrap_or_else(|| panic!("unexpected overflow in `IntegerLit::value` for `{}`", input));
    if parsed != value {
        panic!(
            "Parsing int literal `{}` should give value `{}`, but actually resulted in `{}`",
            input,
            value,
            parsed,
        );
    }
}
// ===== Actual tests ===========================================================================
// Decimal literals: single digits, leading zeros, underscores in arbitrary
// positions, and the maximum `u128` value.
#[test]
fn parse_decimal() {
    check("0", 0u128, Decimal, "0", None);
    check("1", 1u8, Decimal, "1", None);
    check("8", 8u16, Decimal, "8", None);
    check("9", 9u32, Decimal, "9", None);
    check("10", 10u64, Decimal, "10", None);
    check("11", 11i8, Decimal, "11", None);
    check("123456789", 123456789i128, Decimal, "123456789", None);
    check("05", 5i16, Decimal, "05", None);
    check("00005", 5i32, Decimal, "00005", None);
    check("0123456789", 123456789i64, Decimal, "0123456789", None);
    check("123_456_789", 123_456_789, Decimal, "123_456_789", None);
    check("0___4", 4, Decimal, "0___4", None);
    check("0___4_3", 43, Decimal, "0___4_3", None);
    check("0___4_3", 43, Decimal, "0___4_3", None);
    check("123___________", 123, Decimal, "123___________", None);
    // u128::MAX, with and without underscore separators.
    check(
        "340282366920938463463374607431768211455",
        340282366920938463463374607431768211455u128,
        Decimal,
        "340282366920938463463374607431768211455",
        None,
    );
    check(
        "340_282_366_920_938_463_463_374_607_431_768_211_455",
        340282366920938463463374607431768211455u128,
        Decimal,
        "340_282_366_920_938_463_463_374_607_431_768_211_455",
        None,
    );
    check(
        "3_40_282_3669_20938_463463_3746074_31768211_455___",
        340282366920938463463374607431768211455u128,
        Decimal,
        "3_40_282_3669_20938_463463_3746074_31768211_455___",
        None,
    );
}
// Binary literals (`0b` prefix), including underscores and type suffixes.
#[test]
fn parse_binary() {
    check("0b0", 0b0, Binary, "0", None);
    check("0b000", 0b000, Binary, "000", None);
    check("0b1", 0b1, Binary, "1", None);
    check("0b01", 0b01, Binary, "01", None);
    check("0b101010", 0b101010, Binary, "101010", None);
    check("0b10_10_10", 0b10_10_10, Binary, "10_10_10", None);
    check("0b01101110____", 0b01101110____, Binary, "01101110____", None);
    check("0b10010u8", 0b10010u8, Binary, "10010", Some(Ty::U8));
    check("0b10010i8", 0b10010u8, Binary, "10010", Some(Ty::I8));
    check("0b10010u64", 0b10010u64, Binary, "10010", Some(Ty::U64));
    check("0b10010i64", 0b10010i64, Binary, "10010", Some(Ty::I64));
    check(
        "0b1011001_00110000_00101000_10100101u32",
        0b1011001_00110000_00101000_10100101u32,
        Binary,
        "1011001_00110000_00101000_10100101",
        Some(Ty::U32),
    );
}
// Octal literals (`0o` prefix), including underscores and type suffixes.
#[test]
fn parse_octal() {
    check("0o0", 0o0, Octal, "0", None);
    check("0o1", 0o1, Octal, "1", None);
    check("0o6", 0o6, Octal, "6", None);
    check("0o7", 0o7, Octal, "7", None);
    check("0o17", 0o17, Octal, "17", None);
    check("0o123", 0o123, Octal, "123", None);
    check("0o7654321", 0o7654321, Octal, "7654321", None);
    check("0o7_53_1", 0o7_53_1, Octal, "7_53_1", None);
    check("0o66_", 0o66_, Octal, "66_", None);
    check("0o755u16", 0o755u16, Octal, "755", Some(Ty::U16));
    check("0o755i128", 0o755i128, Octal, "755", Some(Ty::I128));
}
// Hexadecimal literals (`0x` prefix): lowercase, uppercase and mixed-case
// digits, underscores and type suffixes.
#[test]
fn parse_hexadecimal() {
    check("0x0", 0x0, Hexadecimal, "0", None);
    check("0x1", 0x1, Hexadecimal, "1", None);
    check("0x9", 0x9, Hexadecimal, "9", None);
    check("0xa", 0xa, Hexadecimal, "a", None);
    check("0xf", 0xf, Hexadecimal, "f", None);
    check("0x17", 0x17, Hexadecimal, "17", None);
    check("0x1b", 0x1b, Hexadecimal, "1b", None);
    check("0x123", 0x123, Hexadecimal, "123", None);
    check("0xace", 0xace, Hexadecimal, "ace", None);
    check("0xfdb971", 0xfdb971, Hexadecimal, "fdb971", None);
    check("0xa_54_f", 0xa_54_f, Hexadecimal, "a_54_f", None);
    check("0x6d_", 0x6d_, Hexadecimal, "6d_", None);
    check("0xA", 0xA, Hexadecimal, "A", None);
    check("0xF", 0xF, Hexadecimal, "F", None);
    check("0x17", 0x17, Hexadecimal, "17", None);
    check("0x1B", 0x1B, Hexadecimal, "1B", None);
    check("0x123", 0x123, Hexadecimal, "123", None);
    check("0xACE", 0xACE, Hexadecimal, "ACE", None);
    check("0xFDB971", 0xFDB971, Hexadecimal, "FDB971", None);
    check("0xA_54_F", 0xA_54_F, Hexadecimal, "A_54_F", None);
    check("0x6D_", 0x6D_, Hexadecimal, "6D_", None);
    check("0xFdB97a1", 0xFdB97a1, Hexadecimal, "FdB97a1", None);
    check("0xfdB97A1", 0xfdB97A1, Hexadecimal, "fdB97A1", None);
    check("0x40u16", 0x40u16, Hexadecimal, "40", Some(Ty::U16));
    check("0xffi128", 0xffi128, Hexadecimal, "ff", Some(Ty::I128));
}
// After a base prefix, the main part may start with underscores (unlike plain
// decimal literals, where a leading `_` would make it an identifier).
#[test]
fn starting_underscore() {
    check("0b_1", 1, Binary, "_1", None);
    check("0b_010i16", 0b_010, Binary, "_010", Some(Ty::I16));
    check("0o_5", 5, Octal, "_5", None);
    check("0o_750u128", 0o_750u128, Octal, "_750", Some(Ty::U128));
    check("0x_c", 0xc, Hexadecimal, "_c", None);
    check("0x_cf3i8", 0x_cf3, Hexadecimal, "_cf3", Some(Ty::I8));
}
// Parsing ignores the type suffix for overflow purposes: a literal whose
// value doesn't fit its suffix type still parses, and `value::<T>()` works
// for any sufficiently large `T`.
#[test]
fn parse_overflowing_just_fine() {
    check("256u8", 256u16, Decimal, "256", Some(Ty::U8));
    check("123_456_789u8", 123_456_789u32, Decimal, "123_456_789", Some(Ty::U8));
    check("123_456_789u16", 123_456_789u32, Decimal, "123_456_789", Some(Ty::U16));
    check("123_123_456_789u8", 123_123_456_789u64, Decimal, "123_123_456_789", Some(Ty::U8));
    check("123_123_456_789u16", 123_123_456_789u64, Decimal, "123_123_456_789", Some(Ty::U16));
    check("123_123_456_789u32", 123_123_456_789u64, Decimal, "123_123_456_789", Some(Ty::U32));
}
// Every standard type suffix maps to the matching `IntegerType` variant.
#[test]
fn suffixes() {
    [
        ("123i8", Ty::I8),
        ("123i16", Ty::I16),
        ("123i32", Ty::I32),
        ("123i64", Ty::I64),
        ("123i128", Ty::I128),
        ("123u8", Ty::U8),
        ("123u16", Ty::U16),
        ("123u32", Ty::U32),
        ("123u64", Ty::U64),
        ("123u128", Ty::U128),
    ].iter().for_each(|&(s, ty)| {
        assert_eq!(Ty::from_suffix(IntegerLit::parse(s).unwrap().suffix()), Some(ty));
    });
}
// Values just above `u128::MAX` (in all four bases, with/without suffix and
// underscores) must parse fine but return `None` from `value::<u128>()`.
#[test]
fn overflow_u128() {
    let inputs = [
        "340282366920938463463374607431768211456",
        "0x100000000000000000000000000000000",
        "0o4000000000000000000000000000000000000000000",
        "0b1000000000000000000000000000000000000000000000000000000000000000000\
            00000000000000000000000000000000000000000000000000000000000000",
        "340282366920938463463374607431768211456u128",
        "340282366920938463463374607431768211457",
        "3_40_282_3669_20938_463463_3746074_31768211_456___",
        "3_40_282_3669_20938_463463_3746074_31768211_455___1",
        "3_40_282_3669_20938_463463_3746074_31768211_455___0u128",
        "3402823669209384634633746074317682114570",
    ];
    for &input in &inputs {
        let lit = IntegerLit::parse(input).expect("failed to parse");
        assert!(lit.value::<u128>().is_none());
    }
}
// Values ≥ 256 parse fine but overflow when requested as `u8`.
#[test]
fn overflow_u8() {
    let inputs = [
        "256", "0x100", "0o400", "0b100000000",
        "257", "0x101", "0o401", "0b100000001",
        "300",
        "1548",
        "2548985",
        "256u128",
        "256u8",
        "2_5_6",
        "256_____1",
        "256__",
    ];
    for &input in &inputs {
        let lit = IntegerLit::parse(input).expect("failed to parse");
        assert!(lit.value::<u8>().is_none());
    }
}
// Inputs that must fail to parse, with the expected error kind and span.
#[test]
fn parse_err() {
    assert_err!(IntegerLit, "", Empty, None);
    assert_err_single!(IntegerLit::parse("a"), DoesNotStartWithDigit, 0);
    assert_err_single!(IntegerLit::parse(";"), DoesNotStartWithDigit, 0);
    assert_err_single!(IntegerLit::parse("0;"), UnexpectedChar, 1..2);
    assert_err!(IntegerLit, "0b", NoDigits, 2..2);
    // Surrounding whitespace is not trimmed by the parser.
    assert_err_single!(IntegerLit::parse(" 0"), DoesNotStartWithDigit, 0);
    assert_err_single!(IntegerLit::parse("0 "), UnexpectedChar, 1);
    assert_err!(IntegerLit, "0b3", InvalidDigit, 2);
    assert_err_single!(IntegerLit::parse("_"), DoesNotStartWithDigit, 0);
    assert_err_single!(IntegerLit::parse("_3"), DoesNotStartWithDigit, 0);
    assert_err!(IntegerLit, "0x44.5", UnexpectedChar, 4..6);
    // Suffixes starting with `e` clash with the float exponent grammar.
    assert_err_single!(IntegerLit::parse("123em"), IntegerSuffixStartingWithE, 3);
}
// Digits out of range for the base are rejected at the offending position.
#[test]
fn invalid_digits() {
    assert_err!(IntegerLit, "0b10201", InvalidDigit, 4);
    assert_err!(IntegerLit, "0b9", InvalidDigit, 2);
    assert_err!(IntegerLit, "0b07", InvalidDigit, 3);
    assert_err!(IntegerLit, "0o12380", InvalidDigit, 5);
    assert_err!(IntegerLit, "0o192", InvalidDigit, 3);
    assert_err_single!(IntegerLit::parse("a_123"), DoesNotStartWithDigit, 0);
    assert_err_single!(IntegerLit::parse("B_123"), DoesNotStartWithDigit, 0);
}
// A base prefix followed only by underscores (optionally with a suffix) has
// no digits at all; the error span covers the underscore run.
#[test]
fn no_valid_digits() {
    assert_err!(IntegerLit, "0x_", NoDigits, 2..3);
    assert_err!(IntegerLit, "0x__", NoDigits, 2..4);
    assert_err!(IntegerLit, "0x________", NoDigits, 2..10);
    assert_err!(IntegerLit, "0x_i8", NoDigits, 2..3);
    assert_err!(IntegerLit, "0x_u8", NoDigits, 2..3);
    assert_err!(IntegerLit, "0x_isize", NoDigits, 2..3);
    assert_err!(IntegerLit, "0x_usize", NoDigits, 2..3);
    assert_err!(IntegerLit, "0o_", NoDigits, 2..3);
    assert_err!(IntegerLit, "0o__", NoDigits, 2..4);
    assert_err!(IntegerLit, "0o________", NoDigits, 2..10);
    assert_err!(IntegerLit, "0o_i32", NoDigits, 2..3);
    assert_err!(IntegerLit, "0o_u32", NoDigits, 2..3);
    assert_err!(IntegerLit, "0b_", NoDigits, 2..3);
    assert_err!(IntegerLit, "0b__", NoDigits, 2..4);
    assert_err!(IntegerLit, "0b________", NoDigits, 2..10);
    assert_err!(IntegerLit, "0b_i128", NoDigits, 2..3);
    assert_err!(IntegerLit, "0b_u128", NoDigits, 2..3);
}
// Arbitrary (non-type) suffixes are allowed by the token grammar; they don't
// map to an `IntegerType` but are preserved verbatim by `suffix()`.
#[test]
fn non_standard_suffixes() {
    /// Like `check` (with `type_suffix = None`), plus asserts the raw suffix
    /// string returned by `IntegerLit::suffix`.
    #[track_caller]
    fn check_suffix<T: FromIntegerLiteral + PartialEq + Debug + Display>(
        input: &str,
        value: T,
        base: IntegerBase,
        main_part: &str,
        suffix: &str,
    ) {
        check(input, value, base, main_part, None);
        assert_eq!(IntegerLit::parse(input).unwrap().suffix(), suffix);
    }

    check_suffix("5u7", 5, Decimal, "5", "u7");
    check_suffix("5u7", 5, Decimal, "5", "u7");
    check_suffix("5u9", 5, Decimal, "5", "u9");
    check_suffix("5u0", 5, Decimal, "5", "u0");
    check_suffix("33u12", 33, Decimal, "33", "u12");
    check_suffix("84u17", 84, Decimal, "84", "u17");
    check_suffix("99u80", 99, Decimal, "99", "u80");
    check_suffix("1234uu16", 1234, Decimal, "1234", "uu16");
    check_suffix("5i7", 5, Decimal, "5", "i7");
    check_suffix("5i9", 5, Decimal, "5", "i9");
    check_suffix("5i0", 5, Decimal, "5", "i0");
    check_suffix("33i12", 33, Decimal, "33", "i12");
    check_suffix("84i17", 84, Decimal, "84", "i17");
    check_suffix("99i80", 99, Decimal, "99", "i80");
    check_suffix("1234ii16", 1234, Decimal, "1234", "ii16");
    check_suffix("0ui32", 0, Decimal, "0", "ui32");
    check_suffix("1iu32", 1, Decimal, "1", "iu32");
    check_suffix("54321a64", 54321, Decimal, "54321", "a64");
    check_suffix("54321b64", 54321, Decimal, "54321", "b64");
    check_suffix("54321x64", 54321, Decimal, "54321", "x64");
    check_suffix("54321o64", 54321, Decimal, "54321", "o64");
    check_suffix("0a", 0, Decimal, "0", "a");
    check_suffix("0a3", 0, Decimal, "0", "a3");
    check_suffix("0z", 0, Decimal, "0", "z");
    check_suffix("0z3", 0, Decimal, "0", "z3");
    // Letters that are not valid digits for the base start the suffix.
    check_suffix("0b0a", 0, Binary, "0", "a");
    check_suffix("0b0A", 0, Binary, "0", "A");
    check_suffix("0b01f", 1, Binary, "01", "f");
    check_suffix("0b01F", 1, Binary, "01", "F");
    check_suffix("0o7a_", 7, Octal, "7", "a_");
    check_suffix("0o7A_", 7, Octal, "7", "A_");
    check_suffix("0o72f_0", 0o72, Octal, "72", "f_0");
    check_suffix("0o72F_0", 0o72, Octal, "72", "F_0");
    check_suffix("0x8cg", 0x8c, Hexadecimal, "8c", "g");
    check_suffix("0x8cG", 0x8c, Hexadecimal, "8c", "G");
    check_suffix("0x8c1h_", 0x8c1, Hexadecimal, "8c1", "h_");
    check_suffix("0x8c1H_", 0x8c1, Hexadecimal, "8c1", "H_");
    check_suffix("0x8czu16", 0x8c, Hexadecimal, "8c", "zu16");
    // Underscores are eagerly consumed as part of the number, not the suffix.
    check_suffix("123_foo", 123, Decimal, "123_", "foo");
}

370
third_party/rust/litrs/src/lib.rs vendored Normal file
View file

@ -0,0 +1,370 @@
//! Parsing and inspecting Rust literal tokens.
//!
//! This library offers functionality to parse Rust literals, i.e. tokens in the
//! Rust programming language that represent fixed values. The grammar for
//! those is defined [here][ref].
//!
//! This kind of functionality already exists in the crate `syn`. However, as
//! you oftentimes don't need (nor want) the full power of `syn`, `litrs` was
//! built. This crate also offers a bit more flexibility compared to `syn`
//! (only regarding literals, of course).
//!
//!
//! # Quick start
//!
//! | **`StringLit::try_from(tt)?.value()`** |
//! | - |
//!
//! ... where `tt` is a `proc_macro::TokenTree` and where [`StringLit`] can be
//! replaced with [`Literal`] or other types of literals (e.g. [`FloatLit`]).
//! Calling `value()` returns the value that is represented by the literal.
//!
//! **Mini Example**
//!
//! ```ignore
//! use proc_macro::TokenStream;
//!
//! #[proc_macro]
//! pub fn foo(input: TokenStream) -> TokenStream {
//! let first_token = input.into_iter().next().unwrap(); // Do proper error handling!
//! let string_value = match litrs::StringLit::try_from(first_token) {
//! Ok(string_lit) => string_lit.value(),
//! Err(e) => return e.to_compile_error(),
//! };
//!
//! // `string_value` is the string value with all escapes resolved.
//! todo!()
//! }
//! ```
//!
//! # Overview
//!
//! The main types of this library are [`Literal`], representing any kind of
//! literal, and `*Lit`, like [`StringLit`] or [`FloatLit`], representing a
//! specific kind of literal.
//!
//! There are different ways to obtain such a literal type:
//!
//! - **`parse`**: parses a `&str` or `String` and returns `Result<_,
//! ParseError>`. For example: [`Literal::parse`] and
//! [`IntegerLit::parse`].
//!
//! - **`From<proc_macro::Literal> for Literal`**: turns a `Literal` value from
//! the `proc_macro` crate into a `Literal` from this crate.
//!
//! - **`TryFrom<proc_macro::Literal> for *Lit`**: tries to turn a
//! `proc_macro::Literal` into a specific literal type of this crate. If
//! the input is a literal of a different kind, `Err(InvalidToken)` is
//! returned.
//!
//! - **`TryFrom<proc_macro::TokenTree>`**: attempts to turn a token tree into a
//! literal type of this crate. An error is returned if the token tree is
//! not a literal, or if you are trying to turn it into a specific kind of
//! literal and the token tree is a different kind of literal.
//!
//! All of the `From` and `TryFrom` conversions also work for reference to
//! `proc_macro` types. Additionally, if the crate feature `proc-macro2` is
//! enabled (which it is by default), all these `From` and `TryFrom` impls also
//! exist for the corresponding `proc_macro2` types.
//!
//! **Note**: `true` and `false` are `Ident`s when passed to your proc macro.
//! The `TryFrom<TokenTree>` impls check for those two special idents and
//! return a [`BoolLit`] appropriately. For that reason, there is also no
//! `TryFrom<proc_macro::Literal>` impl for [`BoolLit`]. The `proc_macro::Literal`
//! simply cannot represent bool literals.
//!
//!
//! # Examples
//!
//! In a proc-macro:
//!
//! ```ignore
//! use std::convert::TryFrom;
//! use proc_macro::TokenStream;
//! use litrs::FloatLit;
//!
//! #[proc_macro]
//! pub fn foo(input: TokenStream) -> TokenStream {
//! let mut input = input.into_iter().collect::<Vec<_>>();
//! if input.len() != 1 {
//! // Please do proper error handling in your real code!
//! panic!("expected exactly one token as input");
//! }
//! let token = input.remove(0);
//!
//! match FloatLit::try_from(token) {
//! Ok(float_lit) => { /* do something */ }
//! Err(e) => return e.to_compile_error(),
//! }
//!
//! // Dummy output
//! TokenStream::new()
//! }
//! ```
//!
//! Parsing from string:
//!
//! ```
//! use litrs::{FloatLit, Literal};
//!
//! // Parse a specific kind of literal (float in this case):
//! let float_lit = FloatLit::parse("3.14f32");
//! assert!(float_lit.is_ok());
//! assert_eq!(float_lit.unwrap().suffix(), "f32");
//! assert!(FloatLit::parse("'c'").is_err());
//!
//! // Parse any kind of literal. After parsing, you can inspect the literal
//! // and decide what to do in each case.
//! let lit = Literal::parse("0xff80").expect("failed to parse literal");
//! match lit {
//! Literal::Integer(lit) => { /* ... */ }
//! Literal::Float(lit) => { /* ... */ }
//! Literal::Bool(lit) => { /* ... */ }
//! Literal::Char(lit) => { /* ... */ }
//! Literal::String(lit) => { /* ... */ }
//! Literal::Byte(lit) => { /* ... */ }
//! Literal::ByteString(lit) => { /* ... */ }
//! }
//! ```
//!
//!
//!
//! # Crate features
//!
//! - `proc-macro2` (**default**): adds the dependency `proc_macro2`, a bunch of
//! `From` and `TryFrom` impls, and [`InvalidToken::to_compile_error2`].
//! - `check_suffix`: if enabled, `parse` functions will exactly verify that the
//! literal suffix is valid. Adds the dependency `unicode-xid`. If disabled,
//! only an approximate check (only in ASCII range) is done. If you are
//! writing a proc macro, you don't need to enable this as the suffix is
//! already checked by the compiler.
//!
//!
//! [ref]: https://doc.rust-lang.org/reference/tokens.html#literals
//!
#![deny(missing_debug_implementations)]
extern crate proc_macro;
#[cfg(test)]
#[macro_use]
mod test_util;
#[cfg(test)]
mod tests;
mod bool;
mod byte;
mod bytestr;
mod char;
mod err;
mod escape;
mod float;
mod impls;
mod integer;
mod parse;
mod string;
use std::{borrow::{Borrow, Cow}, fmt, ops::{Deref, Range}};
pub use self::{
bool::BoolLit,
byte::ByteLit,
bytestr::ByteStringLit,
char::CharLit,
err::{InvalidToken, ParseError},
float::{FloatLit, FloatType},
integer::{FromIntegerLiteral, IntegerLit, IntegerBase, IntegerType},
string::StringLit,
};
// ==============================================================================================
// ===== `Literal` and type defs
// ==============================================================================================
/// A literal. This is the main type of this library.
///
/// This type is generic over the underlying buffer `B`, which can be `&str` or
/// `String`.
///
/// To create this type, you have to either call [`Literal::parse`] with an
/// input string or use the `From<_>` impls of this type. The impls are only
/// available if the corresponding crate features are enabled (they are enabled
/// by default).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Literal<B: Buffer> {
    Bool(BoolLit),
    Integer(IntegerLit<B>),
    Float(FloatLit<B>),
    Char(CharLit<B>),
    String(StringLit<B>),
    Byte(ByteLit<B>),
    ByteString(ByteStringLit<B>),
}
impl<B: Buffer> Literal<B> {
    /// Parses the given input as a Rust literal.
    pub fn parse(input: B) -> Result<Self, ParseError> {
        parse::parse(input)
    }

    /// Returns the suffix of this literal or `""` if it doesn't have one.
    ///
    /// Rust token grammar actually allows suffixes for all kinds of tokens.
    /// Most Rust programmers only know the type suffixes for integer and
    /// float literals, e.g. `0u32`. And in normal Rust code, everything else
    /// causes an error. But it is possible to pass literals with arbitrary
    /// suffixes to proc macros, for example:
    ///
    /// ```ignore
    /// some_macro!(3.14f33 16px '🦊'good_boy "toph"beifong);
    /// ```
    ///
    /// Boolean literals, not actually being literals, but idents, cannot have
    /// suffixes and this method always returns `""` for those.
    ///
    /// There are some edge cases to be aware of:
    /// - Integer suffixes must not start with `e` or `E` as that conflicts with
    ///   the exponent grammar for floats. `0e1` is a float; `0eel` is also
    ///   parsed as a float and results in an error.
    /// - Hexadecimal integers eagerly parse digits, so `0x5abcdefgh` has a
    ///   suffix of `gh`.
    /// - Suffixes can contain and start with `_`, but for integer and number
    ///   literals, `_` is eagerly parsed as part of the number, so `1_x` has
    ///   the suffix `x`.
    /// - The input `55f32` is regarded as integer literal with suffix `f32`.
    ///
    /// # Example
    ///
    /// ```
    /// use litrs::Literal;
    ///
    /// assert_eq!(Literal::parse(r##"3.14f33"##).unwrap().suffix(), "f33");
    /// assert_eq!(Literal::parse(r##"123hackerman"##).unwrap().suffix(), "hackerman");
    /// assert_eq!(Literal::parse(r##"0x0fuck"##).unwrap().suffix(), "uck");
    /// assert_eq!(Literal::parse(r##"'🦊'good_boy"##).unwrap().suffix(), "good_boy");
    /// assert_eq!(Literal::parse(r##""toph"beifong"##).unwrap().suffix(), "beifong");
    /// ```
    pub fn suffix(&self) -> &str {
        match self {
            // Bool "literals" are idents and can never carry a suffix.
            Literal::Bool(_) => "",
            Literal::Integer(l) => l.suffix(),
            Literal::Float(l) => l.suffix(),
            Literal::Char(l) => l.suffix(),
            Literal::String(l) => l.suffix(),
            Literal::Byte(l) => l.suffix(),
            Literal::ByteString(l) => l.suffix(),
        }
    }
}
impl Literal<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn into_owned(self) -> Literal<String> {
        // Each variant delegates to its own `to_owned`/`into_owned`.
        match self {
            Literal::Bool(l) => Literal::Bool(l.to_owned()),
            Literal::Integer(l) => Literal::Integer(l.to_owned()),
            Literal::Float(l) => Literal::Float(l.to_owned()),
            Literal::Char(l) => Literal::Char(l.to_owned()),
            Literal::String(l) => Literal::String(l.into_owned()),
            Literal::Byte(l) => Literal::Byte(l.to_owned()),
            Literal::ByteString(l) => Literal::ByteString(l.into_owned()),
        }
    }
}
// `Display` prints the literal exactly as written in source (delegates to
// each variant's own `Display`).
impl<B: Buffer> fmt::Display for Literal<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Literal::Bool(l) => l.fmt(f),
            Literal::Integer(l) => l.fmt(f),
            Literal::Float(l) => l.fmt(f),
            Literal::Char(l) => l.fmt(f),
            Literal::String(l) => l.fmt(f),
            Literal::Byte(l) => l.fmt(f),
            Literal::ByteString(l) => l.fmt(f),
        }
    }
}
// ==============================================================================================
// ===== Buffer
// ==============================================================================================
/// A shared or owned string buffer. Implemented for `String` and `&str`. *Implementation detail*.
///
/// This trait is an implementation detail of this library; it cannot be
/// implemented in other crates and is not subject to semantic versioning.
/// `litrs` only guarantees that this trait is implemented for `String` and
/// `for<'a> &'a str`.
pub trait Buffer: sealed::Sealed + Deref<Target = str> {
    /// This is `Cow<'static, str>` for `String`, and `Cow<'a, str>` for `&'a str`.
    type Cow: From<String> + AsRef<str> + Borrow<str> + Deref<Target = str>;

    #[doc(hidden)]
    fn into_cow(self) -> Self::Cow;

    /// This is `Cow<'static, [u8]>` for `String`, and `Cow<'a, [u8]>` for `&'a str`.
    type ByteCow: From<Vec<u8>> + AsRef<[u8]> + Borrow<[u8]> + Deref<Target = [u8]>;

    #[doc(hidden)]
    fn into_byte_cow(self) -> Self::ByteCow;

    /// Cuts away some characters at the beginning and some at the end. Given
    /// range has to be in bounds.
    #[doc(hidden)]
    fn cut(self, range: Range<usize>) -> Self;
}
// Private sealing module: `Buffer: sealed::Sealed` prevents downstream crates
// from implementing `Buffer`.
mod sealed {
    pub trait Sealed {}
}
impl<'a> sealed::Sealed for &'a str {}
// Borrowed buffer: all operations are zero-copy slices/borrows.
impl<'a> Buffer for &'a str {
    #[doc(hidden)]
    fn cut(self, range: Range<usize>) -> Self {
        // Subslicing a `&str`; `range` must be in bounds and on char
        // boundaries (guaranteed by the callers).
        &self[range]
    }

    type Cow = Cow<'a, str>;
    #[doc(hidden)]
    fn into_cow(self) -> Self::Cow {
        self.into()
    }

    type ByteCow = Cow<'a, [u8]>;
    #[doc(hidden)]
    fn into_byte_cow(self) -> Self::ByteCow {
        self.as_bytes().into()
    }
}
impl sealed::Sealed for String {}
// Owned buffer: `cut` mutates in place; the `Cow`s are `'static`.
impl Buffer for String {
    #[doc(hidden)]
    fn cut(mut self, range: Range<usize>) -> Self {
        // This is not the most efficient way, but it works. First we cut the
        // end, then the beginning. Note that `drain` also removes the range if
        // the iterator is not consumed.
        self.truncate(range.end);
        self.drain(..range.start);
        self
    }

    type Cow = Cow<'static, str>;
    #[doc(hidden)]
    fn into_cow(self) -> Self::Cow {
        self.into()
    }

    type ByteCow = Cow<'static, [u8]>;
    #[doc(hidden)]
    fn into_byte_cow(self) -> Self::ByteCow {
        self.into_bytes().into()
    }
}

125
third_party/rust/litrs/src/parse.rs vendored Normal file
View file

@ -0,0 +1,125 @@
use crate::{
BoolLit,
Buffer,
ByteLit,
ByteStringLit,
CharLit,
ParseError,
FloatLit,
IntegerLit,
Literal,
StringLit,
err::{perr, ParseErrorKind::{*, self}},
};
/// Parses `input` as any kind of Rust literal, dispatching on its first
/// (and sometimes second) byte.
pub fn parse<B: Buffer>(input: B) -> Result<Literal<B>, ParseError> {
    let (first, rest) = input.as_bytes().split_first().ok_or(perr(None, Empty))?;
    let second = input.as_bytes().get(1).copied();
    match first {
        // `true`/`false` are idents, but accepted here as bool literals.
        b'f' if &*input == "false" => Ok(Literal::Bool(BoolLit::False)),
        b't' if &*input == "true" => Ok(Literal::Bool(BoolLit::True)),

        // A number literal (integer or float).
        b'0'..=b'9' => {
            // To figure out whether this is a float or integer, we do some
            // quick inspection here. Yes, this is technically duplicate
            // work with what is happening in the integer/float parse
            // methods, but it makes the code way easier for now and won't
            // be a huge performance loss.
            //
            // The first non-decimal char in a float literal must
            // be '.', 'e' or 'E'.
            match input.as_bytes().get(1 + end_dec_digits(rest)) {
                Some(b'.') | Some(b'e') | Some(b'E')
                    => FloatLit::parse(input).map(Literal::Float),
                _ => IntegerLit::parse(input).map(Literal::Integer),
            }
        },

        b'\'' => CharLit::parse(input).map(Literal::Char),
        b'"' | b'r' => StringLit::parse(input).map(Literal::String),
        // `b` prefix: either a byte literal (`b'…'`) or a (raw) byte string
        // literal (`b"…"` / `br…`).
        b'b' if second == Some(b'\'') => ByteLit::parse(input).map(Literal::Byte),
        b'b' if second == Some(b'r') || second == Some(b'"')
            => ByteStringLit::parse(input).map(Literal::ByteString),

        _ => Err(perr(None, InvalidLiteral)),
    }
}
/// Returns the first byte of `s`, or an `Empty` parse error if `s` is empty.
pub(crate) fn first_byte_or_empty(s: &str) -> Result<u8, ParseError> {
    // `first()` is the idiomatic spelling of `get(0)` (clippy: `get_first`).
    s.as_bytes().first().copied().ok_or(perr(None, Empty))
}
/// Returns the index of the first byte in `input` that is neither an ASCII
/// decimal digit nor `_`, or `input.len()` if no such byte exists.
pub(crate) fn end_dec_digits(input: &[u8]) -> usize {
    // Walk the bytes and bail out at the first non-digit, non-underscore.
    for (idx, &byte) in input.iter().enumerate() {
        let is_digit_or_underscore = byte == b'_' || byte.is_ascii_digit();
        if !is_digit_or_underscore {
            return idx;
        }
    }
    input.len()
}
/// Returns the numeric value (0–15) of the ASCII hex digit `digit`
/// (`0-9`, `a-f`, `A-F`), or `None` for any other byte.
pub(crate) fn hex_digit_value(digit: u8) -> Option<u8> {
    if digit.is_ascii_digit() {
        Some(digit - b'0')
    } else if (b'a'..=b'f').contains(&digit) {
        Some(digit - b'a' + 10)
    } else if (b'A'..=b'F').contains(&digit) {
        Some(digit - b'A' + 10)
    } else {
        None
    }
}
/// Makes sure that `s` is a valid literal suffix.
///
/// An empty suffix is always valid. A bare `_` is not. With the
/// `check_suffix` feature, the suffix must be a valid Rust identifier
/// (XID_Start/XID_Continue); without it, only an approximate ASCII-range
/// check is performed.
pub(crate) fn check_suffix(s: &str) -> Result<(), ParseErrorKind> {
    if s.is_empty() {
        return Ok(());
    }

    let mut chars = s.chars();
    let first = chars.next().unwrap();
    let rest = chars.as_str();

    // A lone `_` is not a valid identifier, hence not a valid suffix.
    if first == '_' && rest.is_empty() {
        return Err(InvalidSuffix);
    }

    // This is just an extra check to improve the error message. If the first
    // character of the "suffix" is already some invalid ASCII
    // char, "unexpected character" seems like the more fitting error.
    if first.is_ascii() && !(first.is_ascii_alphabetic() || first == '_') {
        return Err(UnexpectedChar);
    }

    // Proper check is optional as it's not really necessary in proc macro
    // context.
    #[cfg(feature = "check_suffix")]
    fn is_valid_suffix(first: char, rest: &str) -> bool {
        use unicode_xid::UnicodeXID;
        (first == '_' || first.is_xid_start())
            && rest.chars().all(|c| c.is_xid_continue())
    }

    // When avoiding the dependency on `unicode_xid`, we just do a best effort
    // to catch the most common errors.
    #[cfg(not(feature = "check_suffix"))]
    fn is_valid_suffix(first: char, rest: &str) -> bool {
        if first.is_ascii() && !(first.is_ascii_alphabetic() || first == '_') {
            return false;
        }
        for c in rest.chars() {
            if c.is_ascii() && !(c.is_ascii_alphanumeric() || c == '_') {
                return false;
            }
        }
        true
    }

    if is_valid_suffix(first, rest) {
        Ok(())
    } else {
        Err(InvalidSuffix)
    }
}

125
third_party/rust/litrs/src/string/mod.rs vendored Normal file
View file

@ -0,0 +1,125 @@
use std::{fmt, ops::Range};
use crate::{
Buffer, ParseError,
err::{perr, ParseErrorKind::*},
escape::{scan_raw_string, unescape_string},
parse::first_byte_or_empty,
};
/// A string or raw string literal, e.g. `"foo"`, `"Grüße"` or `r#"a🦊c"d🦀f"#`.
///
/// See [the reference][ref] for more information.
///
/// [ref]: https://doc.rust-lang.org/reference/tokens.html#string-literals
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct StringLit<B: Buffer> {
    /// The raw input.
    raw: B,

    /// The string value (with all escapes unescaped), or `None` if there were
    /// no escapes. In the latter case, the string value is in `raw`.
    value: Option<String>,

    /// The number of hash signs in case of a raw string literal, or `None` if
    /// it's not a raw string literal.
    num_hashes: Option<u32>,

    /// Start index of the suffix or `raw.len()` if there is no suffix.
    start_suffix: usize,
}
impl<B: Buffer> StringLit<B> {
    /// Parses the input as a (raw) string literal. Returns an error if the
    /// input is invalid or represents a different kind of literal.
    pub fn parse(input: B) -> Result<Self, ParseError> {
        match first_byte_or_empty(&input)? {
            // `r` starts a raw string, `"` a normal one.
            b'r' | b'"' => {
                let (value, num_hashes, start_suffix) = parse_impl(&input)?;
                Ok(Self { raw: input, value, num_hashes, start_suffix })
            }
            _ => Err(perr(0, InvalidStringLiteralStart)),
        }
    }

    /// Returns the string value this literal represents (where all escapes have
    /// been turned into their respective values).
    pub fn value(&self) -> &str {
        // `value` is only `Some` if escapes were resolved; otherwise the
        // value is the raw input minus quotes/hashes/suffix.
        self.value.as_deref().unwrap_or(&self.raw[self.inner_range()])
    }

    /// Like `value` but returns a potentially owned version of the value.
    ///
    /// The return value is either `Cow<'static, str>` if `B = String`, or
    /// `Cow<'a, str>` if `B = &'a str`.
    pub fn into_value(self) -> B::Cow {
        let inner_range = self.inner_range();
        let Self { raw, value, .. } = self;
        value.map(B::Cow::from).unwrap_or_else(|| raw.cut(inner_range).into_cow())
    }

    /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
    pub fn suffix(&self) -> &str {
        &(*self.raw)[self.start_suffix..]
    }

    /// Returns whether this literal is a raw string literal (starting with
    /// `r`).
    pub fn is_raw_string(&self) -> bool {
        self.num_hashes.is_some()
    }

    /// Returns the raw input that was passed to `parse`.
    pub fn raw_input(&self) -> &str {
        &self.raw
    }

    /// Returns the raw input that was passed to `parse`, potentially owned.
    pub fn into_raw_input(self) -> B {
        self.raw
    }

    /// The range within `self.raw` that excludes the quotes and potential `r#`.
    fn inner_range(&self) -> Range<usize> {
        match self.num_hashes {
            // `"…"`: skip one quote on each side.
            None => 1..self.start_suffix - 1,
            // `r##"…"##`: skip `r`, `n` hashes and the quote on each side.
            Some(n) => 1 + n as usize + 1..self.start_suffix - n as usize - 1,
        }
    }
}
impl StringLit<&str> {
    /// Makes a copy of the underlying buffer and returns the owned version of
    /// `Self`.
    pub fn into_owned(self) -> StringLit<String> {
        StringLit {
            raw: self.raw.to_owned(),
            value: self.value,
            num_hashes: self.num_hashes,
            start_suffix: self.start_suffix,
        }
    }
}
// `Display` prints the literal exactly as written in source, including
// quotes, hashes and suffix.
impl<B: Buffer> fmt::Display for StringLit<B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad(&self.raw)
    }
}
/// Precondition: input has to start with either `"` or `r`.
///
/// Returns `(unescaped value if any escapes were present, number of hashes
/// for raw strings, start index of the suffix)`.
#[inline(never)]
pub(crate) fn parse_impl(input: &str) -> Result<(Option<String>, Option<u32>, usize), ParseError> {
    if input.starts_with('r') {
        // Raw string: no escapes to resolve, just scan delimiters.
        scan_raw_string::<char>(&input, 1)
            .map(|(v, hashes, start_suffix)| (v, Some(hashes), start_suffix))
    } else {
        unescape_string::<char>(&input, 1)
            .map(|(v, start_suffix)| (v, None, start_suffix))
    }
}
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,278 @@
use crate::{Literal, StringLit, test_util::{assert_parse_ok_eq, assert_roundtrip}};
// ===== Utility functions =======================================================================
/// Asserts that `$input` (defaulting to `stringify!($lit)`) parses — via both
/// `StringLit::parse` and `Literal::parse` — into a string literal whose
/// value is `$lit`, with the given escape/raw-string expectations and suffix,
/// and that it roundtrips through `Display`.
macro_rules! check {
    ($lit:literal, $has_escapes:expr, $num_hashes:expr) => {
        check!($lit, stringify!($lit), $has_escapes, $num_hashes, "")
    };
    ($lit:literal, $input:expr, $has_escapes:expr, $num_hashes:expr, $suffix:literal) => {
        let input = $input;
        let expected = StringLit {
            raw: input,
            // `value` is only stored when escapes had to be resolved.
            value: if $has_escapes { Some($lit.to_string()) } else { None },
            num_hashes: $num_hashes,
            start_suffix: input.len() - $suffix.len(),
        };

        assert_parse_ok_eq(input, StringLit::parse(input), expected.clone(), "StringLit::parse");
        assert_parse_ok_eq(
            input, Literal::parse(input), Literal::String(expected.clone()), "Literal::parse");
        let lit = StringLit::parse(input).unwrap();
        assert_eq!(lit.value(), $lit);
        assert_eq!(lit.suffix(), $suffix);
        assert_eq!(lit.into_value(), $lit);
        assert_roundtrip(expected.into_owned(), input);
    };
}
// ===== Actual tests ============================================================================
// Escape-free strings in several scripts: value equals the quoted content.
#[test]
fn simple() {
    check!("", false, None);
    check!("a", false, None);
    check!("peter", false, None);
    check!("Sei gegrüßt, Bärthelt!", false, None);
    check!("أنا لا أتحدث العربية", false, None);
    check!("お前はもう死んでいる", false, None);
    check!("Пушки - интересные музыкальные инструменты", false, None);
    check!("lit 👌 😂 af", false, None);
}
// Literal (unescaped) whitespace characters inside normal and raw strings;
// built manually because `stringify!` would mangle them.
#[test]
fn special_whitespace() {
    let strings = ["\n", "\t", "foo\tbar", "🦊\n"];

    for &s in &strings {
        let input = format!(r#""{}""#, s);
        let input_raw = format!(r#"r"{}""#, s);
        for (input, num_hashes) in vec![(input, None), (input_raw, Some(0))] {
            let expected = StringLit {
                raw: &*input,
                value: None,
                num_hashes,
                start_suffix: input.len(),
            };
            assert_parse_ok_eq(
                &input, StringLit::parse(&*input), expected.clone(), "StringLit::parse");
            assert_parse_ok_eq(
                &input, Literal::parse(&*input), Literal::String(expected), "Literal::parse");
            assert_eq!(StringLit::parse(&*input).unwrap().value(), s);
            assert_eq!(StringLit::parse(&*input).unwrap().into_value(), s);
        }
    }
}
// Simple (single-character and `\xNN`) escapes mixed with plain text.
#[test]
fn simple_escapes() {
    check!("a\nb", true, None);
    check!("\nb", true, None);
    check!("a\n", true, None);
    check!("\n", true, None);

    check!("\x60\t\r\n うさぎ \0ネズミ", true, None);
    check!("నా \\పిల్లి లావుగా ఉంది", true, None);
    check!("నా \\పిల్లి లావుగా 🐈\"ఉంది", true, None);
    check!("\\నా\\ పిల్లి లావుగా\" ఉంది\"", true, None);
    check!("\"నా \\🐈 పిల్లి లావుగా \" ఉంది\\", true, None);

    check!("\x00", true, None);
    check!(" \x01", true, None);
    check!("\x0c 🦊", true, None);
    check!(" 🦊\x0D ", true, None);
    check!("\\x13", true, None);
    check!("\"x30", true, None);
}
// `\u{…}` escapes: varying digit counts, hex-digit case, surrounding text,
// and underscores between digits (allowed anywhere except as first char).
#[test]
fn unicode_escapes() {
    check!("\u{0}", true, None);
    check!(" \u{00}", true, None);
    check!("\u{b} ", true, None);
    check!(" \u{B} ", true, None);
    check!("\u{7e}", true, None);
    check!("నక్క\u{E4}", true, None);
    check!("\u{e4} నక్క", true, None);
    check!(" \u{fc}నక్క ", true, None);
    check!("\u{Fc}", true, None);
    check!("\u{fC}🦊\nлиса", true, None);
    check!("лиса\u{FC}", true, None);
    check!("лиса\u{b10}నక్క🦊", true, None);
    check!("\"నక్క\u{B10}", true, None);
    check!("лиса\\\u{0b10}", true, None);
    check!("ли🦊са\\\"\u{0b10}", true, None);
    check!("నక్క\\\\u{0b10}", true, None);
    check!("\u{2764}Füchsin", true, None);
    check!("Füchse \u{1f602}", true, None);
    check!("cd\u{1F602}ab", true, None);
    check!("\u{0}🦊", true, None);
    check!("лиса\u{0__}", true, None);
    check!("\\🦊\u{3_b}", true, None);
    check!("🦊\u{1_F_6_0_2}Füchsin", true, None);
    check!("నక్క\\\u{1_F6_02_____}నక్క", true, None);
}
// A backslash followed by a newline ("string continue") skips the newline
// and all following whitespace. Raw strings keep those bytes verbatim.
#[test]
fn string_continue() {
    check!("నక్క\
        bar", true, None);
    check!("foo\
        🦊", true, None);
    check!("foo\
        banana", true, None);

    // Weird whitespace characters: \t and interior newlines are skipped
    // after a continue, but \u{85}/\u{a0} are NOT whitespace to Rust and
    // therefore terminate the skipping.
    let lit = StringLit::parse("\"foo\\\n\r\t\n \n\tbar\"").expect("failed to parse");
    assert_eq!(lit.value(), "foobar");
    let lit = StringLit::parse("\"foo\\\n\u{85}bar\"").expect("failed to parse");
    assert_eq!(lit.value(), "foo\u{85}bar");
    let lit = StringLit::parse("\"foo\\\n\u{a0}bar\"").expect("failed to parse");
    assert_eq!(lit.value(), "foo\u{a0}bar");

    // Raw strings do not handle "string continues"
    check!(r"foo\
        bar", false, Some(0));
}
// A `\r\n` sequence inside a (raw) string literal is normalized to `\n`,
// matching rustc's behavior.
#[test]
fn crlf_newlines() {
    let cases = [
        ("\"foo\r\nbar\"", "foo\nbar"),
        ("\"\r\nbar\"", "\nbar"),
        ("\"лиса\r\n\"", "лиса\n"),
        ("r\"foo\r\nbar\"", "foo\nbar"),
        ("r#\"\r\nbar\"#", "\nbar"),
        ("r##\"лиса\r\n\"##", "лиса\n"),
    ];
    for &(input, expected) in &cases {
        let lit = StringLit::parse(input).expect("failed to parse");
        assert_eq!(lit.value(), expected);
    }
}
// Raw strings with 0..=8 hashes; escapes and quotes inside are verbatim.
#[test]
fn raw_string() {
    check!(r"", false, Some(0));
    check!(r"a", false, Some(0));
    check!(r"peter", false, Some(0));
    check!(r"Sei gegrüßt, Bärthelt!", false, Some(0));
    check!(r"أنا لا أتحدث العربية", false, Some(0));
    check!(r"お前はもう死んでいる", false, Some(0));
    check!(r"Пушки - интересные музыкальные инструменты", false, Some(0));
    check!(r"lit 👌 😂 af", false, Some(0));
    check!(r#""#, false, Some(1));
    check!(r#"a"#, false, Some(1));
    check!(r##"peter"##, false, Some(2));
    check!(r###"Sei gegrüßt, Bärthelt!"###, false, Some(3));
    check!(r########"lit 👌 😂 af"########, false, Some(8));
    // Quotes inside the string are fine as long as they are not followed by
    // the full closing hash sequence.
    check!(r#"foo " bar"#, false, Some(1));
    check!(r##"foo " bar"##, false, Some(2));
    check!(r#"foo """" '"'" bar"#, false, Some(1));
    check!(r#""foo""#, false, Some(1));
    check!(r###""foo'"###, false, Some(3));
    check!(r#""x'#_#s'"#, false, Some(1));
    // Hashes inside the content are also plain characters.
    check!(r"#", false, Some(0));
    check!(r"foo#", false, Some(0));
    check!(r"##bar", false, Some(0));
    check!(r###""##foo"##bar'"###, false, Some(3));
    // Escape sequences are NOT interpreted in raw strings.
    check!(r"さび\n\t\r\0\\x60\u{123}フェリス", false, Some(0));
    check!(r#"さび\n\t\r\0\\x60\u{123}フェリス"#, false, Some(1));
}
// Literal suffixes (e.g. `"foo"bar`) are parsed and exposed separately from
// the string value; uses the 5-argument form of `check!`.
#[test]
fn suffixes() {
    check!("hello", r###""hello"suffix"###, false, None, "suffix");
    check!(r"お前はもう死んでいる", r###"r"お前はもう死んでいる"_banana"###, false, Some(0), "_banana");
    check!("fox", r#""fox"peter"#, false, None, "peter");
    check!("🦊", r#""🦊"peter"#, false, None, "peter");
    check!("నక్క\\\\u{0b10}", r###""నక్క\\\\u{0b10}"jü_rgen"###, true, None, "jü_rgen");
}
// Structural parse errors: unterminated strings, invalid suffixes,
// isolated carriage returns, and malformed raw-string hash sequences.
// The last argument of `assert_err!` is the expected error span.
#[test]
fn parse_err() {
    assert_err!(StringLit, r#"""#, UnterminatedString, None);
    assert_err!(StringLit, r#""犬"#, UnterminatedString, None);
    assert_err!(StringLit, r#""Jürgen"#, UnterminatedString, None);
    assert_err!(StringLit, r#""foo bar baz"#, UnterminatedString, None);
    assert_err!(StringLit, r#""fox"peter""#, InvalidSuffix, 5);
    assert_err!(StringLit, r###"r#"foo "# bar"#"###, UnexpectedChar, 9);
    // A lone `\r` (not followed by `\n`) is rejected, even in raw strings.
    assert_err!(StringLit, "\"\r\"", IsolatedCr, 1);
    assert_err!(StringLit, "\"fo\rx\"", IsolatedCr, 3);
    assert_err!(StringLit, "r\"\r\"", IsolatedCr, 2);
    assert_err!(StringLit, "r\"fo\rx\"", IsolatedCr, 4);
    assert_err!(StringLit, r##"r####""##, UnterminatedRawString, None);
    assert_err!(StringLit, r#####"r##"foo"#bar"#####, UnterminatedRawString, None);
    assert_err!(StringLit, r##"r####"##, InvalidLiteral, None);
    assert_err!(StringLit, r##"r####x"##, InvalidLiteral, None);
}
// `\xNN` escapes above 0x7F are invalid in string literals (only allowed in
// byte strings); the span covers the 4-character escape sequence.
// NOTE(review): the function name has a typo ("invald"); left unchanged to
// keep this vendored file identical to upstream.
#[test]
fn invald_ascii_escapes() {
    assert_err!(StringLit, r#""\x80""#, NonAsciiXEscape, 1..5);
    assert_err!(StringLit, r#""🦊\x81""#, NonAsciiXEscape, 5..9);
    assert_err!(StringLit, r#"" \x8a""#, NonAsciiXEscape, 2..6);
    assert_err!(StringLit, r#""\x8Ff""#, NonAsciiXEscape, 1..5);
    assert_err!(StringLit, r#""\xa0 ""#, NonAsciiXEscape, 1..5);
    assert_err!(StringLit, r#""నక్క\xB0""#, NonAsciiXEscape, 13..17);
    assert_err!(StringLit, r#""\xc3నక్క""#, NonAsciiXEscape, 1..5);
    assert_err!(StringLit, r#""\xDf🦊""#, NonAsciiXEscape, 1..5);
    assert_err!(StringLit, r#""నక్క\xffనక్క""#, NonAsciiXEscape, 13..17);
    assert_err!(StringLit, r#""\xfF ""#, NonAsciiXEscape, 1..5);
    assert_err!(StringLit, r#"" \xFf""#, NonAsciiXEscape, 2..6);
    assert_err!(StringLit, r#""నక్క \xFF""#, NonAsciiXEscape, 15..19);
}
// Unknown, unterminated, and non-hex `\x` escape sequences.
#[test]
fn invalid_escapes() {
    assert_err!(StringLit, r#""\a""#, UnknownEscape, 1..3);
    assert_err!(StringLit, r#""foo\y""#, UnknownEscape, 4..6);
    assert_err!(StringLit, r#""\"#, UnterminatedEscape, 1);
    assert_err!(StringLit, r#""\x""#, UnterminatedEscape, 1..3);
    assert_err!(StringLit, r#""🦊\x1""#, UnterminatedEscape, 5..8);
    assert_err!(StringLit, r#"" \xaj""#, InvalidXEscape, 2..6);
    assert_err!(StringLit, r#""నక్క\xjb""#, InvalidXEscape, 13..17);
}
// Malformed `\u{…}` escapes: missing braces, unterminated, bad start
// character, non-hex digits, too many digits (max 6), and out-of-range
// scalar values (above 0x10FFFF).
#[test]
fn invalid_unicode_escapes() {
    assert_err!(StringLit, r#""\u""#, UnicodeEscapeWithoutBrace, 1..3);
    assert_err!(StringLit, r#""🦊\u ""#, UnicodeEscapeWithoutBrace, 5..7);
    assert_err!(StringLit, r#""\u3""#, UnicodeEscapeWithoutBrace, 1..3);
    assert_err!(StringLit, r#""\u{""#, UnterminatedUnicodeEscape, 1..4);
    assert_err!(StringLit, r#""\u{12""#, UnterminatedUnicodeEscape, 1..6);
    assert_err!(StringLit, r#""🦊\u{a0b""#, UnterminatedUnicodeEscape, 5..11);
    assert_err!(StringLit, r#""\u{a0_b ""#, UnterminatedUnicodeEscape, 1..10);
    // Underscores may not start the digit sequence.
    assert_err!(StringLit, r#""\u{_}నక్క""#, InvalidStartOfUnicodeEscape, 4);
    assert_err!(StringLit, r#""\u{_5f}""#, InvalidStartOfUnicodeEscape, 4);
    assert_err!(StringLit, r#""fox\u{x}""#, NonHexDigitInUnicodeEscape, 7);
    assert_err!(StringLit, r#""\u{0x}🦊""#, NonHexDigitInUnicodeEscape, 5);
    assert_err!(StringLit, r#""నక్క\u{3bx}""#, NonHexDigitInUnicodeEscape, 18);
    assert_err!(StringLit, r#""\u{3b_x}лиса""#, NonHexDigitInUnicodeEscape, 7);
    assert_err!(StringLit, r#""\u{4x_}""#, NonHexDigitInUnicodeEscape, 5);
    assert_err!(StringLit, r#""\u{1234567}""#, TooManyDigitInUnicodeEscape, 10);
    assert_err!(StringLit, r#""నక్క\u{1234567}🦊""#, TooManyDigitInUnicodeEscape, 22);
    assert_err!(StringLit, r#""నక్క\u{1_23_4_56_7}""#, TooManyDigitInUnicodeEscape, 26);
    assert_err!(StringLit, r#""\u{abcdef123}лиса""#, TooManyDigitInUnicodeEscape, 10);
    assert_err!(StringLit, r#""\u{110000}fox""#, InvalidUnicodeEscapeChar, 1..10);
}

128
third_party/rust/litrs/src/test_util.rs vendored Normal file
View file

@ -0,0 +1,128 @@
use crate::*;
use std::fmt::{Debug, Display};
/// Asserts that `result` is `Ok` and equals `expected`, and that formatting
/// the parsed value (`Display`) reproduces `input` exactly.
///
/// `parse_method` is only used to label panic messages. `#[track_caller]`
/// makes panics point at the calling test line.
#[track_caller]
pub(crate) fn assert_parse_ok_eq<T: PartialEq + Debug + Display>(
    input: &str,
    result: Result<T, ParseError>,
    expected: T,
    parse_method: &str,
) {
    // Unwrap the result first: a parse failure is its own kind of bug.
    let actual = match result {
        Ok(actual) => actual,
        Err(e) => {
            panic!(
                "expected `{}` to be parsed (with `{}`) successfully, but it failed: {:?}",
                input,
                parse_method,
                e,
            );
        }
    };
    // The parsed value must match what the test expects...
    if actual != expected {
        panic!(
            "unexpected parsing result (with `{}`) for `{}`:\nactual: {:?}\nexpected: {:?}",
            parse_method,
            input,
            actual,
            expected,
        );
    }
    // ...and must round-trip back to the original source text.
    if actual.to_string() != input {
        panic!(
            "formatting does not yield original input `{}`: {:?}",
            input,
            actual,
        );
    }
}
// This is not ideal, but to perform this check we need `proc-macro2`. So we
// just don't do anything if that feature is not enabled.
// No-op fallback so call sites compile unconditionally.
#[cfg(not(feature = "proc-macro2"))]
pub(crate) fn assert_roundtrip<T>(_: T, _: &str) {}
/// Asserts that `ours` converts to a `proc_macro2::Literal` that prints as
/// `input`, and that parsing `input` as a `proc_macro2::Literal` converts
/// back into a value equal to `ours` (conversion round-trip in both
/// directions).
#[cfg(feature = "proc-macro2")]
#[track_caller]
pub(crate) fn assert_roundtrip<T>(ours: T, input: &str)
where
    T: std::convert::TryFrom<proc_macro2::Literal> + fmt::Debug + PartialEq + Clone,
    proc_macro2::Literal: From<T>,
    <T as std::convert::TryFrom<proc_macro2::Literal>>::Error: std::fmt::Display,
{
    let pm_lit = input.parse::<proc_macro2::Literal>()
        .expect("failed to parse input as proc_macro2::Literal");
    let t_name = std::any::type_name::<T>();

    // Unfortunately, `proc_macro2::Literal` does not implement `PartialEq`, so
    // this is the next best thing.
    if proc_macro2::Literal::from(ours.clone()).to_string() != pm_lit.to_string() {
        panic!(
            "Converting {} to proc_macro2::Literal has unexpected result:\
                \nconverted: {:?}\nexpected: {:?}",
            t_name,
            proc_macro2::Literal::from(ours),
            pm_lit,
        );
    }

    // Reverse direction: proc_macro2::Literal -> T must succeed and agree.
    match T::try_from(pm_lit) {
        Err(e) => {
            panic!("Trying to convert proc_macro2::Literal to {} results in error: {}", t_name, e);
        }
        Ok(res) => {
            if res != ours {
                panic!(
                    "Converting proc_macro2::Literal to {} has unexpected result:\
                        \nactual: {:?}\nexpected: {:?}",
                    t_name,
                    res,
                    ours,
                );
            }
        }
    }
}
// Asserts that parsing `$input` fails with error kind `$kind` at the given
// span, both via the specific literal type `$ty` and via `Literal::parse`.
macro_rules! assert_err {
    ($ty:ident, $input:literal, $kind:ident, $( $span:tt )+ ) => {
        assert_err_single!($ty::parse($input), $kind, $($span)+);
        assert_err_single!($crate::Literal::parse($input), $kind, $($span)+);
    };
}
// Asserts that a single parse expression returns an error with the expected
// kind and span. The span argument may be `a..b` (range), `n` (a single
// position, expanded to `n..n + 1`), or `None` (error without a span).
macro_rules! assert_err_single {
    ($expr:expr, $kind:ident, $( $span:tt )+ ) => {
        let res = $expr;
        let err = match res {
            Err(e) => e,
            Ok(v) => panic!(
                "Expected `{}` to return an error, but it returned Ok({:?})",
                stringify!($expr),
                v,
            ),
        };
        if err.kind != $crate::err::ParseErrorKind::$kind {
            panic!(
                "Expected error kind {} for `{}` but got {:?}",
                stringify!($kind),
                stringify!($expr),
                err.kind,
            )
        }
        let expected_span = assert_err_single!(@span $($span)+);
        if err.span != expected_span {
            panic!(
                "Expected error span {:?} for `{}` but got {:?}",
                expected_span,
                stringify!($expr),
                err.span,
            )
        }
    };
    // Internal rules translating the three span syntaxes into `Option<Range>`.
    (@span $start:literal .. $end:literal) => { Some($start .. $end) };
    (@span $at:literal) => { Some($at.. $at + 1) };
    (@span None) => { None };
}

349
third_party/rust/litrs/src/tests.rs vendored Normal file
View file

@ -0,0 +1,349 @@
use crate::Literal;
// The empty string is not a literal at all.
#[test]
fn empty() {
    assert_err!(Literal, "", Empty, None);
}
// Inputs that are valid Rust tokens or idents but not literals (operators,
// bare suffixes, keywords-like idents, leading underscore/dot numbers).
#[test]
fn invalid_literals() {
    assert_err_single!(Literal::parse("."), InvalidLiteral, None);
    assert_err_single!(Literal::parse("+"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("-"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("e"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("e8"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("f32"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("foo"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("inf"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("nan"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("NaN"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("NAN"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("_2.7"), InvalidLiteral, None);
    assert_err_single!(Literal::parse(".5"), InvalidLiteral, None);
}
// Miscellaneous malformed inputs: trailing garbage, surrounding whitespace,
// and identifiers that merely start like numbers.
#[test]
fn misc() {
    assert_err_single!(Literal::parse("0x44.5"), UnexpectedChar, 4..6);
    assert_err_single!(Literal::parse("a"), InvalidLiteral, None);
    assert_err_single!(Literal::parse(";"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("0;"), UnexpectedChar, 1);
    assert_err_single!(Literal::parse(" 0"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("0 "), UnexpectedChar, 1);
    assert_err_single!(Literal::parse("_"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("_3"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("a_123"), InvalidLiteral, None);
    assert_err_single!(Literal::parse("B_123"), InvalidLiteral, None);
}
// Feeds the given byte array (must be valid UTF-8) to every parser and
// asserts that none of them panic; the input is printed before re-raising
// any caught panic so the failing case is identifiable.
macro_rules! assert_no_panic {
    ($input:expr) => {
        let arr = $input;
        let input = std::str::from_utf8(&arr).expect("not unicode");
        let res = std::panic::catch_unwind(move || {
            let _ = Literal::parse(input);
            let _ = crate::BoolLit::parse(input);
            let _ = crate::IntegerLit::parse(input);
            let _ = crate::FloatLit::parse(input);
            let _ = crate::CharLit::parse(input);
            let _ = crate::StringLit::parse(input);
            let _ = crate::ByteLit::parse(input);
            let _ = crate::ByteStringLit::parse(input);
        });

        if let Err(e) = res {
            println!("\n!!! panic for: {:?}", input);
            std::panic::resume_unwind(e);
        }
    };
}
// Exhaustive fuzz over all ASCII inputs of length 1 to 3; `#[ignore]`d
// because it is slow, run explicitly with `cargo test -- --ignored`.
#[test]
#[ignore]
fn never_panic_up_to_3() {
    for a in 0..128 {
        assert_no_panic!([a]);
        for b in 0..128 {
            assert_no_panic!([a, b]);
            for c in 0..128 {
                assert_no_panic!([a, b, c]);
            }
        }
    }
}
// This test takes super long in debug mode, but in release mode it's fine.
// Exhaustive fuzz over all ASCII inputs of exactly length 4 (128^4 cases).
#[test]
#[ignore]
fn never_panic_len_4() {
    for a in 0..128 {
        for b in 0..128 {
            for c in 0..128 {
                for d in 0..128 {
                    assert_no_panic!([a, b, c, d]);
                }
            }
        }
    }
}
// Interop with `proc_macro2`: converting between `proc_macro2::Literal` /
// `TokenTree` and this crate's literal types, in both directions, including
// the error cases where the token kind does not match.
#[cfg(feature = "proc-macro2")]
#[test]
fn proc_macro() {
    use std::convert::TryFrom;
    use proc_macro2::{
        self as pm2, TokenTree, Group, TokenStream, Delimiter, Spacing, Punct, Span, Ident,
    };
    use crate::{
        BoolLit, ByteLit, ByteStringLit, CharLit, FloatLit, IntegerLit, StringLit, err::TokenKind
    };

    // Asserts that `$input` is an `Err` whose `expected`/`actual` token
    // kinds are exactly the given ones.
    macro_rules! assert_invalid_token {
        ($input:expr, expected: $expected:path, actual: $actual:path $(,)?) => {
            let err = $input.unwrap_err();
            if err.expected != $expected {
                panic!(
                    "err.expected was expected to be {:?}, but is {:?}",
                    $expected,
                    err.expected,
                );
            }
            if err.actual != $actual {
                panic!("err.actual was expected to be {:?}, but is {:?}", $actual, err.actual);
            }
        };
    }

    // proc-macro2 literals and the equivalent literals parsed by this crate.
    let pm_u16_lit = pm2::Literal::u16_suffixed(2700);
    let pm_i16_lit = pm2::Literal::i16_unsuffixed(3912);
    let pm_f32_lit = pm2::Literal::f32_unsuffixed(3.14);
    let pm_f64_lit = pm2::Literal::f64_suffixed(99.3);
    let pm_string_lit = pm2::Literal::string("hello 🦊");
    let pm_bytestr_lit = pm2::Literal::byte_string(b"hello \nfoxxo");
    let pm_char_lit = pm2::Literal::character('🦀');

    let u16_lit = Literal::parse("2700u16".to_string()).unwrap();
    let i16_lit = Literal::parse("3912".to_string()).unwrap();
    let f32_lit = Literal::parse("3.14".to_string()).unwrap();
    let f64_lit = Literal::parse("99.3f64".to_string()).unwrap();
    let string_lit = Literal::parse(r#""hello 🦊""#.to_string()).unwrap();
    let bytestr_lit = Literal::parse(r#"b"hello \nfoxxo""#.to_string()).unwrap();
    let char_lit = Literal::parse("'🦀'".to_string()).unwrap();

    // `From<&pm2::Literal> for Literal` must agree with direct parsing.
    assert_eq!(Literal::from(&pm_u16_lit), u16_lit);
    assert_eq!(Literal::from(&pm_i16_lit), i16_lit);
    assert_eq!(Literal::from(&pm_f32_lit), f32_lit);
    assert_eq!(Literal::from(&pm_f64_lit), f64_lit);
    assert_eq!(Literal::from(&pm_string_lit), string_lit);
    assert_eq!(Literal::from(&pm_bytestr_lit), bytestr_lit);
    assert_eq!(Literal::from(&pm_char_lit), char_lit);

    // Non-literal token trees used for the error-path assertions below.
    let group = TokenTree::from(Group::new(Delimiter::Brace, TokenStream::new()));
    let punct = TokenTree::from(Punct::new(':', Spacing::Alone));
    let ident = TokenTree::from(Ident::new("peter", Span::call_site()));

    // `TryFrom<TokenTree> for Literal`: literal tokens succeed, others fail.
    assert_eq!(
        Literal::try_from(TokenTree::Literal(pm2::Literal::string("hello 🦊"))).unwrap(),
        Literal::String(StringLit::parse(r#""hello 🦊""#.to_string()).unwrap()),
    );
    assert_invalid_token!(
        Literal::try_from(punct.clone()),
        expected: TokenKind::Literal,
        actual: TokenKind::Punct,
    );
    assert_invalid_token!(
        Literal::try_from(group.clone()),
        expected: TokenKind::Literal,
        actual: TokenKind::Group,
    );
    assert_invalid_token!(
        Literal::try_from(ident.clone()),
        expected: TokenKind::Literal,
        actual: TokenKind::Ident,
    );

    // `TryFrom<pm2::Literal>` for the specific literal types.
    assert_eq!(Literal::from(IntegerLit::try_from(pm_u16_lit.clone()).unwrap()), u16_lit);
    assert_eq!(Literal::from(IntegerLit::try_from(pm_i16_lit.clone()).unwrap()), i16_lit);
    assert_eq!(Literal::from(FloatLit::try_from(pm_f32_lit.clone()).unwrap()), f32_lit);
    assert_eq!(Literal::from(FloatLit::try_from(pm_f64_lit.clone()).unwrap()), f64_lit);
    assert_eq!(Literal::from(StringLit::try_from(pm_string_lit.clone()).unwrap()), string_lit);
    assert_eq!(
        Literal::from(ByteStringLit::try_from(pm_bytestr_lit.clone()).unwrap()),
        bytestr_lit,
    );
    assert_eq!(Literal::from(CharLit::try_from(pm_char_lit.clone()).unwrap()), char_lit);

    // Mismatched literal kinds are rejected with precise kind information.
    assert_invalid_token!(
        StringLit::try_from(pm_u16_lit.clone()),
        expected: TokenKind::StringLit,
        actual: TokenKind::IntegerLit,
    );
    assert_invalid_token!(
        StringLit::try_from(pm_f32_lit.clone()),
        expected: TokenKind::StringLit,
        actual: TokenKind::FloatLit,
    );
    assert_invalid_token!(
        ByteLit::try_from(pm_bytestr_lit.clone()),
        expected: TokenKind::ByteLit,
        actual: TokenKind::ByteStringLit,
    );
    assert_invalid_token!(
        ByteLit::try_from(pm_i16_lit.clone()),
        expected: TokenKind::ByteLit,
        actual: TokenKind::IntegerLit,
    );
    assert_invalid_token!(
        IntegerLit::try_from(pm_string_lit.clone()),
        expected: TokenKind::IntegerLit,
        actual: TokenKind::StringLit,
    );
    assert_invalid_token!(
        IntegerLit::try_from(pm_char_lit.clone()),
        expected: TokenKind::IntegerLit,
        actual: TokenKind::CharLit,
    );

    // Same conversions, but starting from a `TokenTree` wrapper.
    assert_eq!(
        Literal::from(IntegerLit::try_from(TokenTree::from(pm_u16_lit.clone())).unwrap()),
        u16_lit,
    );
    assert_eq!(
        Literal::from(IntegerLit::try_from(TokenTree::from(pm_i16_lit.clone())).unwrap()),
        i16_lit,
    );
    assert_eq!(
        Literal::from(FloatLit::try_from(TokenTree::from(pm_f32_lit.clone())).unwrap()),
        f32_lit,
    );
    assert_eq!(
        Literal::from(FloatLit::try_from(TokenTree::from(pm_f64_lit.clone())).unwrap()),
        f64_lit,
    );
    assert_eq!(
        Literal::from(StringLit::try_from(TokenTree::from(pm_string_lit.clone())).unwrap()),
        string_lit,
    );
    assert_eq!(
        Literal::from(ByteStringLit::try_from(TokenTree::from(pm_bytestr_lit.clone())).unwrap()),
        bytestr_lit,
    );
    assert_eq!(
        Literal::from(CharLit::try_from(TokenTree::from(pm_char_lit.clone())).unwrap()),
        char_lit,
    );

    assert_invalid_token!(
        StringLit::try_from(TokenTree::from(pm_u16_lit.clone())),
        expected: TokenKind::StringLit,
        actual: TokenKind::IntegerLit,
    );
    assert_invalid_token!(
        StringLit::try_from(TokenTree::from(pm_f32_lit.clone())),
        expected: TokenKind::StringLit,
        actual: TokenKind::FloatLit,
    );
    assert_invalid_token!(
        BoolLit::try_from(TokenTree::from(pm_bytestr_lit.clone())),
        expected: TokenKind::BoolLit,
        actual: TokenKind::ByteStringLit,
    );
    assert_invalid_token!(
        BoolLit::try_from(TokenTree::from(pm_i16_lit.clone())),
        expected: TokenKind::BoolLit,
        actual: TokenKind::IntegerLit,
    );
    assert_invalid_token!(
        IntegerLit::try_from(TokenTree::from(pm_string_lit.clone())),
        expected: TokenKind::IntegerLit,
        actual: TokenKind::StringLit,
    );
    assert_invalid_token!(
        IntegerLit::try_from(TokenTree::from(pm_char_lit.clone())),
        expected: TokenKind::IntegerLit,
        actual: TokenKind::CharLit,
    );

    // Non-literal token trees are rejected by the specific types too.
    assert_invalid_token!(
        StringLit::try_from(TokenTree::from(group)),
        expected: TokenKind::StringLit,
        actual: TokenKind::Group,
    );
    assert_invalid_token!(
        BoolLit::try_from(TokenTree::from(punct)),
        expected: TokenKind::BoolLit,
        actual: TokenKind::Punct,
    );
    assert_invalid_token!(
        FloatLit::try_from(TokenTree::from(ident)),
        expected: TokenKind::FloatLit,
        actual: TokenKind::Ident,
    );
}
// `BoolLit`/`Literal` conversion from a `TokenTree::Ident`: only the exact
// identifiers `true` and `false` are accepted (case-sensitive, no affixes).
#[cfg(feature = "proc-macro2")]
#[test]
fn bool_try_from_tt() {
    use std::convert::TryFrom;
    use proc_macro2::{Ident, Span, TokenTree};
    use crate::BoolLit;

    // Helper constructing an identifier token at the call site span.
    let ident = |s: &str| Ident::new(s, Span::call_site());

    assert_eq!(BoolLit::try_from(TokenTree::Ident(ident("true"))).unwrap(), BoolLit::True);
    assert_eq!(BoolLit::try_from(TokenTree::Ident(ident("false"))).unwrap(), BoolLit::False);

    assert!(BoolLit::try_from(TokenTree::Ident(ident("falsex"))).is_err());
    assert!(BoolLit::try_from(TokenTree::Ident(ident("_false"))).is_err());
    assert!(BoolLit::try_from(TokenTree::Ident(ident("False"))).is_err());
    assert!(BoolLit::try_from(TokenTree::Ident(ident("True"))).is_err());
    assert!(BoolLit::try_from(TokenTree::Ident(ident("ltrue"))).is_err());

    assert_eq!(
        Literal::try_from(TokenTree::Ident(ident("true"))).unwrap(),
        Literal::Bool(BoolLit::True),
    );
    assert_eq!(
        Literal::try_from(TokenTree::Ident(ident("false"))).unwrap(),
        Literal::Bool(BoolLit::False),
    );

    assert!(Literal::try_from(TokenTree::Ident(ident("falsex"))).is_err());
    assert!(Literal::try_from(TokenTree::Ident(ident("_false"))).is_err());
    assert!(Literal::try_from(TokenTree::Ident(ident("False"))).is_err());
    assert!(Literal::try_from(TokenTree::Ident(ident("True"))).is_err());
    assert!(Literal::try_from(TokenTree::Ident(ident("ltrue"))).is_err());
}
// `Display` output of `InvalidToken`: human-readable "expected X, but found
// Y" messages, with examples for concrete literal kinds.
#[cfg(feature = "proc-macro2")]
#[test]
fn invalid_token_display() {
    use crate::{InvalidToken, err::TokenKind};

    let span = crate::err::Span::Two(proc_macro2::Span::call_site());
    assert_eq!(
        InvalidToken {
            actual: TokenKind::StringLit,
            expected: TokenKind::FloatLit,
            span,
        }.to_string(),
        r#"expected a float literal (e.g. `3.14`), but found a string literal (e.g. "Ferris")"#,
    );

    assert_eq!(
        InvalidToken {
            actual: TokenKind::Punct,
            expected: TokenKind::Literal,
            span,
        }.to_string(),
        r#"expected a literal, but found a punctuation character"#,
    );
}

File diff suppressed because one or more lines are too long

View file

@ -1,8 +1,8 @@
use super::{BackendResult, Error, Version, Writer};
use crate::{
back::glsl::{Options, WriterFlags},
AddressSpace, Binding, Expression, Handle, ImageClass, ImageDimension, Interpolation, Sampling,
Scalar, ScalarKind, ShaderStage, StorageFormat, Type, TypeInner,
AddressSpace, Binding, Expression, Handle, ImageClass, ImageDimension, Interpolation,
SampleLevel, Sampling, Scalar, ScalarKind, ShaderStage, StorageFormat, Type, TypeInner,
};
use std::fmt::Write;
@ -48,6 +48,8 @@ bitflags::bitflags! {
///
/// We can always support this, either through the language or a polyfill
const INSTANCE_INDEX = 1 << 22;
/// Sample specific LODs of cube / array shadow textures
const TEXTURE_SHADOW_LOD = 1 << 23;
}
}
@ -125,6 +127,7 @@ impl FeaturesManager {
check_feature!(TEXTURE_SAMPLES, 150);
check_feature!(TEXTURE_LEVELS, 130);
check_feature!(IMAGE_SIZE, 430, 310);
check_feature!(TEXTURE_SHADOW_LOD, 200, 300);
// Return an error if there are missing features
if missing.is_empty() {
@ -251,6 +254,11 @@ impl FeaturesManager {
}
}
if self.0.contains(Features::TEXTURE_SHADOW_LOD) {
// https://registry.khronos.org/OpenGL/extensions/EXT/EXT_texture_shadow_lod.txt
writeln!(out, "#extension GL_EXT_texture_shadow_lod : require")?;
}
Ok(())
}
}
@ -469,6 +477,47 @@ impl<'a, W> Writer<'a, W> {
}
}
}
Expression::ImageSample { image, level, offset, .. } => {
if let TypeInner::Image {
dim,
arrayed,
class: ImageClass::Depth { .. },
} = *info[image].ty.inner_with(&module.types) {
let lod = matches!(level, SampleLevel::Zero | SampleLevel::Exact(_));
let bias = matches!(level, SampleLevel::Bias(_));
let auto = matches!(level, SampleLevel::Auto);
let cube = dim == ImageDimension::Cube;
let array2d = dim == ImageDimension::D2 && arrayed;
let gles = self.options.version.is_es();
// We have a workaround of using `textureGrad` instead of `textureLod` if the LOD is zero,
// so we don't *need* this extension for those cases.
// But if we're explicitly allowed to use the extension (`WriterFlags::TEXTURE_SHADOW_LOD`),
// we always use it instead of the workaround.
let grad_workaround_applicable = (array2d || (cube && !arrayed)) && level == SampleLevel::Zero;
let prefer_grad_workaround = grad_workaround_applicable && !self.options.writer_flags.contains(WriterFlags::TEXTURE_SHADOW_LOD);
let mut ext_used = false;
// float texture(sampler2DArrayShadow sampler, vec4 P [, float bias])
// float texture(samplerCubeArrayShadow sampler, vec4 P, float compare [, float bias])
ext_used |= (array2d || cube && arrayed) && bias;
// The non `bias` version of this was standardized in GL 4.3, but never in GLES.
// float textureOffset(sampler2DArrayShadow sampler, vec4 P, ivec2 offset [, float bias])
ext_used |= array2d && (bias || (gles && auto)) && offset.is_some();
// float textureLod(sampler2DArrayShadow sampler, vec4 P, float lod)
// float textureLodOffset(sampler2DArrayShadow sampler, vec4 P, float lod, ivec2 offset)
// float textureLod(samplerCubeShadow sampler, vec4 P, float lod)
// float textureLod(samplerCubeArrayShadow sampler, vec4 P, float compare, float lod)
ext_used |= (cube || array2d) && lod && !prefer_grad_workaround;
if ext_used {
features.request(Features::TEXTURE_SHADOW_LOD);
}
}
}
_ => {}
}
}

View file

@ -646,16 +646,6 @@ impl<'a, W: Write> Writer<'a, W> {
// preprocessor not the processor ¯\_(ツ)_/¯
self.features.write(self.options, &mut self.out)?;
// Write the additional extensions
if self
.options
.writer_flags
.contains(WriterFlags::TEXTURE_SHADOW_LOD)
{
// https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_shadow_lod.txt
writeln!(self.out, "#extension GL_EXT_texture_shadow_lod : require")?;
}
// glsl es requires a precision to be specified for floats and ints
// TODO: Should this be user configurable?
if es {
@ -2620,51 +2610,49 @@ impl<'a, W: Write> Writer<'a, W> {
level,
depth_ref,
} => {
let dim = match *ctx.resolve_type(image, &self.module.types) {
TypeInner::Image { dim, .. } => dim,
let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
TypeInner::Image {
dim,
class,
arrayed,
..
} => (dim, class, arrayed),
_ => unreachable!(),
};
if dim == crate::ImageDimension::Cube
&& array_index.is_some()
&& depth_ref.is_some()
{
match level {
crate::SampleLevel::Zero
| crate::SampleLevel::Exact(_)
| crate::SampleLevel::Gradient { .. }
| crate::SampleLevel::Bias(_) => {
return Err(Error::Custom(String::from(
"gsamplerCubeArrayShadow isn't supported in textureGrad, \
textureLod or texture with bias",
)))
}
crate::SampleLevel::Auto => {}
let mut err = None;
if dim == crate::ImageDimension::Cube {
if offset.is_some() {
err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
}
if arrayed
&& matches!(class, crate::ImageClass::Depth { .. })
&& matches!(level, crate::SampleLevel::Gradient { .. })
{
err = Some("samplerCubeArrayShadow don't support textureGrad");
}
}
if gather.is_some() && level != crate::SampleLevel::Zero {
err = Some("textureGather doesn't support LOD parameters");
}
if let Some(err) = err {
return Err(Error::Custom(String::from(err)));
}
// textureLod on sampler2DArrayShadow and samplerCubeShadow does not exist in GLSL.
// To emulate this, we will have to use textureGrad with a constant gradient of 0.
let workaround_lod_array_shadow_as_grad = (array_index.is_some()
|| dim == crate::ImageDimension::Cube)
&& depth_ref.is_some()
&& gather.is_none()
&& !self
.options
.writer_flags
.contains(WriterFlags::TEXTURE_SHADOW_LOD);
// `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
// unless `GL_EXT_texture_shadow_lod` is present.
// But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
|| (dim == crate::ImageDimension::D2 && arrayed))
&& level == crate::SampleLevel::Zero
&& matches!(class, crate::ImageClass::Depth { .. })
&& !self.features.contains(Features::TEXTURE_SHADOW_LOD);
//Write the function to be used depending on the sample level
// Write the function to be used depending on the sample level
let fun_name = match level {
crate::SampleLevel::Zero if gather.is_some() => "textureGather",
crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => {
if workaround_lod_array_shadow_as_grad {
"textureGrad"
} else {
"textureLod"
}
}
crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
crate::SampleLevel::Gradient { .. } => "textureGrad",
};
let offset_name = match offset {
@ -2727,7 +2715,7 @@ impl<'a, W: Write> Writer<'a, W> {
crate::SampleLevel::Auto => (),
// Zero needs level set to 0
crate::SampleLevel::Zero => {
if workaround_lod_array_shadow_as_grad {
if workaround_lod_with_grad {
let vec_dim = match dim {
crate::ImageDimension::Cube => 3,
_ => 2,
@ -2739,13 +2727,8 @@ impl<'a, W: Write> Writer<'a, W> {
}
// Exact and bias require another argument
crate::SampleLevel::Exact(expr) => {
if workaround_lod_array_shadow_as_grad {
log::warn!("Unable to `textureLod` a shadow array, ignoring the LOD");
write!(self.out, ", vec2(0,0), vec2(0,0)")?;
} else {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
crate::SampleLevel::Bias(_) => {
// This needs to be done after the offset writing
@ -3155,7 +3138,29 @@ impl<'a, W: Write> Writer<'a, W> {
Mf::Abs => "abs",
Mf::Min => "min",
Mf::Max => "max",
Mf::Clamp => "clamp",
Mf::Clamp => {
let scalar_kind = ctx
.resolve_type(arg, &self.module.types)
.scalar_kind()
.unwrap();
match scalar_kind {
crate::ScalarKind::Float => "clamp",
// Clamp is undefined if min > max. In practice this means it can use a median-of-three
// instruction to determine the value. This is fine according to the WGSL spec for float
// clamp, but integer clamp _must_ use min-max. As such we write out min/max.
_ => {
write!(self.out, "min(max(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ", ")?;
self.write_expr(arg1.unwrap(), ctx)?;
write!(self.out, "), ")?;
self.write_expr(arg2.unwrap(), ctx)?;
write!(self.out, ")")?;
return Ok(());
}
}
}
Mf::Saturate => {
write!(self.out, "clamp(")?;

View file

@ -731,12 +731,41 @@ impl<'w> BlockContext<'w> {
Some(crate::ScalarKind::Uint) => spirv::GLOp::UMax,
other => unimplemented!("Unexpected max({:?})", other),
}),
Mf::Clamp => MathOp::Ext(match arg_scalar_kind {
Some(crate::ScalarKind::Float) => spirv::GLOp::FClamp,
Some(crate::ScalarKind::Sint) => spirv::GLOp::SClamp,
Some(crate::ScalarKind::Uint) => spirv::GLOp::UClamp,
Mf::Clamp => match arg_scalar_kind {
// Clamp is undefined if min > max. In practice this means it can use a median-of-three
// instruction to determine the value. This is fine according to the WGSL spec for float
// clamp, but integer clamp _must_ use min-max. As such we write out min/max.
Some(crate::ScalarKind::Float) => MathOp::Ext(spirv::GLOp::FClamp),
Some(_) => {
let (min_op, max_op) = match arg_scalar_kind {
Some(crate::ScalarKind::Sint) => {
(spirv::GLOp::SMin, spirv::GLOp::SMax)
}
Some(crate::ScalarKind::Uint) => {
(spirv::GLOp::UMin, spirv::GLOp::UMax)
}
_ => unreachable!(),
};
let max_id = self.gen_id();
block.body.push(Instruction::ext_inst(
self.writer.gl450_ext_inst_id,
max_op,
result_type_id,
max_id,
&[arg0_id, arg1_id],
));
MathOp::Custom(Instruction::ext_inst(
self.writer.gl450_ext_inst_id,
min_op,
result_type_id,
id,
&[max_id, arg2_id],
))
}
other => unimplemented!("Unexpected max({:?})", other),
}),
},
Mf::Saturate => {
let (maybe_size, scalar) = match *arg_ty {
crate::TypeInner::Vector { size, scalar } => (Some(size), scalar),

View file

@ -593,6 +593,7 @@ impl<W: Write> Writer<W> {
}
write!(self.out, ">")?;
}
TypeInner::AccelerationStructure => write!(self.out, "acceleration_structure")?,
_ => {
return Err(Error::Unimplemented(format!("write_value_type {inner:?}")));
}

View file

@ -292,278 +292,286 @@ impl<I: Iterator<Item = u32>> super::Frontend<I> {
);
if let Some(ep) = self.lookup_entry_point.remove(&fun_id) {
// create a wrapping function
let mut function = crate::Function {
name: Some(format!("{}_wrap", ep.name)),
self.deferred_entry_points.push((ep, fun_id));
}
Ok(())
}
pub(super) fn process_entry_point(
&mut self,
module: &mut crate::Module,
ep: super::EntryPoint,
fun_id: u32,
) -> Result<(), Error> {
// create a wrapping function
let mut function = crate::Function {
name: Some(format!("{}_wrap", ep.name)),
arguments: Vec::new(),
result: None,
local_variables: Arena::new(),
expressions: Arena::new(),
named_expressions: crate::NamedExpressions::default(),
body: crate::Block::new(),
};
// 1. copy the inputs from arguments to privates
for &v_id in ep.variable_ids.iter() {
let lvar = self.lookup_variable.lookup(v_id)?;
if let super::Variable::Input(ref arg) = lvar.inner {
let span = module.global_variables.get_span(lvar.handle);
let arg_expr = function.expressions.append(
crate::Expression::FunctionArgument(function.arguments.len() as u32),
span,
);
let load_expr = if arg.ty == module.global_variables[lvar.handle].ty {
arg_expr
} else {
// The only case where the type is different is if we need to treat
// unsigned integer as signed.
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
let handle = function.expressions.append(
crate::Expression::As {
expr: arg_expr,
kind: crate::ScalarKind::Sint,
convert: Some(4),
},
span,
);
function.body.extend(emitter.finish(&function.expressions));
handle
};
function.body.push(
crate::Statement::Store {
pointer: function
.expressions
.append(crate::Expression::GlobalVariable(lvar.handle), span),
value: load_expr,
},
span,
);
let mut arg = arg.clone();
if ep.stage == crate::ShaderStage::Fragment {
if let Some(ref mut binding) = arg.binding {
binding.apply_default_interpolation(&module.types[arg.ty].inner);
}
}
function.arguments.push(arg);
}
}
// 2. call the wrapped function
let fake_id = !(module.entry_points.len() as u32); // doesn't matter, as long as it's not a collision
let dummy_handle = self.add_call(fake_id, fun_id);
function.body.push(
crate::Statement::Call {
function: dummy_handle,
arguments: Vec::new(),
result: None,
local_variables: Arena::new(),
expressions: Arena::new(),
named_expressions: crate::NamedExpressions::default(),
body: crate::Block::new(),
};
},
crate::Span::default(),
);
// 1. copy the inputs from arguments to privates
for &v_id in ep.variable_ids.iter() {
let lvar = self.lookup_variable.lookup(v_id)?;
if let super::Variable::Input(ref arg) = lvar.inner {
let span = module.global_variables.get_span(lvar.handle);
let arg_expr = function.expressions.append(
crate::Expression::FunctionArgument(function.arguments.len() as u32),
// 3. copy the outputs from privates to the result
let mut members = Vec::new();
let mut components = Vec::new();
for &v_id in ep.variable_ids.iter() {
let lvar = self.lookup_variable.lookup(v_id)?;
if let super::Variable::Output(ref result) = lvar.inner {
let span = module.global_variables.get_span(lvar.handle);
let expr_handle = function
.expressions
.append(crate::Expression::GlobalVariable(lvar.handle), span);
// Cull problematic builtins of gl_PerVertex.
// See the docs for `Frontend::gl_per_vertex_builtin_access`.
{
let ty = &module.types[result.ty];
if let crate::TypeInner::Struct {
members: ref original_members,
span,
} = ty.inner
{
let mut new_members = None;
for (idx, member) in original_members.iter().enumerate() {
if let Some(crate::Binding::BuiltIn(built_in)) = member.binding {
if !self.gl_per_vertex_builtin_access.contains(&built_in) {
new_members.get_or_insert_with(|| original_members.clone())
[idx]
.binding = None;
}
}
}
if let Some(new_members) = new_members {
module.types.replace(
result.ty,
crate::Type {
name: ty.name.clone(),
inner: crate::TypeInner::Struct {
members: new_members,
span,
},
},
);
}
}
}
match module.types[result.ty].inner {
crate::TypeInner::Struct {
members: ref sub_members,
..
} => {
for (index, sm) in sub_members.iter().enumerate() {
if sm.binding.is_none() {
continue;
}
let mut sm = sm.clone();
if let Some(ref mut binding) = sm.binding {
if ep.stage == crate::ShaderStage::Vertex {
binding.apply_default_interpolation(&module.types[sm.ty].inner);
}
}
members.push(sm);
components.push(function.expressions.append(
crate::Expression::AccessIndex {
base: expr_handle,
index: index as u32,
},
span,
));
}
}
ref inner => {
let mut binding = result.binding.clone();
if let Some(ref mut binding) = binding {
if ep.stage == crate::ShaderStage::Vertex {
binding.apply_default_interpolation(inner);
}
}
members.push(crate::StructMember {
name: None,
ty: result.ty,
binding,
offset: 0,
});
// populate just the globals first, then do `Load` in a
// separate step, so that we can get a range.
components.push(expr_handle);
}
}
}
}
for (member_index, member) in members.iter().enumerate() {
match member.binding {
Some(crate::Binding::BuiltIn(crate::BuiltIn::Position { .. }))
if self.options.adjust_coordinate_space =>
{
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
let global_expr = components[member_index];
let span = function.expressions.get_span(global_expr);
let access_expr = function.expressions.append(
crate::Expression::AccessIndex {
base: global_expr,
index: 1,
},
span,
);
let load_expr = if arg.ty == module.global_variables[lvar.handle].ty {
arg_expr
} else {
// The only case where the type is different is if we need to treat
// unsigned integer as signed.
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
let handle = function.expressions.append(
crate::Expression::As {
expr: arg_expr,
kind: crate::ScalarKind::Sint,
convert: Some(4),
},
span,
);
function.body.extend(emitter.finish(&function.expressions));
handle
};
let load_expr = function.expressions.append(
crate::Expression::Load {
pointer: access_expr,
},
span,
);
let neg_expr = function.expressions.append(
crate::Expression::Unary {
op: crate::UnaryOperator::Negate,
expr: load_expr,
},
span,
);
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
crate::Statement::Store {
pointer: function
.expressions
.append(crate::Expression::GlobalVariable(lvar.handle), span),
value: load_expr,
pointer: access_expr,
value: neg_expr,
},
span,
);
let mut arg = arg.clone();
if ep.stage == crate::ShaderStage::Fragment {
if let Some(ref mut binding) = arg.binding {
binding.apply_default_interpolation(&module.types[arg.ty].inner);
}
}
function.arguments.push(arg);
}
_ => {}
}
// 2. call the wrapped function
let fake_id = !(module.entry_points.len() as u32); // doesn't matter, as long as it's not a collision
let dummy_handle = self.add_call(fake_id, fun_id);
function.body.push(
crate::Statement::Call {
function: dummy_handle,
arguments: Vec::new(),
result: None,
},
crate::Span::default(),
);
// 3. copy the outputs from privates to the result
let mut members = Vec::new();
let mut components = Vec::new();
for &v_id in ep.variable_ids.iter() {
let lvar = self.lookup_variable.lookup(v_id)?;
if let super::Variable::Output(ref result) = lvar.inner {
let span = module.global_variables.get_span(lvar.handle);
let expr_handle = function
.expressions
.append(crate::Expression::GlobalVariable(lvar.handle), span);
// Cull problematic builtins of gl_PerVertex.
// See the docs for `Frontend::gl_per_vertex_builtin_access`.
{
let ty = &module.types[result.ty];
match ty.inner {
crate::TypeInner::Struct {
members: ref original_members,
span,
} if ty.name.as_deref() == Some("gl_PerVertex") => {
let mut new_members = original_members.clone();
for member in &mut new_members {
if let Some(crate::Binding::BuiltIn(built_in)) = member.binding
{
if !self.gl_per_vertex_builtin_access.contains(&built_in) {
member.binding = None
}
}
}
if &new_members != original_members {
module.types.replace(
result.ty,
crate::Type {
name: ty.name.clone(),
inner: crate::TypeInner::Struct {
members: new_members,
span,
},
},
);
}
}
_ => {}
}
}
match module.types[result.ty].inner {
crate::TypeInner::Struct {
members: ref sub_members,
..
} => {
for (index, sm) in sub_members.iter().enumerate() {
if sm.binding.is_none() {
continue;
}
let mut sm = sm.clone();
if let Some(ref mut binding) = sm.binding {
if ep.stage == crate::ShaderStage::Vertex {
binding.apply_default_interpolation(
&module.types[sm.ty].inner,
);
}
}
members.push(sm);
components.push(function.expressions.append(
crate::Expression::AccessIndex {
base: expr_handle,
index: index as u32,
},
span,
));
}
}
ref inner => {
let mut binding = result.binding.clone();
if let Some(ref mut binding) = binding {
if ep.stage == crate::ShaderStage::Vertex {
binding.apply_default_interpolation(inner);
}
}
members.push(crate::StructMember {
name: None,
ty: result.ty,
binding,
offset: 0,
});
// populate just the globals first, then do `Load` in a
// separate step, so that we can get a range.
components.push(expr_handle);
}
}
}
}
for (member_index, member) in members.iter().enumerate() {
match member.binding {
Some(crate::Binding::BuiltIn(crate::BuiltIn::Position { .. }))
if self.options.adjust_coordinate_space =>
{
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
let global_expr = components[member_index];
let span = function.expressions.get_span(global_expr);
let access_expr = function.expressions.append(
crate::Expression::AccessIndex {
base: global_expr,
index: 1,
},
span,
);
let load_expr = function.expressions.append(
crate::Expression::Load {
pointer: access_expr,
},
span,
);
let neg_expr = function.expressions.append(
crate::Expression::Unary {
op: crate::UnaryOperator::Negate,
expr: load_expr,
},
span,
);
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
crate::Statement::Store {
pointer: access_expr,
value: neg_expr,
},
span,
);
}
_ => {}
}
}
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
for component in components.iter_mut() {
let load_expr = crate::Expression::Load {
pointer: *component,
};
let span = function.expressions.get_span(*component);
*component = function.expressions.append(load_expr, span);
}
match members[..] {
[] => {}
[ref member] => {
function.body.extend(emitter.finish(&function.expressions));
let span = function.expressions.get_span(components[0]);
function.body.push(
crate::Statement::Return {
value: components.first().cloned(),
},
span,
);
function.result = Some(crate::FunctionResult {
ty: member.ty,
binding: member.binding.clone(),
});
}
_ => {
let span = crate::Span::total_span(
components.iter().map(|h| function.expressions.get_span(*h)),
);
let ty = module.types.insert(
crate::Type {
name: None,
inner: crate::TypeInner::Struct {
members,
span: 0xFFFF, // shouldn't matter
},
},
span,
);
let result_expr = function
.expressions
.append(crate::Expression::Compose { ty, components }, span);
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
crate::Statement::Return {
value: Some(result_expr),
},
span,
);
function.result = Some(crate::FunctionResult { ty, binding: None });
}
}
module.entry_points.push(crate::EntryPoint {
name: ep.name,
stage: ep.stage,
early_depth_test: ep.early_depth_test,
workgroup_size: ep.workgroup_size,
function,
});
}
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
for component in components.iter_mut() {
let load_expr = crate::Expression::Load {
pointer: *component,
};
let span = function.expressions.get_span(*component);
*component = function.expressions.append(load_expr, span);
}
match members[..] {
[] => {}
[ref member] => {
function.body.extend(emitter.finish(&function.expressions));
let span = function.expressions.get_span(components[0]);
function.body.push(
crate::Statement::Return {
value: components.first().cloned(),
},
span,
);
function.result = Some(crate::FunctionResult {
ty: member.ty,
binding: member.binding.clone(),
});
}
_ => {
let span = crate::Span::total_span(
components.iter().map(|h| function.expressions.get_span(*h)),
);
let ty = module.types.insert(
crate::Type {
name: None,
inner: crate::TypeInner::Struct {
members,
span: 0xFFFF, // shouldn't matter
},
},
span,
);
let result_expr = function
.expressions
.append(crate::Expression::Compose { ty, components }, span);
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
crate::Statement::Return {
value: Some(result_expr),
},
span,
);
function.result = Some(crate::FunctionResult { ty, binding: None });
}
}
module.entry_points.push(crate::EntryPoint {
name: ep.name,
stage: ep.stage,
early_depth_test: ep.early_depth_test,
workgroup_size: ep.workgroup_size,
function,
});
Ok(())
}
}

View file

@ -577,6 +577,9 @@ pub struct Frontend<I> {
lookup_function_type: FastHashMap<spirv::Word, LookupFunctionType>,
lookup_function: FastHashMap<spirv::Word, LookupFunction>,
lookup_entry_point: FastHashMap<spirv::Word, EntryPoint>,
// When parsing functions, each entry point function gets an entry here so that additional
// processing for them can be performed after all function parsing.
deferred_entry_points: Vec<(EntryPoint, spirv::Word)>,
//Note: each `OpFunctionCall` gets a single entry here, indexed by the
// dummy `Handle<crate::Function>` of the call site.
deferred_function_calls: Vec<spirv::Word>,
@ -628,6 +631,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
lookup_function_type: FastHashMap::default(),
lookup_function: FastHashMap::default(),
lookup_entry_point: FastHashMap::default(),
deferred_entry_points: Vec::default(),
deferred_function_calls: Vec::default(),
dummy_functions: Arena::new(),
function_call_graph: GraphMap::new(),
@ -1561,12 +1565,10 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
span,
);
if ty.name.as_deref() == Some("gl_PerVertex") {
if let Some(crate::Binding::BuiltIn(built_in)) =
members[index as usize].binding
{
self.gl_per_vertex_builtin_access.insert(built_in);
}
if let Some(crate::Binding::BuiltIn(built_in)) =
members[index as usize].binding
{
self.gl_per_vertex_builtin_access.insert(built_in);
}
AccessExpression {
@ -3956,6 +3958,12 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
}?;
}
// Do entry point specific processing after all functions are parsed so that we can
// cull unused problematic builtins of gl_PerVertex.
for (ep, fun_id) in core::mem::take(&mut self.deferred_entry_points) {
self.process_entry_point(&mut module, ep, fun_id)?;
}
log::info!("Patching...");
{
let mut nodes = petgraph::algo::toposort(&self.function_call_graph, None)
@ -5081,7 +5089,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
None
};
let span = self.span_from_with_op(start);
let mut dec = self.future_decor.remove(&id).unwrap_or_default();
let dec = self.future_decor.remove(&id).unwrap_or_default();
let original_ty = self.lookup_type.lookup(type_id)?.handle;
let mut ty = original_ty;
@ -5127,17 +5135,6 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
None => map_storage_class(storage_class)?,
};
// Fix empty name for gl_PerVertex struct generated by glslang
if let crate::TypeInner::Pointer { .. } = module.types[original_ty].inner {
if ext_class == ExtendedClass::Input || ext_class == ExtendedClass::Output {
if let Some(ref dec_name) = dec.name {
if dec_name.is_empty() {
dec.name = Some("perVertexStruct".to_string())
}
}
}
}
let (inner, var) = match ext_class {
ExtendedClass::Global(mut space) => {
if let crate::AddressSpace::Storage { ref mut access } = space {

View file

@ -107,6 +107,12 @@ pub enum TypeError {
MatrixElementNotFloat,
#[error("The constant {0:?} is specialized, and cannot be used as an array size")]
UnsupportedSpecializedArrayLength(Handle<crate::Constant>),
#[error("{} of dimensionality {dim:?} and class {class:?} are not supported", if *.arrayed {"Arrayed images"} else {"Images"})]
UnsupportedImageType {
dim: crate::ImageDimension,
arrayed: bool,
class: crate::ImageClass,
},
#[error("Array stride {stride} does not match the expected {expected}")]
InvalidArrayStride { stride: u32, expected: u32 },
#[error("Field '{0}' can't be dynamically-sized, has type {1:?}")]
@ -596,8 +602,15 @@ impl super::Validator {
Ti::Image {
dim,
arrayed,
class: _,
class,
} => {
if arrayed && matches!(dim, crate::ImageDimension::D3) {
return Err(TypeError::UnsupportedImageType {
dim,
arrayed,
class,
});
}
if arrayed && matches!(dim, crate::ImageDimension::Cube) {
self.require_type_capability(Capabilities::CUBE_ARRAY_TEXTURES)?;
}

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"92c0bcfb5bf68fb55acb6e7b826ec07c1cfdd6d53b057c16a5c698e044ea228e","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/any_surface.rs":"1c032bc1894a222a47f0116b976f1543c1140c0534678502ee1172d4f77fc515","src/binding_model.rs":"2bd4e4a36742ccf0cab0afa039411a791e2a6e9ea3909d0b85cc9a84cc151c6b","src/command/bind.rs":"a37f042484b65d9fdea4cdab3667381623ee9a8943a6d32683d410b92736d306","src/command/bundle.rs":"91513a3be0adf46a9f3454b6a3d00ff6686729eb91fe9dd6d732cbfa1ff6d1d8","src/command/clear.rs":"b20e93c4b8cb47062b38e472f78d28d9ec00fd1169b17a87094be7f9d1c995e1","src/command/compute.rs":"eb60f0e2842dd20b366905225af24f4ca2a1b0c67914b86009c5b870b26f747f","src/command/draw.rs":"e8a664fc248e273e8c0e4aaeb645010b3f4ec61d29d858137f31f6f653c86542","src/command/memory_init.rs":"6ec93b9e2eb21edaa534e60770b4ba95735e9de61e74d827bc492df8e3639449","src/command/mod.rs":"d6a66a5796bd824be72af2c8d3ece59a507090c61cb50e9856eb4c70a28945e2","src/command/query.rs":"dffc843746b10ba9a50b8a2b92a59b407b56a845619a96d72a5883588fcb50f0","src/command/render.rs":"c3783b4f19b4eafb33f94554aea69408d42e40b5e98da22aa804a0931430ea6f","src/command/transfer.rs":"bf1077d1a99a258bad46087ae7234703627e7f4d30b38e6142d016c02deaad3a","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"65f47b58939b60f88f47861e65d5d45209492df8e73e7c1b60b3b459f510c09e","src/device/bgl.rs":"ec8bdd6e9b4cd50c25bed317275863d0c16bb6619f62ed85bf0464948010dfc1","src/device/global.rs":"7d70a45bd39e251c6945fc475883c4e69632f92a7abe263adab6e47a248de5a4","src/device/life.rs":"cd12343d5a14d82b18b787991811b36f420719776336f8a65b45c32fd47a77d4","src/device/mod.rs":"fff41f92e1a9f6660e18dc30452d9911ca827701bb8303af2ae06f1c1e1a795f","src/device/queue.rs":"2ffc477d1bebb35a1fc8e46f4ca2c5
ef50a4eb6034968f076062461b2e678699","src/device/resource.rs":"4f22cf27da8d829b624877d7d3bb10971a0e8fb7c4f95d85d5011049a010684a","src/device/trace.rs":"9deb1b083165e07253b4928ac2f564aba06f9089c3aca1c0a1d438d87d981542","src/error.rs":"e3b6b7a69877437f4e46af7f0e8ca1db1822beae7c8448db41c2bae0f64b2bb4","src/global.rs":"0966475959706650fd036a18d51441a8e14c3ef10107db617f597614ca47e50a","src/hal_api.rs":"1cd9c3fe1c9d8c3a24e3e7f963a2ef26e056a2b26d529b840dbc969090aaf201","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"352a1b75d4535f24b06d16134421db98f910e6e719f50f863a204df6768e3369","src/id.rs":"c736c0b3d35cf620e2c01322d57c4938b42828b39948ecad82d39fc39c1093c1","src/identity.rs":"c6a719389d71bb11c9ceaeadb0496f8b4c6ad24e35597e12b40980ad7ad72f10","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"c9b5b53a0aeac8e117d49a3a007fab001cd5737e29dd75388cdbfc24f3d8df08","src/lib.rs":"49174591f8116c3b8fadb185f89ce69ae931ee6e9f639d2558848db82ea1651f","src/pipeline.rs":"300f58afc16c454ce52aabff6debd7a7db85ed627b111a8801bcb201827f110c","src/pool.rs":"778ea1c23fcfaaa5001606e686f712f606826039d60dd5a3cd26e7de91ac057a","src/present.rs":"86b1e8bd7314f77f083be6d89a2f734e92f2ed11c86eb4c912c754fcdaa2e597","src/registry.rs":"dbc9310a24a843cf6b94a4bab78b0bb5f325e18c1f3c19c94d4f12b4f29e8598","src/resource.rs":"cd568c9d1abd4bf740cb86efae7862b5478518f3b1cdaf792ae05b3c0920c8e0","src/snatch.rs":"29a1135ee09c06883eac4df6f45b7220c2ba8f89f34232ea1d270d6e7b05c7a8","src/storage.rs":"f0c41461b8f9cdc862dbd3de04c8e720ee416c7c57310696f6f4fd22183fcc85","src/track/buffer.rs":"65c27dfabe7a1c3e4ddbde7189e53b2e95f3f3663aa82b121801a2fd0dcbd304","src/track/metadata.rs":"ac82a9c69b0a141b5c3ca69b203c5aa2a17578b598cab3ae156
b917cef734b97","src/track/mod.rs":"8f03955447544f3ebcb48547440a48d321ad1ff0e0c601a62623b5457763b8de","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"2da10160c46d07ad15986ba6f1356b7933806fc5c3fa5a9d8deea44d9a3c93a7","src/track/texture.rs":"15892e639f2ecbb13c8d34c29e3fd6ad719cb71e2d40c64910b552b8985ddab0","src/validation.rs":"613c58c3601f36d6aa5986cea01f30497c6bd4ceb990824904d101b2327941a9"},"package":null}
{"files":{"Cargo.toml":"4880d66b004519ca6e424fc9e2e6ac065536d36334a2e327b90422e97f2a2a35","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/any_surface.rs":"1c032bc1894a222a47f0116b976f1543c1140c0534678502ee1172d4f77fc515","src/binding_model.rs":"bb4aefad17957e770a5f70f00bf5853dc13da1d9f836493c9aa9adbbe7bb8147","src/command/bind.rs":"a37f042484b65d9fdea4cdab3667381623ee9a8943a6d32683d410b92736d306","src/command/bundle.rs":"3857e572cbf1512acb725d734001857ae2a15cce5a6a00d66d835081a11ad3cf","src/command/clear.rs":"03cfc0d4c689d56010391440ab279e615ef1d3235eb1f9f9df0323682d275109","src/command/compute.rs":"2b6beed328ed351ad6fe7088cfa1824c1bf4be50deaeab971cdcb09914d791de","src/command/draw.rs":"15f9ad857504d8098279f9c789317feba321c9b6b8f0de20b8ba98f358c99d89","src/command/memory_init.rs":"6ec93b9e2eb21edaa534e60770b4ba95735e9de61e74d827bc492df8e3639449","src/command/mod.rs":"1d347e1746194f7a07d1f75bd3a9d3cbe121fbaa479c25ba6b8c16e9d699e06b","src/command/query.rs":"43b78a163eb0eb5f1427b7a57b6d39a2748c25f880ba024c91e2f71e2a6a817d","src/command/render.rs":"808dc8106811b32877637851e63baeba7c7438748dec67cbb17ea93c58dc61bd","src/command/transfer.rs":"bf1077d1a99a258bad46087ae7234703627e7f4d30b38e6142d016c02deaad3a","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"65f47b58939b60f88f47861e65d5d45209492df8e73e7c1b60b3b459f510c09e","src/device/bgl.rs":"ec8bdd6e9b4cd50c25bed317275863d0c16bb6619f62ed85bf0464948010dfc1","src/device/global.rs":"bec78c0295dc57fdf92e26aed7634e9bf4f9c733f0781bf073f05f53908f0779","src/device/life.rs":"3cacaaa74df04bb1285a36d70395b35cfa17059f8d6289b41e665ecbc64cb66a","src/device/mod.rs":"fff41f92e1a9f6660e18dc30452d9911ca827701bb8303af2ae06f1c1e1a795f","src/device/queue.rs":"415c7ac0f0555a5fa0a9b977436ccf
e47f2ce892529f7bb4774eeb93702bc62c","src/device/resource.rs":"4185fdb69f140c2b7d45bb06150aa66f8b94a2cf9aa2339471058a26263aacf5","src/device/trace.rs":"9deb1b083165e07253b4928ac2f564aba06f9089c3aca1c0a1d438d87d981542","src/error.rs":"e3b6b7a69877437f4e46af7f0e8ca1db1822beae7c8448db41c2bae0f64b2bb4","src/global.rs":"0966475959706650fd036a18d51441a8e14c3ef10107db617f597614ca47e50a","src/hal_api.rs":"1cd9c3fe1c9d8c3a24e3e7f963a2ef26e056a2b26d529b840dbc969090aaf201","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"352a1b75d4535f24b06d16134421db98f910e6e719f50f863a204df6768e3369","src/id.rs":"c736c0b3d35cf620e2c01322d57c4938b42828b39948ecad82d39fc39c1093c1","src/identity.rs":"12b820eb4b8bd7b226e15eec97d0f100a695f6b9be7acd79ad2421f2d0fe1985","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"b6de2a371ef3b43d3217102fe87e423dd1eb12da86b65f54b902d9eaa38b6b9f","src/lib.rs":"4ad9979442cf88557fb3b9f8d3b26c7b929a710c60cabcd1f51788917c95aecb","src/pipeline.rs":"300f58afc16c454ce52aabff6debd7a7db85ed627b111a8801bcb201827f110c","src/pool.rs":"778ea1c23fcfaaa5001606e686f712f606826039d60dd5a3cd26e7de91ac057a","src/present.rs":"411d3db5ce4190ce139f2271909a8e4ed45aec5573eee4c080e663e623fc54da","src/registry.rs":"913e651dc585ff12fe7659443c38d635a2904881e56cb7159c5ca72d45ae5800","src/resource.rs":"59731bc9a207d87b07b6db9c897e20d64be27c144bb8eb8ab2505807163acfc4","src/snatch.rs":"29a1135ee09c06883eac4df6f45b7220c2ba8f89f34232ea1d270d6e7b05c7a8","src/storage.rs":"f0c41461b8f9cdc862dbd3de04c8e720ee416c7c57310696f6f4fd22183fcc85","src/track/buffer.rs":"83a0cbb8026dbd651d32ea5a47f332f691afed1c5e6f14e78a4fe8aa25e2ad12","src/track/metadata.rs":"655985fdfdd1c7fe8220af98abadf33de7e8920b485e3dd27c2
8688c3dd2e47d","src/track/mod.rs":"52470a48de6b5dce55385e23ba7a3cbf512cc10cdf431a35aa42190e2fc4306d","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"305e0a493fb1cd0a325274c0757e99c19f9d14deaa8ca11ada41c1399a4ae5c4","src/track/texture.rs":"ba3e3814b341b5242548b55d77bef1d1d9e7d52d63784be98c51e342da7fefff","src/validation.rs":"a82ef120ac62a14b7aaed47062d8fb2d85be41c34ab79ad54889ef56bbd5f0d3"},"package":null}

View file

@ -41,6 +41,7 @@ arrayvec = "0.7"
bit-vec = "0.6"
bitflags = "2"
codespan-reporting = "0.11"
document-features = "0.2.8"
indexmap = "2"
log = "0.4"
once_cell = "1"

View file

@ -38,6 +38,8 @@ pub enum BindGroupLayoutEntryError {
ArrayUnsupported,
#[error("Multisampled binding with sample type `TextureSampleType::Float` must have filterable set to false.")]
SampleTypeFloatFilterableBindingMultisampled,
#[error("Multisampled texture binding view dimension must be 2d, got {0:?}")]
Non2DMultisampled(wgt::TextureViewDimension),
#[error(transparent)]
MissingFeatures(#[from] MissingFeatures),
#[error(transparent)]
@ -219,7 +221,7 @@ pub enum BindingZone {
}
#[derive(Clone, Debug, Error)]
#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}")]
#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}. Check the limit `{}` passed to `Adapter::request_device`", .kind.to_config_str())]
pub struct BindingTypeMaxCountError {
pub kind: BindingTypeMaxCountErrorKind,
pub zone: BindingZone,
@ -238,6 +240,28 @@ pub enum BindingTypeMaxCountErrorKind {
UniformBuffers,
}
impl BindingTypeMaxCountErrorKind {
fn to_config_str(&self) -> &'static str {
match self {
BindingTypeMaxCountErrorKind::DynamicUniformBuffers => {
"max_dynamic_uniform_buffers_per_pipeline_layout"
}
BindingTypeMaxCountErrorKind::DynamicStorageBuffers => {
"max_dynamic_storage_buffers_per_pipeline_layout"
}
BindingTypeMaxCountErrorKind::SampledTextures => {
"max_sampled_textures_per_shader_stage"
}
BindingTypeMaxCountErrorKind::Samplers => "max_samplers_per_shader_stage",
BindingTypeMaxCountErrorKind::StorageBuffers => "max_storage_buffers_per_shader_stage",
BindingTypeMaxCountErrorKind::StorageTextures => {
"max_storage_textures_per_shader_stage"
}
BindingTypeMaxCountErrorKind::UniformBuffers => "max_uniform_buffers_per_shader_stage",
}
}
}
#[derive(Debug, Default)]
pub(crate) struct PerStageBindingTypeCounter {
vertex: u32,

View file

@ -97,7 +97,7 @@ use crate::{
id,
init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction},
pipeline::{PipelineFlags, RenderPipeline, VertexStep},
resource::{Resource, ResourceInfo, ResourceType},
resource::{Buffer, Resource, ResourceInfo, ResourceType},
resource_log,
track::RenderBundleScope,
validation::check_buffer_usage,
@ -110,9 +110,11 @@ use thiserror::Error;
use hal::CommandEncoder as _;
use super::ArcRenderCommand;
/// https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-draw
fn validate_draw(
vertex: &[Option<VertexState>],
fn validate_draw<A: HalApi>(
vertex: &[Option<VertexState<A>>],
step: &[VertexStep],
first_vertex: u32,
vertex_count: u32,
@ -152,10 +154,10 @@ fn validate_draw(
}
// See https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-drawindexed
fn validate_indexed_draw(
vertex: &[Option<VertexState>],
fn validate_indexed_draw<A: HalApi>(
vertex: &[Option<VertexState<A>>],
step: &[VertexStep],
index_state: &IndexState,
index_state: &IndexState<A>,
first_index: u32,
index_count: u32,
first_instance: u32,
@ -260,6 +262,9 @@ impl RenderBundleEncoder {
None => (true, true),
};
// TODO: should be device.limits.max_color_attachments
let max_color_attachments = hal::MAX_COLOR_ATTACHMENTS;
//TODO: validate that attachment formats are renderable,
// have expected aspects, support multisampling.
Ok(Self {
@ -267,11 +272,11 @@ impl RenderBundleEncoder {
parent_id,
context: RenderPassContext {
attachments: AttachmentData {
colors: if desc.color_formats.len() > hal::MAX_COLOR_ATTACHMENTS {
colors: if desc.color_formats.len() > max_color_attachments {
return Err(CreateRenderBundleError::ColorAttachment(
ColorAttachmentError::TooMany {
given: desc.color_formats.len(),
limit: hal::MAX_COLOR_ATTACHMENTS,
limit: max_color_attachments,
},
));
} else {
@ -345,24 +350,44 @@ impl RenderBundleEncoder {
) -> Result<RenderBundle<A>, RenderBundleError> {
let bind_group_guard = hub.bind_groups.read();
let pipeline_guard = hub.render_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
let texture_guard = hub.textures.read();
let mut state = State {
trackers: RenderBundleScope::new(
&*buffer_guard,
&*texture_guard,
&*bind_group_guard,
&*pipeline_guard,
&*query_set_guard,
),
trackers: RenderBundleScope::new(),
pipeline: None,
bind: (0..hal::MAX_BIND_GROUPS).map(|_| None).collect(),
vertex: (0..hal::MAX_VERTEX_BUFFERS).map(|_| None).collect(),
index: None,
flat_dynamic_offsets: Vec::new(),
};
let indices = &device.tracker_indices;
state
.trackers
.buffers
.write()
.set_size(indices.buffers.size());
state
.trackers
.textures
.write()
.set_size(indices.textures.size());
state
.trackers
.bind_groups
.write()
.set_size(indices.bind_groups.size());
state
.trackers
.render_pipelines
.write()
.set_size(indices.render_pipelines.size());
state
.trackers
.query_sets
.write()
.set_size(indices.query_sets.size());
let mut commands = Vec::new();
let mut buffer_memory_init_actions = Vec::new();
let mut texture_memory_init_actions = Vec::new();
@ -471,7 +496,7 @@ impl RenderBundleEncoder {
let pipeline_state = PipelineState::new(pipeline);
commands.push(command);
commands.push(ArcRenderCommand::SetPipeline(pipeline.clone()));
// If this pipeline uses push constants, zero out their values.
if let Some(iter) = pipeline_state.zero_push_constants() {
@ -496,7 +521,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::INDEX)
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDEX)
.map_pass_err(scope)?;
let end = match size {
@ -508,7 +533,7 @@ impl RenderBundleEncoder {
offset..end,
MemoryInitKind::NeedsInitializedMemory,
));
state.set_index_buffer(buffer_id, index_format, offset..end);
state.set_index_buffer(buffer.clone(), index_format, offset..end);
}
RenderCommand::SetVertexBuffer {
slot,
@ -535,7 +560,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::VERTEX)
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::VERTEX)
.map_pass_err(scope)?;
let end = match size {
@ -547,13 +572,13 @@ impl RenderBundleEncoder {
offset..end,
MemoryInitKind::NeedsInitializedMemory,
));
state.vertex[slot as usize] = Some(VertexState::new(buffer_id, offset..end));
state.vertex[slot as usize] = Some(VertexState::new(buffer.clone(), offset..end));
}
RenderCommand::SetPushConstant {
stages,
offset,
size_bytes,
values_offset: _,
values_offset,
} => {
let scope = PassErrorScope::SetPushConstant;
let end_offset = offset + size_bytes;
@ -564,7 +589,7 @@ impl RenderBundleEncoder {
.validate_push_constant_ranges(stages, offset, end_offset)
.map_pass_err(scope)?;
commands.push(command);
commands.push(ArcRenderCommand::SetPushConstant { stages, offset, size_bytes, values_offset });
}
RenderCommand::Draw {
vertex_count,
@ -592,14 +617,19 @@ impl RenderBundleEncoder {
if instance_count > 0 && vertex_count > 0 {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command);
commands.push(ArcRenderCommand::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
});
}
}
RenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
base_vertex: _,
base_vertex,
first_instance,
} => {
let scope = PassErrorScope::Draw {
@ -628,7 +658,7 @@ impl RenderBundleEncoder {
commands.extend(state.flush_index());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command);
commands.push(ArcRenderCommand::DrawIndexed { index_count, instance_count, first_index, base_vertex, first_instance });
}
}
RenderCommand::MultiDrawIndirect {
@ -657,7 +687,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
@ -668,7 +698,7 @@ impl RenderBundleEncoder {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command);
commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: false });
}
RenderCommand::MultiDrawIndirect {
buffer_id,
@ -696,7 +726,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
@ -713,7 +743,7 @@ impl RenderBundleEncoder {
commands.extend(index.flush());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.push(command);
commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: true });
}
RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
@ -748,7 +778,10 @@ impl RenderBundleEncoder {
buffer_memory_init_actions,
texture_memory_init_actions,
context: self.context,
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(device.tracker_indices.bundles.clone()),
),
discard_hal_labels: device
.instance_flags
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS),
@ -824,7 +857,7 @@ pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
pub struct RenderBundle<A: HalApi> {
// Normalized command stream. It can be executed verbatim,
// without re-binding anything on the pipeline change.
base: BasePass<RenderCommand>,
base: BasePass<ArcRenderCommand<A>>,
pub(super) is_depth_read_only: bool,
pub(super) is_stencil_read_only: bool,
pub(crate) device: Arc<Device<A>>,
@ -863,7 +896,6 @@ impl<A: HalApi> RenderBundle<A> {
/// All the validation has already been done by this point.
/// The only failure condition is if some of the used buffers are destroyed.
pub(super) unsafe fn execute(&self, raw: &mut A::CommandEncoder) -> Result<(), ExecutionError> {
let trackers = &self.used;
let mut offsets = self.base.dynamic_offsets.as_slice();
let mut pipeline_layout = None::<Arc<PipelineLayout<A>>>;
if !self.discard_hal_labels {
@ -874,74 +906,65 @@ impl<A: HalApi> RenderBundle<A> {
let snatch_guard = self.device.snatchable_lock.read();
use ArcRenderCommand as Cmd;
for command in self.base.commands.iter() {
match *command {
RenderCommand::SetBindGroup {
match command {
Cmd::SetBindGroup {
index,
num_dynamic_offsets,
bind_group_id,
bind_group,
} => {
let bind_groups = trackers.bind_groups.read();
let bind_group = bind_groups.get(bind_group_id).unwrap();
let raw_bg = bind_group
.raw(&snatch_guard)
.ok_or(ExecutionError::InvalidBindGroup(bind_group_id))?;
.ok_or(ExecutionError::InvalidBindGroup(bind_group.info.id()))?;
unsafe {
raw.set_bind_group(
pipeline_layout.as_ref().unwrap().raw(),
index,
*index,
raw_bg,
&offsets[..num_dynamic_offsets],
&offsets[..*num_dynamic_offsets],
)
};
offsets = &offsets[num_dynamic_offsets..];
offsets = &offsets[*num_dynamic_offsets..];
}
RenderCommand::SetPipeline(pipeline_id) => {
let render_pipelines = trackers.render_pipelines.read();
let pipeline = render_pipelines.get(pipeline_id).unwrap();
Cmd::SetPipeline(pipeline) => {
unsafe { raw.set_render_pipeline(pipeline.raw()) };
pipeline_layout = Some(pipeline.layout.clone());
}
RenderCommand::SetIndexBuffer {
buffer_id,
Cmd::SetIndexBuffer {
buffer,
index_format,
offset,
size,
} => {
let buffers = trackers.buffers.read();
let buffer: &A::Buffer = buffers
.get(buffer_id)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
let buffer: &A::Buffer = buffer
.raw(&snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let bb = hal::BufferBinding {
buffer,
offset,
size,
offset: *offset,
size: *size,
};
unsafe { raw.set_index_buffer(bb, index_format) };
unsafe { raw.set_index_buffer(bb, *index_format) };
}
RenderCommand::SetVertexBuffer {
Cmd::SetVertexBuffer {
slot,
buffer_id,
buffer,
offset,
size,
} => {
let buffers = trackers.buffers.read();
let buffer = buffers
.get(buffer_id)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
let buffer = buffer
.raw(&snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let bb = hal::BufferBinding {
buffer,
offset,
size,
offset: *offset,
size: *size,
};
unsafe { raw.set_vertex_buffer(slot, bb) };
unsafe { raw.set_vertex_buffer(*slot, bb) };
}
RenderCommand::SetPushConstant {
Cmd::SetPushConstant {
stages,
offset,
size_bytes,
@ -949,7 +972,7 @@ impl<A: HalApi> RenderBundle<A> {
} => {
let pipeline_layout = pipeline_layout.as_ref().unwrap();
if let Some(values_offset) = values_offset {
if let Some(values_offset) = *values_offset {
let values_end_offset =
(values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
let data_slice = &self.base.push_constant_data
@ -958,20 +981,20 @@ impl<A: HalApi> RenderBundle<A> {
unsafe {
raw.set_push_constants(
pipeline_layout.raw(),
stages,
offset,
*stages,
*offset,
data_slice,
)
}
} else {
super::push_constant_clear(
offset,
size_bytes,
*offset,
*size_bytes,
|clear_offset, clear_data| {
unsafe {
raw.set_push_constants(
pipeline_layout.raw(),
stages,
*stages,
clear_offset,
clear_data,
)
@ -980,15 +1003,22 @@ impl<A: HalApi> RenderBundle<A> {
);
}
}
RenderCommand::Draw {
Cmd::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
} => {
unsafe { raw.draw(first_vertex, vertex_count, first_instance, instance_count) };
unsafe {
raw.draw(
*first_vertex,
*vertex_count,
*first_instance,
*instance_count,
)
};
}
RenderCommand::DrawIndexed {
Cmd::DrawIndexed {
index_count,
instance_count,
first_index,
@ -997,63 +1027,54 @@ impl<A: HalApi> RenderBundle<A> {
} => {
unsafe {
raw.draw_indexed(
first_index,
index_count,
base_vertex,
first_instance,
instance_count,
*first_index,
*index_count,
*base_vertex,
*first_instance,
*instance_count,
)
};
}
RenderCommand::MultiDrawIndirect {
buffer_id,
Cmd::MultiDrawIndirect {
buffer,
offset,
count: None,
indexed: false,
} => {
let buffers = trackers.buffers.read();
let buffer = buffers
.get(buffer_id)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
let buffer = buffer
.raw(&snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
unsafe { raw.draw_indirect(buffer, offset, 1) };
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
unsafe { raw.draw_indirect(buffer, *offset, 1) };
}
RenderCommand::MultiDrawIndirect {
buffer_id,
Cmd::MultiDrawIndirect {
buffer,
offset,
count: None,
indexed: true,
} => {
let buffers = trackers.buffers.read();
let buffer = buffers
.get(buffer_id)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
let buffer = buffer
.raw(&snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
unsafe { raw.draw_indexed_indirect(buffer, offset, 1) };
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
unsafe { raw.draw_indexed_indirect(buffer, *offset, 1) };
}
RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => {
Cmd::MultiDrawIndirect { .. } | Cmd::MultiDrawIndirectCount { .. } => {
return Err(ExecutionError::Unimplemented("multi-draw-indirect"))
}
RenderCommand::PushDebugGroup { .. }
| RenderCommand::InsertDebugMarker { .. }
| RenderCommand::PopDebugGroup => {
Cmd::PushDebugGroup { .. } | Cmd::InsertDebugMarker { .. } | Cmd::PopDebugGroup => {
return Err(ExecutionError::Unimplemented("debug-markers"))
}
RenderCommand::WriteTimestamp { .. }
| RenderCommand::BeginOcclusionQuery { .. }
| RenderCommand::EndOcclusionQuery
| RenderCommand::BeginPipelineStatisticsQuery { .. }
| RenderCommand::EndPipelineStatisticsQuery => {
Cmd::WriteTimestamp { .. }
| Cmd::BeginOcclusionQuery { .. }
| Cmd::EndOcclusionQuery
| Cmd::BeginPipelineStatisticsQuery { .. }
| Cmd::EndPipelineStatisticsQuery => {
return Err(ExecutionError::Unimplemented("queries"))
}
RenderCommand::ExecuteBundle(_)
| RenderCommand::SetBlendConstant(_)
| RenderCommand::SetStencilReference(_)
| RenderCommand::SetViewport { .. }
| RenderCommand::SetScissor(_) => unreachable!(),
Cmd::ExecuteBundle(_)
| Cmd::SetBlendConstant(_)
| Cmd::SetStencilReference(_)
| Cmd::SetViewport { .. }
| Cmd::SetScissor(_) => unreachable!(),
}
}
@ -1087,14 +1108,14 @@ impl<A: HalApi> Resource for RenderBundle<A> {
/// and calls [`State::flush_index`] before any indexed draw command to produce
/// a `SetIndexBuffer` command if one is necessary.
#[derive(Debug)]
struct IndexState {
buffer: id::BufferId,
struct IndexState<A: HalApi> {
buffer: Arc<Buffer<A>>,
format: wgt::IndexFormat,
range: Range<wgt::BufferAddress>,
is_dirty: bool,
}
impl IndexState {
impl<A: HalApi> IndexState<A> {
/// Return the number of entries in the current index buffer.
///
/// Panic if no index buffer has been set.
@ -1109,11 +1130,11 @@ impl IndexState {
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
fn flush(&mut self) -> Option<RenderCommand> {
fn flush(&mut self) -> Option<ArcRenderCommand<A>> {
if self.is_dirty {
self.is_dirty = false;
Some(RenderCommand::SetIndexBuffer {
buffer_id: self.buffer,
Some(ArcRenderCommand::SetIndexBuffer {
buffer: self.buffer.clone(),
index_format: self.format,
offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start),
@ -1134,14 +1155,14 @@ impl IndexState {
///
/// [`flush`]: IndexState::flush
#[derive(Debug)]
struct VertexState {
buffer: id::BufferId,
struct VertexState<A: HalApi> {
buffer: Arc<Buffer<A>>,
range: Range<wgt::BufferAddress>,
is_dirty: bool,
}
impl VertexState {
fn new(buffer: id::BufferId, range: Range<wgt::BufferAddress>) -> Self {
impl<A: HalApi> VertexState<A> {
fn new(buffer: Arc<Buffer<A>>, range: Range<wgt::BufferAddress>) -> Self {
Self {
buffer,
range,
@ -1152,12 +1173,12 @@ impl VertexState {
/// Generate a `SetVertexBuffer` command for this slot, if necessary.
///
/// `slot` is the index of the vertex buffer slot that `self` tracks.
fn flush(&mut self, slot: u32) -> Option<RenderCommand> {
fn flush(&mut self, slot: u32) -> Option<ArcRenderCommand<A>> {
if self.is_dirty {
self.is_dirty = false;
Some(RenderCommand::SetVertexBuffer {
Some(ArcRenderCommand::SetVertexBuffer {
slot,
buffer_id: self.buffer,
buffer: self.buffer.clone(),
offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start),
})
@ -1219,7 +1240,7 @@ impl<A: HalApi> PipelineState<A> {
/// Return a sequence of commands to zero the push constant ranges this
/// pipeline uses. If no initialization is necessary, return `None`.
fn zero_push_constants(&self) -> Option<impl Iterator<Item = RenderCommand>> {
fn zero_push_constants(&self) -> Option<impl Iterator<Item = ArcRenderCommand<A>>> {
if !self.push_constant_ranges.is_empty() {
let nonoverlapping_ranges =
super::bind::compute_nonoverlapping_ranges(&self.push_constant_ranges);
@ -1227,7 +1248,7 @@ impl<A: HalApi> PipelineState<A> {
Some(
nonoverlapping_ranges
.into_iter()
.map(|range| RenderCommand::SetPushConstant {
.map(|range| ArcRenderCommand::SetPushConstant {
stages: range.stages,
offset: range.range.start,
size_bytes: range.range.end - range.range.start,
@ -1261,11 +1282,11 @@ struct State<A: HalApi> {
bind: ArrayVec<Option<BindState<A>>, { hal::MAX_BIND_GROUPS }>,
/// The state of each vertex buffer slot.
vertex: ArrayVec<Option<VertexState>, { hal::MAX_VERTEX_BUFFERS }>,
vertex: ArrayVec<Option<VertexState<A>>, { hal::MAX_VERTEX_BUFFERS }>,
/// The current index buffer, if one has been set. We flush this state
/// before indexed draw commands.
index: Option<IndexState>,
index: Option<IndexState<A>>,
/// Dynamic offset values used by the cleaned-up command sequence.
///
@ -1375,13 +1396,13 @@ impl<A: HalApi> State<A> {
/// Set the bundle's current index buffer and its associated parameters.
fn set_index_buffer(
&mut self,
buffer: id::BufferId,
buffer: Arc<Buffer<A>>,
format: wgt::IndexFormat,
range: Range<wgt::BufferAddress>,
) {
match self.index {
Some(ref current)
if current.buffer == buffer
if Arc::ptr_eq(&current.buffer, &buffer)
&& current.format == format
&& current.range == range =>
{
@ -1400,11 +1421,11 @@ impl<A: HalApi> State<A> {
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
fn flush_index(&mut self) -> Option<RenderCommand> {
fn flush_index(&mut self) -> Option<ArcRenderCommand<A>> {
self.index.as_mut().and_then(|index| index.flush())
}
fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
fn flush_vertices(&mut self) -> impl Iterator<Item = ArcRenderCommand<A>> + '_ {
self.vertex
.iter_mut()
.enumerate()
@ -1416,7 +1437,7 @@ impl<A: HalApi> State<A> {
&mut self,
used_bind_groups: usize,
dynamic_offsets: &[wgt::DynamicOffset],
) -> impl Iterator<Item = RenderCommand> + '_ {
) -> impl Iterator<Item = ArcRenderCommand<A>> + '_ {
// Append each dirty bind group's dynamic offsets to `flat_dynamic_offsets`.
for contents in self.bind[..used_bind_groups].iter().flatten() {
if contents.is_dirty {
@ -1435,9 +1456,9 @@ impl<A: HalApi> State<A> {
if contents.is_dirty {
contents.is_dirty = false;
let offsets = &contents.dynamic_offsets;
return Some(RenderCommand::SetBindGroup {
return Some(ArcRenderCommand::SetBindGroup {
index: i.try_into().unwrap(),
bind_group_id: contents.bind_group.as_info().id(),
bind_group: contents.bind_group.clone(),
num_dynamic_offsets: offsets.end - offsets.start,
});
}

View file

@ -39,6 +39,11 @@ pub enum ClearError {
UnalignedFillSize(BufferAddress),
#[error("Buffer offset {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(BufferAddress),
#[error("Clear starts at offset {start_offset} with size of {requested_size}, but these added together exceed `u64::MAX`")]
OffsetPlusSizeExceeds64BitBounds {
start_offset: BufferAddress,
requested_size: BufferAddress,
},
#[error("Clear of {start_offset}..{end_offset} would end up overrunning the bounds of the buffer of size {buffer_size}")]
BufferOverrun {
start_offset: BufferAddress,
@ -117,25 +122,27 @@ impl Global {
if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(ClearError::UnalignedBufferOffset(offset));
}
if let Some(size) = size {
if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(ClearError::UnalignedFillSize(size));
}
let destination_end_offset = offset + size;
if destination_end_offset > dst_buffer.size {
return Err(ClearError::BufferOverrun {
let size = size.unwrap_or(dst_buffer.size.saturating_sub(offset));
if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(ClearError::UnalignedFillSize(size));
}
let end_offset =
offset
.checked_add(size)
.ok_or(ClearError::OffsetPlusSizeExceeds64BitBounds {
start_offset: offset,
end_offset: destination_end_offset,
buffer_size: dst_buffer.size,
});
}
requested_size: size,
})?;
if end_offset > dst_buffer.size {
return Err(ClearError::BufferOverrun {
start_offset: offset,
end_offset,
buffer_size: dst_buffer.size,
});
}
let end = match size {
Some(size) => offset + size,
None => dst_buffer.size,
};
if offset == end {
if offset == end_offset {
log::trace!("Ignoring fill_buffer of size 0");
return Ok(());
}
@ -144,7 +151,7 @@ impl Global {
cmd_buf_data.buffer_memory_init_actions.extend(
dst_buffer.initialization_status.read().create_action(
&dst_buffer,
offset..end,
offset..end_offset,
MemoryInitKind::ImplicitlyInitialized,
),
);
@ -154,7 +161,7 @@ impl Global {
let cmd_buf_raw = cmd_buf_data.encoder.open()?;
unsafe {
cmd_buf_raw.transition_buffers(dst_barrier.into_iter());
cmd_buf_raw.clear_buffer(dst_raw, offset..end);
cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset);
}
Ok(())
}
@ -366,7 +373,7 @@ fn clear_texture_via_buffer_copies<A: HalApi>(
assert!(
max_rows_per_copy > 0,
"Zero buffer size is too small to fill a single row \
of a texture with format {:?} and desc {:?}",
of a texture with format {:?} and desc {:?}",
texture_desc.format,
texture_desc.size
);

View file

@ -1,6 +1,7 @@
use crate::device::DeviceError;
use crate::resource::Resource;
use crate::snatch::SnatchGuard;
use crate::track::TrackerIndex;
use crate::{
binding_model::{
BindError, BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError,
@ -305,7 +306,7 @@ impl<A: HalApi> State<A> {
raw_encoder: &mut A::CommandEncoder,
base_trackers: &mut Tracker<A>,
bind_group_guard: &Storage<BindGroup<A>>,
indirect_buffer: Option<id::BufferId>,
indirect_buffer: Option<TrackerIndex>,
snatch_guard: &SnatchGuard,
) -> Result<(), UsageConflict> {
for id in self.binder.list_active() {
@ -402,12 +403,11 @@ impl Global {
let pipeline_guard = hub.compute_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
let texture_guard = hub.textures.read();
let mut state = State {
binder: Binder::new(),
pipeline: None,
scope: UsageScope::new(&*buffer_guard, &*texture_guard),
scope: UsageScope::new(&device.tracker_indices),
debug_scope_depth: 0,
};
let mut temp_offsets = Vec::new();
@ -452,17 +452,14 @@ impl Global {
let snatch_guard = device.snatchable_lock.read();
tracker.set_size(
Some(&*buffer_guard),
Some(&*texture_guard),
None,
None,
Some(&*bind_group_guard),
Some(&*pipeline_guard),
None,
None,
Some(&*query_set_guard),
);
let indices = &device.tracker_indices;
tracker.buffers.set_size(indices.buffers.size());
tracker.textures.set_size(indices.textures.size());
tracker.bind_groups.set_size(indices.bind_groups.size());
tracker
.compute_pipelines
.set_size(indices.compute_pipelines.size());
tracker.query_sets.set_size(indices.query_sets.size());
let discard_hal_labels = self
.instance
@ -719,8 +716,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(
buffer_id,
indirect_buffer.usage,
wgt::BufferUsages::INDIRECT,
)
.map_pass_err(scope)?;
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
if end_offset > indirect_buffer.size {
@ -753,7 +754,7 @@ impl Global {
raw,
&mut intermediate_trackers,
&*bind_group_guard,
Some(buffer_id),
Some(indirect_buffer.as_info().tracker_index()),
&snatch_guard,
)
.map_pass_err(scope)?;

View file

@ -2,17 +2,22 @@
!*/
use crate::{
binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError},
binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError},
error::ErrorFormatter,
hal_api::HalApi,
id,
pipeline::RenderPipeline,
resource::{Buffer, QuerySet},
track::UsageConflict,
validation::{MissingBufferUsageError, MissingTextureUsageError},
};
use wgt::{BufferAddress, BufferSize, Color, VertexStepMode};
use std::num::NonZeroU32;
use std::{num::NonZeroU32, sync::Arc};
use thiserror::Error;
use super::RenderBundle;
/// Error validating a draw call.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
@ -245,3 +250,114 @@ pub enum RenderCommand {
EndPipelineStatisticsQuery,
ExecuteBundle(id::RenderBundleId),
}
/// Equivalent to `RenderCommand` with the Ids resolved into resource Arcs.
#[doc(hidden)]
#[derive(Clone, Debug)]
pub enum ArcRenderCommand<A: HalApi> {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup<A>>,
},
SetPipeline(Arc<RenderPipeline<A>>),
SetIndexBuffer {
buffer: Arc<Buffer<A>>,
index_format: wgt::IndexFormat,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetVertexBuffer {
slot: u32,
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetBlendConstant(Color),
SetStencilReference(u32),
SetViewport {
rect: Rect<f32>,
depth_min: f32,
depth_max: f32,
},
SetScissor(Rect<u32>),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
///
/// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
/// of the restrictions these commands must satisfy.
SetPushConstant {
/// Which stages we are setting push constant values for.
stages: wgt::ShaderStages,
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32,
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
///
/// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
///
/// `None` means zeros should be written to the destination range, and
/// there is no corresponding data in `push_constant_data`. This is used
/// by render bundles, which explicitly clear out any state that
/// post-bundle code might see.
values_offset: Option<u32>,
},
Draw {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
},
DrawIndexed {
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
},
MultiDrawIndirect {
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
/// Count of `None` represents a non-multi call.
count: Option<NonZeroU32>,
indexed: bool,
},
MultiDrawIndirectCount {
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
count_buffer: Arc<Buffer<A>>,
count_buffer_offset: BufferAddress,
max_count: u32,
indexed: bool,
},
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
WriteTimestamp {
query_set: Arc<QuerySet<A>>,
query_index: u32,
},
BeginOcclusionQuery {
query_index: u32,
},
EndOcclusionQuery,
BeginPipelineStatisticsQuery {
query_set: Arc<QuerySet<A>>,
query_index: u32,
},
EndPipelineStatisticsQuery,
ExecuteBundle(Arc<RenderBundle<A>>),
}

View file

@ -75,7 +75,7 @@ impl<A: HalApi> CommandEncoder<A> {
Ok(())
}
fn discard(&mut self) {
pub(crate) fn discard(&mut self) {
if self.is_open {
self.is_open = false;
unsafe { self.raw.discard_encoding() };
@ -112,7 +112,7 @@ pub(crate) struct DestroyedBufferError(pub id::BufferId);
pub(crate) struct DestroyedTextureError(pub id::TextureId);
pub struct CommandBufferMutable<A: HalApi> {
encoder: CommandEncoder<A>,
pub(crate) encoder: CommandEncoder<A>,
status: CommandEncoderStatus,
pub(crate) trackers: Tracker<A>,
buffer_memory_init_actions: Vec<BufferInitTrackerAction<A>>,
@ -174,6 +174,7 @@ impl<A: HalApi> CommandBuffer<A> {
.as_ref()
.unwrap_or(&String::from("<CommandBuffer>"))
.as_str(),
None,
),
data: Mutex::new(Some(CommandBufferMutable {
encoder: CommandEncoder {

View file

@ -4,7 +4,7 @@ use hal::CommandEncoder as _;
use crate::device::trace::Command as TraceCommand;
use crate::{
command::{CommandBuffer, CommandEncoderError},
device::DeviceError,
device::{DeviceError, MissingFeatures},
global::Global,
hal_api::HalApi,
id::{self, Id},
@ -108,6 +108,8 @@ pub enum QueryError {
Device(#[from] DeviceError),
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error(transparent)]
MissingFeature(#[from] MissingFeatures),
#[error("Error encountered while trying to use queries")]
Use(#[from] QueryUseError),
#[error("Error encountered while trying to resolve a query")]
@ -355,6 +357,11 @@ impl Global {
let hub = A::hub(self);
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
cmd_buf
.device
.require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();

View file

@ -22,7 +22,7 @@ use crate::{
hal_label, id,
init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction},
pipeline::{self, PipelineFlags},
resource::{Buffer, QuerySet, Texture, TextureView, TextureViewNotRenderableReason},
resource::{QuerySet, Texture, TextureView, TextureViewNotRenderableReason},
storage::Storage,
track::{TextureSelector, Tracker, UsageConflict, UsageScope},
validation::{
@ -531,6 +531,8 @@ pub enum ColorAttachmentError {
InvalidFormat(wgt::TextureFormat),
#[error("The number of color attachments {given} exceeds the limit {limit}")]
TooMany { given: usize, limit: usize },
#[error("The total number of bytes per sample in color attachments {total} exceeds the limit {limit}")]
TooManyBytesPerSample { total: u32, limit: u32 },
}
/// Error encountered when performing a render pass.
@ -799,8 +801,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
texture_memory_actions: &mut CommandBufferTextureMemoryActions<A>,
pending_query_resets: &mut QueryResetMap<A>,
view_guard: &'a Storage<TextureView<A>>,
buffer_guard: &'a Storage<Buffer<A>>,
texture_guard: &'a Storage<Texture<A>>,
query_set_guard: &'a Storage<QuerySet<A>>,
snatch_guard: &SnatchGuard<'a>,
) -> Result<Self, RenderPassErrorInner> {
@ -1214,7 +1214,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
Ok(Self {
context,
usage_scope: UsageScope::new(buffer_guard, texture_guard),
usage_scope: UsageScope::new(&device.tracker_indices),
render_attachments,
is_depth_read_only,
is_stencil_read_only,
@ -1386,7 +1386,6 @@ impl Global {
let render_pipeline_guard = hub.render_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
let texture_guard = hub.textures.read();
let view_guard = hub.texture_views.read();
log::trace!(
@ -1406,24 +1405,21 @@ impl Global {
texture_memory_actions,
pending_query_resets,
&*view_guard,
&*buffer_guard,
&*texture_guard,
&*query_set_guard,
&snatch_guard,
)
.map_pass_err(pass_scope)?;
tracker.set_size(
Some(&*buffer_guard),
Some(&*texture_guard),
Some(&*view_guard),
None,
Some(&*bind_group_guard),
None,
Some(&*render_pipeline_guard),
Some(&*bundle_guard),
Some(&*query_set_guard),
);
let indices = &device.tracker_indices;
tracker.buffers.set_size(indices.buffers.size());
tracker.textures.set_size(indices.textures.size());
tracker.views.set_size(indices.texture_views.size());
tracker.bind_groups.set_size(indices.bind_groups.size());
tracker
.render_pipelines
.set_size(indices.render_pipelines.size());
tracker.bundles.set_size(indices.bundles.size());
tracker.query_sets.set_size(indices.query_sets.size());
let raw = &mut encoder.raw;
@ -1675,7 +1671,7 @@ impl Global {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
check_buffer_usage(buffer.usage, BufferUsages::INDEX)
check_buffer_usage(buffer_id, buffer.usage, BufferUsages::INDEX)
.map_pass_err(scope)?;
let buf_raw = buffer
.raw
@ -1737,7 +1733,7 @@ impl Global {
.map_pass_err(scope);
}
check_buffer_usage(buffer.usage, BufferUsages::VERTEX)
check_buffer_usage(buffer_id, buffer.usage, BufferUsages::VERTEX)
.map_pass_err(scope)?;
let buf_raw = buffer
.raw
@ -2034,8 +2030,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(
buffer_id,
indirect_buffer.usage,
BufferUsages::INDIRECT,
)
.map_pass_err(scope)?;
let indirect_raw = indirect_buffer
.raw
.get(&snatch_guard)
@ -2106,8 +2106,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(
buffer_id,
indirect_buffer.usage,
BufferUsages::INDIRECT,
)
.map_pass_err(scope)?;
let indirect_raw = indirect_buffer
.raw
.get(&snatch_guard)
@ -2123,7 +2127,7 @@ impl Global {
hal::BufferUses::INDIRECT,
)
.map_pass_err(scope)?;
check_buffer_usage(count_buffer.usage, BufferUsages::INDIRECT)
check_buffer_usage(buffer_id, count_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
let count_raw = count_buffer
.raw

View file

@ -26,9 +26,7 @@ use wgt::{BufferAddress, TextureFormat};
use std::{
borrow::Cow,
iter,
ops::Range,
ptr,
iter, ptr,
sync::{atomic::Ordering, Arc},
};
@ -219,7 +217,7 @@ impl Global {
mapped_at_creation: false,
};
let stage = match device.create_buffer(&stage_desc, true) {
Ok(stage) => stage,
Ok(stage) => Arc::new(stage),
Err(e) => {
to_destroy.push(buffer);
break e;
@ -232,14 +230,10 @@ impl Global {
Ok(mapping) => mapping,
Err(e) => {
to_destroy.push(buffer);
to_destroy.push(stage);
break CreateBufferError::Device(e.into());
}
};
let stage_fid = hub.buffers.request();
let stage = stage_fid.init(stage);
assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
// Zero initialize memory and then mark both staging and buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
@ -262,7 +256,7 @@ impl Global {
.trackers
.lock()
.buffers
.insert_single(id, resource, buffer_use);
.insert_single(resource, buffer_use);
return (id, None);
};
@ -383,7 +377,7 @@ impl Global {
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
//assert!(buffer isn't used by the GPU);
#[cfg(feature = "trace")]
@ -446,7 +440,7 @@ impl Global {
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_READ)?;
//assert!(buffer isn't used by the GPU);
let raw_buf = buffer
@ -529,7 +523,7 @@ impl Global {
.lock_life()
.suspected_resources
.buffers
.insert(buffer_id, buffer);
.insert(buffer.info.tracker_index(), buffer);
}
if wait {
@ -573,11 +567,11 @@ impl Global {
let (id, resource) = fid.assign(texture);
api_log!("Device::create_texture({desc:?}) -> {id:?}");
device.trackers.lock().textures.insert_single(
id,
resource,
hal::TextureUses::UNINITIALIZED,
);
device
.trackers
.lock()
.textures
.insert_single(resource, hal::TextureUses::UNINITIALIZED);
return (id, None);
};
@ -647,11 +641,11 @@ impl Global {
let (id, resource) = fid.assign(texture);
api_log!("Device::create_texture({desc:?}) -> {id:?}");
device.trackers.lock().textures.insert_single(
id,
resource,
hal::TextureUses::UNINITIALIZED,
);
device
.trackers
.lock()
.textures
.insert_single(resource, hal::TextureUses::UNINITIALIZED);
return (id, None);
};
@ -704,7 +698,7 @@ impl Global {
.trackers
.lock()
.buffers
.insert_single(id, buffer, hal::BufferUses::empty());
.insert_single(buffer, hal::BufferUses::empty());
return (id, None);
};
@ -764,7 +758,7 @@ impl Global {
.lock_life()
.suspected_resources
.textures
.insert(texture_id, texture.clone());
.insert(texture.info.tracker_index(), texture.clone());
}
}
@ -824,7 +818,7 @@ impl Global {
}
api_log!("Texture::create_view({texture_id:?}) -> {id:?}");
device.trackers.lock().views.insert_single(id, resource);
device.trackers.lock().views.insert_single(resource);
return (id, None);
};
@ -854,7 +848,7 @@ impl Global {
.lock_life()
.suspected_resources
.texture_views
.insert(texture_view_id, view.clone());
.insert(view.info.tracker_index(), view.clone());
if wait {
match view.device.wait_for_submit(last_submit_index) {
@ -900,7 +894,7 @@ impl Global {
let (id, resource) = fid.assign(sampler);
api_log!("Device::create_sampler -> {id:?}");
device.trackers.lock().samplers.insert_single(id, resource);
device.trackers.lock().samplers.insert_single(resource);
return (id, None);
};
@ -925,7 +919,7 @@ impl Global {
.lock_life()
.suspected_resources
.samplers
.insert(sampler_id, sampler.clone());
.insert(sampler.info.tracker_index(), sampler.clone());
}
}
@ -1024,7 +1018,7 @@ impl Global {
.lock_life()
.suspected_resources
.bind_group_layouts
.insert(bind_group_layout_id, layout.clone());
.insert(layout.info.tracker_index(), layout.clone());
}
}
@ -1085,7 +1079,7 @@ impl Global {
.lock_life()
.suspected_resources
.pipeline_layouts
.insert(pipeline_layout_id, layout.clone());
.insert(layout.info.tracker_index(), layout.clone());
}
}
@ -1140,11 +1134,7 @@ impl Global {
api_log!("Device::create_bind_group -> {id:?}");
device
.trackers
.lock()
.bind_groups
.insert_single(id, resource);
device.trackers.lock().bind_groups.insert_single(resource);
return (id, None);
};
@ -1168,7 +1158,7 @@ impl Global {
.lock_life()
.suspected_resources
.bind_groups
.insert(bind_group_id, bind_group.clone());
.insert(bind_group.info.tracker_index(), bind_group.clone());
}
}
@ -1332,9 +1322,8 @@ impl Global {
if !device.is_valid() {
break DeviceError::Lost;
}
let queue = match hub.queues.get(device.queue_id.read().unwrap()) {
Ok(queue) => queue,
Err(_) => break DeviceError::InvalidQueueId,
let Some(queue) = device.get_queue() else {
break DeviceError::InvalidQueueId;
};
let encoder = match device
.command_allocator
@ -1379,6 +1368,7 @@ impl Global {
.command_buffers
.unregister(command_encoder_id.transmute())
{
cmd_buf.data.lock().as_mut().unwrap().encoder.discard();
cmd_buf
.device
.untrack(&cmd_buf.data.lock().as_ref().unwrap().trackers);
@ -1450,7 +1440,7 @@ impl Global {
let (id, resource) = fid.assign(render_bundle);
api_log!("RenderBundleEncoder::finish -> {id:?}");
device.trackers.lock().bundles.insert_single(id, resource);
device.trackers.lock().bundles.insert_single(resource);
return (id, None);
};
@ -1474,7 +1464,7 @@ impl Global {
.lock_life()
.suspected_resources
.render_bundles
.insert(render_bundle_id, bundle.clone());
.insert(bundle.info.tracker_index(), bundle.clone());
}
}
@ -1513,11 +1503,7 @@ impl Global {
let (id, resource) = fid.assign(query_set);
api_log!("Device::create_query_set -> {id:?}");
device
.trackers
.lock()
.query_sets
.insert_single(id, resource);
device.trackers.lock().query_sets.insert_single(resource);
return (id, None);
};
@ -1544,7 +1530,7 @@ impl Global {
.lock_life()
.suspected_resources
.query_sets
.insert(query_set_id, query_set.clone());
.insert(query_set.info.tracker_index(), query_set.clone());
}
}
@ -1600,7 +1586,7 @@ impl Global {
.trackers
.lock()
.render_pipelines
.insert_single(id, resource);
.insert_single(resource);
return (id, None);
};
@ -1672,18 +1658,17 @@ impl Global {
let hub = A::hub(self);
if let Some(pipeline) = hub.render_pipelines.unregister(render_pipeline_id) {
let layout_id = pipeline.layout.as_info().id();
let device = &pipeline.device;
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
.render_pipelines
.insert(render_pipeline_id, pipeline.clone());
.insert(pipeline.info.tracker_index(), pipeline.clone());
life_lock
.suspected_resources
.pipeline_layouts
.insert(layout_id, pipeline.layout.clone());
life_lock.suspected_resources.pipeline_layouts.insert(
pipeline.layout.info.tracker_index(),
pipeline.layout.clone(),
);
}
}
@ -1734,7 +1719,7 @@ impl Global {
.trackers
.lock()
.compute_pipelines
.insert_single(id, resource);
.insert_single(resource);
return (id, None);
};
@ -1804,17 +1789,16 @@ impl Global {
let hub = A::hub(self);
if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) {
let layout_id = pipeline.layout.as_info().id();
let device = &pipeline.device;
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
.compute_pipelines
.insert(compute_pipeline_id, pipeline.clone());
life_lock
.suspected_resources
.pipeline_layouts
.insert(layout_id, pipeline.layout.clone());
.insert(pipeline.info.tracker_index(), pipeline.clone());
life_lock.suspected_resources.pipeline_layouts.insert(
pipeline.layout.info.tracker_index(),
pipeline.layout.clone(),
);
}
}
@ -2336,15 +2320,18 @@ impl Global {
pub fn buffer_map_async<A: HalApi>(
&self,
buffer_id: id::BufferId,
range: Range<BufferAddress>,
offset: BufferAddress,
size: Option<BufferAddress>,
op: BufferMapOperation,
) -> BufferAccessResult {
api_log!("Buffer::map_async {buffer_id:?} range {range:?} op: {op:?}");
api_log!("Buffer::map_async {buffer_id:?} offset {offset:?} size {size:?} op: {op:?}");
// User callbacks must not be called while holding buffer_map_async_inner's locks, so we
// defer the error callback if it needs to be called immediately (typically when running
// into errors).
if let Err((mut operation, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
if let Err((mut operation, err)) =
self.buffer_map_async_inner::<A>(buffer_id, offset, size, op)
{
if let Some(callback) = operation.callback.take() {
callback.call(Err(err.clone()));
}
@ -2360,7 +2347,8 @@ impl Global {
fn buffer_map_async_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
range: Range<BufferAddress>,
offset: BufferAddress,
size: Option<BufferAddress>,
op: BufferMapOperation,
) -> Result<(), (BufferMapOperation, BufferAccessError)> {
profiling::scope!("Buffer::map_async");
@ -2372,29 +2360,50 @@ impl Global {
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedRange));
}
let buffer = {
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid);
let buffer = hub.buffers.get(buffer_id);
let buffer = match buffer {
Ok(b) => b,
Err(e) => {
return Err((op, e));
Err(_) => {
return Err((op, BufferAccessError::Invalid));
}
};
{
let snatch_guard = buffer.device.snatchable_lock.read();
if buffer.is_destroyed(&snatch_guard) {
return Err((op, BufferAccessError::Destroyed));
}
}
let range_size = if let Some(size) = size {
size
} else if offset > buffer.size {
0
} else {
buffer.size - offset
};
if offset % wgt::MAP_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedOffset { offset }));
}
if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedRangeSize { range_size }));
}
let range = offset..(offset + range_size);
if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0
{
return Err((op, BufferAccessError::UnalignedRange));
}
let device = &buffer.device;
if !device.is_valid() {
return Err((op, DeviceError::Lost.into()));
}
if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
if let Err(e) = check_buffer_usage(buffer.info.id(), buffer.usage, pub_usage) {
return Err((op, e.into()));
}
@ -2417,11 +2426,6 @@ impl Global {
));
}
let snatch_guard = device.snatchable_lock.read();
if buffer.is_destroyed(&snatch_guard) {
return Err((op, BufferAccessError::Destroyed));
}
{
let map_state = &mut *buffer.map_state.lock();
*map_state = match *map_state {
@ -2442,6 +2446,8 @@ impl Global {
};
}
let snatch_guard = buffer.device.snatchable_lock.read();
{
let mut trackers = buffer.device.as_ref().trackers.lock();
trackers.buffers.set_single(&buffer, internal_use);

View file

@ -6,17 +6,13 @@ use crate::{
DeviceError, DeviceLostClosure,
},
hal_api::HalApi,
id::{
self, BindGroupId, BindGroupLayoutId, BufferId, ComputePipelineId, Id, PipelineLayoutId,
QuerySetId, RenderBundleId, RenderPipelineId, SamplerId, StagingBufferId, TextureId,
TextureViewId,
},
id,
pipeline::{ComputePipeline, RenderPipeline},
resource::{
self, Buffer, DestroyedBuffer, DestroyedTexture, QuerySet, Resource, Sampler,
StagingBuffer, Texture, TextureView,
},
track::{ResourceTracker, Tracker},
track::{ResourceTracker, Tracker, TrackerIndex},
FastHashMap, SubmissionIndex,
};
use smallvec::SmallVec;
@ -28,20 +24,20 @@ use thiserror::Error;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Default)]
pub(crate) struct ResourceMaps<A: HalApi> {
pub buffers: FastHashMap<BufferId, Arc<Buffer<A>>>,
pub staging_buffers: FastHashMap<StagingBufferId, Arc<StagingBuffer<A>>>,
pub textures: FastHashMap<TextureId, Arc<Texture<A>>>,
pub texture_views: FastHashMap<TextureViewId, Arc<TextureView<A>>>,
pub samplers: FastHashMap<SamplerId, Arc<Sampler<A>>>,
pub bind_groups: FastHashMap<BindGroupId, Arc<BindGroup<A>>>,
pub bind_group_layouts: FastHashMap<BindGroupLayoutId, Arc<BindGroupLayout<A>>>,
pub render_pipelines: FastHashMap<RenderPipelineId, Arc<RenderPipeline<A>>>,
pub compute_pipelines: FastHashMap<ComputePipelineId, Arc<ComputePipeline<A>>>,
pub pipeline_layouts: FastHashMap<PipelineLayoutId, Arc<PipelineLayout<A>>>,
pub render_bundles: FastHashMap<RenderBundleId, Arc<RenderBundle<A>>>,
pub query_sets: FastHashMap<QuerySetId, Arc<QuerySet<A>>>,
pub destroyed_buffers: FastHashMap<BufferId, Arc<DestroyedBuffer<A>>>,
pub destroyed_textures: FastHashMap<TextureId, Arc<DestroyedTexture<A>>>,
pub buffers: FastHashMap<TrackerIndex, Arc<Buffer<A>>>,
pub staging_buffers: FastHashMap<TrackerIndex, Arc<StagingBuffer<A>>>,
pub textures: FastHashMap<TrackerIndex, Arc<Texture<A>>>,
pub texture_views: FastHashMap<TrackerIndex, Arc<TextureView<A>>>,
pub samplers: FastHashMap<TrackerIndex, Arc<Sampler<A>>>,
pub bind_groups: FastHashMap<TrackerIndex, Arc<BindGroup<A>>>,
pub bind_group_layouts: FastHashMap<TrackerIndex, Arc<BindGroupLayout<A>>>,
pub render_pipelines: FastHashMap<TrackerIndex, Arc<RenderPipeline<A>>>,
pub compute_pipelines: FastHashMap<TrackerIndex, Arc<ComputePipeline<A>>>,
pub pipeline_layouts: FastHashMap<TrackerIndex, Arc<PipelineLayout<A>>>,
pub render_bundles: FastHashMap<TrackerIndex, Arc<RenderBundle<A>>>,
pub query_sets: FastHashMap<TrackerIndex, Arc<QuerySet<A>>>,
pub destroyed_buffers: FastHashMap<TrackerIndex, Arc<DestroyedBuffer<A>>>,
pub destroyed_textures: FastHashMap<TrackerIndex, Arc<DestroyedTexture<A>>>,
}
impl<A: HalApi> ResourceMaps<A> {
@ -276,25 +272,29 @@ impl<A: HalApi> LifetimeTracker<A> {
for res in temp_resources {
match res {
TempResource::Buffer(raw) => {
last_resources.buffers.insert(raw.as_info().id(), raw);
last_resources
.buffers
.insert(raw.as_info().tracker_index(), raw);
}
TempResource::StagingBuffer(raw) => {
last_resources
.staging_buffers
.insert(raw.as_info().id(), raw);
.insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedBuffer(destroyed) => {
last_resources
.destroyed_buffers
.insert(destroyed.id, destroyed);
.insert(destroyed.tracker_index, destroyed);
}
TempResource::Texture(raw) => {
last_resources.textures.insert(raw.as_info().id(), raw);
last_resources
.textures
.insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedTexture(destroyed) => {
last_resources
.destroyed_textures
.insert(destroyed.id, destroyed);
.insert(destroyed.tracker_index, destroyed);
}
}
}
@ -310,12 +310,14 @@ impl<A: HalApi> LifetimeTracker<A> {
pub fn post_submit(&mut self) {
for v in self.future_suspected_buffers.drain(..).take(1) {
self.suspected_resources.buffers.insert(v.as_info().id(), v);
self.suspected_resources
.buffers
.insert(v.as_info().tracker_index(), v);
}
for v in self.future_suspected_textures.drain(..).take(1) {
self.suspected_resources
.textures
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
}
@ -386,19 +388,27 @@ impl<A: HalApi> LifetimeTracker<A> {
if let Some(resources) = resources {
match temp_resource {
TempResource::Buffer(raw) => {
resources.buffers.insert(raw.as_info().id(), raw);
resources.buffers.insert(raw.as_info().tracker_index(), raw);
}
TempResource::StagingBuffer(raw) => {
resources.staging_buffers.insert(raw.as_info().id(), raw);
resources
.staging_buffers
.insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedBuffer(destroyed) => {
resources.destroyed_buffers.insert(destroyed.id, destroyed);
resources
.destroyed_buffers
.insert(destroyed.tracker_index, destroyed);
}
TempResource::Texture(raw) => {
resources.textures.insert(raw.as_info().id(), raw);
resources
.textures
.insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedTexture(destroyed) => {
resources.destroyed_textures.insert(destroyed.id, destroyed);
resources
.destroyed_textures
.insert(destroyed.tracker_index, destroyed);
}
}
}
@ -420,27 +430,27 @@ impl<A: HalApi> LifetimeTracker<A> {
impl<A: HalApi> LifetimeTracker<A> {
fn triage_resources<R>(
resources_map: &mut FastHashMap<Id<R::Marker>, Arc<R>>,
resources_map: &mut FastHashMap<TrackerIndex, Arc<R>>,
active: &mut [ActiveSubmission<A>],
trackers: &mut impl ResourceTracker<R>,
get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<Id<R::Marker>, Arc<R>>,
trackers: &mut impl ResourceTracker,
get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<TrackerIndex, Arc<R>>,
) -> Vec<Arc<R>>
where
R: Resource,
{
let mut removed_resources = Vec::new();
resources_map.retain(|&id, resource| {
resources_map.retain(|&index, resource| {
let submit_index = resource.as_info().submission_index();
let non_referenced_resources = active
.iter_mut()
.find(|a| a.index == submit_index)
.map(|a| &mut a.last_resources);
let is_removed = trackers.remove_abandoned(id);
let is_removed = trackers.remove_abandoned(index);
if is_removed {
removed_resources.push(resource.clone());
if let Some(resources) = non_referenced_resources {
get_resource_map(resources).insert(id, resource.clone());
get_resource_map(resources).insert(index, resource.clone());
}
}
!is_removed
@ -459,27 +469,29 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|bundle| {
for v in bundle.used.buffers.write().drain_resources() {
self.suspected_resources.buffers.insert(v.as_info().id(), v);
self.suspected_resources
.buffers
.insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.textures.write().drain_resources() {
self.suspected_resources
.textures
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.bind_groups.write().drain_resources() {
self.suspected_resources
.bind_groups
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.render_pipelines.write().drain_resources() {
self.suspected_resources
.render_pipelines
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.query_sets.write().drain_resources() {
self.suspected_resources
.query_sets
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
});
self
@ -496,27 +508,30 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resource.drain(..).for_each(|bind_group| {
for v in bind_group.used.buffers.drain_resources() {
self.suspected_resources.buffers.insert(v.as_info().id(), v);
self.suspected_resources
.buffers
.insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.textures.drain_resources() {
self.suspected_resources
.textures
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.views.drain_resources() {
self.suspected_resources
.texture_views
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.samplers.drain_resources() {
self.suspected_resources
.samplers
.insert(v.as_info().id(), v);
.insert(v.as_info().tracker_index(), v);
}
self.suspected_resources
.bind_group_layouts
.insert(bind_group.layout.as_info().id(), bind_group.layout.clone());
self.suspected_resources.bind_group_layouts.insert(
bind_group.layout.as_info().tracker_index(),
bind_group.layout.clone(),
);
});
self
}
@ -605,7 +620,7 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|compute_pipeline| {
self.suspected_resources.pipeline_layouts.insert(
compute_pipeline.layout.as_info().id(),
compute_pipeline.layout.as_info().tracker_index(),
compute_pipeline.layout.clone(),
);
});
@ -623,7 +638,7 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|render_pipeline| {
self.suspected_resources.pipeline_layouts.insert(
render_pipeline.layout.as_info().id(),
render_pipeline.layout.as_info().tracker_index(),
render_pipeline.layout.clone(),
);
});
@ -642,7 +657,7 @@ impl<A: HalApi> LifetimeTracker<A> {
for bgl in &pipeline_layout.bind_group_layouts {
self.suspected_resources
.bind_group_layouts
.insert(bgl.as_info().id(), bgl.clone());
.insert(bgl.as_info().tracker_index(), bgl.clone());
}
});
self
@ -773,14 +788,14 @@ impl<A: HalApi> LifetimeTracker<A> {
Vec::with_capacity(self.ready_to_map.len());
for buffer in self.ready_to_map.drain(..) {
let buffer_id = buffer.info.id();
let tracker_index = buffer.info.tracker_index();
let is_removed = {
let mut trackers = trackers.lock();
trackers.buffers.remove_abandoned(buffer_id)
trackers.buffers.remove_abandoned(tracker_index)
};
if is_removed {
*buffer.map_state.lock() = resource::BufferMapState::Idle;
log::trace!("Buffer ready to map {:?} is not tracked anymore", buffer_id);
log::trace!("Buffer ready to map {tracker_index:?} is not tracked anymore");
} else {
let mapping = match std::mem::replace(
&mut *buffer.map_state.lock(),
@ -798,7 +813,7 @@ impl<A: HalApi> LifetimeTracker<A> {
_ => panic!("No pending mapping."),
};
let status = if mapping.range.start != mapping.range.end {
log::debug!("Buffer {:?} map state -> Active", buffer_id);
log::debug!("Buffer {tracker_index:?} map state -> Active");
let host = mapping.op.host;
let size = mapping.range.end - mapping.range.start;
match super::map_buffer(raw, &buffer, mapping.range.start, size, host) {

View file

@ -188,10 +188,17 @@ impl<A: HalApi> EncoderInFlight<A> {
#[derive(Debug)]
pub(crate) struct PendingWrites<A: HalApi> {
pub command_encoder: A::CommandEncoder,
pub is_active: bool,
/// True if `command_encoder` is in the "recording" state, as
/// described in the docs for the [`wgpu_hal::CommandEncoder`]
/// trait.
pub is_recording: bool,
pub temp_resources: Vec<TempResource<A>>,
pub dst_buffers: FastHashMap<id::BufferId, Arc<Buffer<A>>>,
pub dst_textures: FastHashMap<id::TextureId, Arc<Texture<A>>>,
/// All command buffers allocated from `command_encoder`.
pub executing_command_buffers: Vec<A::CommandBuffer>,
}
@ -199,7 +206,7 @@ impl<A: HalApi> PendingWrites<A> {
pub fn new(command_encoder: A::CommandEncoder) -> Self {
Self {
command_encoder,
is_active: false,
is_recording: false,
temp_resources: Vec::new(),
dst_buffers: FastHashMap::default(),
dst_textures: FastHashMap::default(),
@ -209,7 +216,7 @@ impl<A: HalApi> PendingWrites<A> {
pub fn dispose(mut self, device: &A::Device) {
unsafe {
if self.is_active {
if self.is_recording {
self.command_encoder.discard_encoding();
}
self.command_encoder
@ -232,9 +239,9 @@ impl<A: HalApi> PendingWrites<A> {
fn pre_submit(&mut self) -> Result<Option<&A::CommandBuffer>, DeviceError> {
self.dst_buffers.clear();
self.dst_textures.clear();
if self.is_active {
if self.is_recording {
let cmd_buf = unsafe { self.command_encoder.end_encoding()? };
self.is_active = false;
self.is_recording = false;
self.executing_command_buffers.push(cmd_buf);
return Ok(self.executing_command_buffers.last());
@ -262,23 +269,23 @@ impl<A: HalApi> PendingWrites<A> {
}
pub fn activate(&mut self) -> &mut A::CommandEncoder {
if !self.is_active {
if !self.is_recording {
unsafe {
self.command_encoder
.begin_encoding(Some("(wgpu internal) PendingWrites"))
.unwrap();
}
self.is_active = true;
self.is_recording = true;
}
&mut self.command_encoder
}
pub fn deactivate(&mut self) {
if self.is_active {
if self.is_recording {
unsafe {
self.command_encoder.discard_encoding();
}
self.is_active = false;
self.is_recording = false;
}
}
}
@ -303,7 +310,10 @@ fn prepare_staging_buffer<A: HalApi>(
raw: Mutex::new(Some(buffer)),
device: device.clone(),
size,
info: ResourceInfo::new("<StagingBuffer>"),
info: ResourceInfo::new(
"<StagingBuffer>",
Some(device.tracker_indices.staging_buffers.clone()),
),
is_coherent: mapping.is_coherent,
};
@ -1188,11 +1198,13 @@ impl Global {
// update submission IDs
for buffer in cmd_buf_trackers.buffers.used_resources() {
let id = buffer.info.id();
let tracker_index = buffer.info.tracker_index();
let raw_buf = match buffer.raw.get(&snatch_guard) {
Some(raw) => raw,
None => {
return Err(QueueSubmitError::DestroyedBuffer(id));
return Err(QueueSubmitError::DestroyedBuffer(
buffer.info.id(),
));
}
};
buffer.info.use_at(submit_index);
@ -1207,28 +1219,28 @@ impl Global {
.as_mut()
.unwrap()
.buffers
.insert(id, buffer.clone());
.insert(tracker_index, buffer.clone());
} else {
match *buffer.map_state.lock() {
BufferMapState::Idle => (),
_ => return Err(QueueSubmitError::BufferStillMapped(id)),
_ => {
return Err(QueueSubmitError::BufferStillMapped(
buffer.info.id(),
))
}
}
}
}
for texture in cmd_buf_trackers.textures.used_resources() {
let id = texture.info.id();
let tracker_index = texture.info.tracker_index();
let should_extend = match texture.inner.get(&snatch_guard) {
None => {
return Err(QueueSubmitError::DestroyedTexture(id));
return Err(QueueSubmitError::DestroyedTexture(
texture.info.id(),
));
}
Some(TextureInner::Native { .. }) => false,
Some(TextureInner::Surface {
ref has_work,
ref raw,
..
}) => {
has_work.store(true, Ordering::Relaxed);
Some(TextureInner::Surface { ref raw, .. }) => {
if raw.is_some() {
submit_surface_textures_owned.push(texture.clone());
}
@ -1242,7 +1254,7 @@ impl Global {
.as_mut()
.unwrap()
.textures
.insert(id, texture.clone());
.insert(tracker_index, texture.clone());
}
if should_extend {
unsafe {
@ -1255,11 +1267,10 @@ impl Global {
for texture_view in cmd_buf_trackers.views.used_resources() {
texture_view.info.use_at(submit_index);
if texture_view.is_unique() {
temp_suspected
.as_mut()
.unwrap()
.texture_views
.insert(texture_view.as_info().id(), texture_view.clone());
temp_suspected.as_mut().unwrap().texture_views.insert(
texture_view.as_info().tracker_index(),
texture_view.clone(),
);
}
}
{
@ -1279,7 +1290,7 @@ impl Global {
.as_mut()
.unwrap()
.bind_groups
.insert(bg.as_info().id(), bg.clone());
.insert(bg.as_info().tracker_index(), bg.clone());
}
}
}
@ -1290,7 +1301,7 @@ impl Global {
compute_pipeline.info.use_at(submit_index);
if compute_pipeline.is_unique() {
temp_suspected.as_mut().unwrap().compute_pipelines.insert(
compute_pipeline.as_info().id(),
compute_pipeline.as_info().tracker_index(),
compute_pipeline.clone(),
);
}
@ -1301,7 +1312,7 @@ impl Global {
render_pipeline.info.use_at(submit_index);
if render_pipeline.is_unique() {
temp_suspected.as_mut().unwrap().render_pipelines.insert(
render_pipeline.as_info().id(),
render_pipeline.as_info().tracker_index(),
render_pipeline.clone(),
);
}
@ -1309,11 +1320,10 @@ impl Global {
for query_set in cmd_buf_trackers.query_sets.used_resources() {
query_set.info.use_at(submit_index);
if query_set.is_unique() {
temp_suspected
.as_mut()
.unwrap()
.query_sets
.insert(query_set.as_info().id(), query_set.clone());
temp_suspected.as_mut().unwrap().query_sets.insert(
query_set.as_info().tracker_index(),
query_set.clone(),
);
}
}
for bundle in cmd_buf_trackers.bundles.used_resources() {
@ -1334,7 +1344,7 @@ impl Global {
.as_mut()
.unwrap()
.render_bundles
.insert(bundle.as_info().id(), bundle.clone());
.insert(bundle.as_info().tracker_index(), bundle.clone());
}
}
}
@ -1423,13 +1433,7 @@ impl Global {
return Err(QueueSubmitError::DestroyedTexture(id));
}
Some(TextureInner::Native { .. }) => {}
Some(TextureInner::Surface {
ref has_work,
ref raw,
..
}) => {
has_work.store(true, Ordering::Relaxed);
Some(TextureInner::Surface { ref raw, .. }) => {
if raw.is_some() {
submit_surface_textures_owned.push(texture.clone());
}

View file

@ -13,7 +13,6 @@ use crate::{
hal_api::HalApi,
hal_label,
hub::Hub,
id::QueueId,
init_tracker::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTracker, TextureInitTrackerAction,
@ -29,13 +28,16 @@ use crate::{
resource_log,
snatch::{SnatchGuard, SnatchLock, Snatchable},
storage::Storage,
track::{BindGroupStates, TextureSelector, Tracker},
validation::{self, check_buffer_usage, check_texture_usage},
track::{BindGroupStates, TextureSelector, Tracker, TrackerIndexAllocators},
validation::{
self, check_buffer_usage, check_texture_usage, validate_color_attachment_bytes_per_sample,
},
FastHashMap, LabelHelpers as _, SubmissionIndex,
};
use arrayvec::ArrayVec;
use hal::{CommandEncoder as _, Device as _};
use once_cell::sync::OnceCell;
use parking_lot::{Mutex, MutexGuard, RwLock};
use smallvec::SmallVec;
@ -54,7 +56,7 @@ use std::{
use super::{
life::{self, ResourceMaps},
queue::{self},
queue::{self, Queue},
DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, ENTRYPOINT_FAILURE_ERROR,
IMPLICIT_BIND_GROUP_LAYOUT_ERROR_LABEL, ZERO_BUFFER_SIZE,
};
@ -87,8 +89,8 @@ use super::{
pub struct Device<A: HalApi> {
raw: Option<A::Device>,
pub(crate) adapter: Arc<Adapter<A>>,
pub(crate) queue_id: RwLock<Option<QueueId>>,
queue_to_drop: RwLock<Option<A::Queue>>,
pub(crate) queue: OnceCell<Weak<Queue<A>>>,
queue_to_drop: OnceCell<A::Queue>,
pub(crate) zero_buffer: Option<A::Buffer>,
pub(crate) info: ResourceInfo<Device<A>>,
@ -116,6 +118,7 @@ pub struct Device<A: HalApi> {
/// Has to be locked temporarily only (locked last)
/// and never before pending_writes
pub(crate) trackers: Mutex<Tracker<A>>,
pub(crate) tracker_indices: TrackerIndexAllocators,
// Life tracker should be locked right after the device and before anything else.
life_tracker: Mutex<LifetimeTracker<A>>,
/// Temporary storage for resource management functions. Cleared at the end
@ -160,7 +163,7 @@ impl<A: HalApi> Drop for Device<A> {
unsafe {
raw.destroy_buffer(self.zero_buffer.take().unwrap());
raw.destroy_fence(self.fence.write().take().unwrap());
let queue = self.queue_to_drop.write().take().unwrap();
let queue = self.queue_to_drop.take().unwrap();
raw.exit(queue);
}
}
@ -258,16 +261,17 @@ impl<A: HalApi> Device<A> {
Ok(Self {
raw: Some(raw_device),
adapter: adapter.clone(),
queue_id: RwLock::new(None),
queue_to_drop: RwLock::new(None),
queue: OnceCell::new(),
queue_to_drop: OnceCell::new(),
zero_buffer: Some(zero_buffer),
info: ResourceInfo::new("<device>"),
info: ResourceInfo::new("<device>", None),
command_allocator: Mutex::new(Some(com_alloc)),
active_submission_index: AtomicU64::new(0),
fence: RwLock::new(Some(fence)),
snatchable_lock: unsafe { SnatchLock::new() },
valid: AtomicBool::new(true),
trackers: Mutex::new(Tracker::new()),
tracker_indices: TrackerIndexAllocators::new(),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: Mutex::new(Some(life::ResourceMaps::new())),
bgl_pool: ResourcePool::new(),
@ -300,7 +304,7 @@ impl<A: HalApi> Device<A> {
}
pub(crate) fn release_queue(&self, queue: A::Queue) {
self.queue_to_drop.write().replace(queue);
assert!(self.queue_to_drop.set(queue).is_ok());
}
pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker<A>> {
@ -357,6 +361,14 @@ impl<A: HalApi> Device<A> {
}
}
pub fn get_queue(&self) -> Option<Arc<Queue<A>>> {
self.queue.get().as_ref()?.upgrade()
}
pub fn set_queue(&self, queue: Arc<Queue<A>>) {
assert!(self.queue.set(Arc::downgrade(&queue)).is_ok());
}
/// Check this device for completed commands.
///
/// The `maintain` argument tells how the maintence function should behave, either
@ -483,56 +495,56 @@ impl<A: HalApi> Device<A> {
if resource.is_unique() {
temp_suspected
.buffers
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.textures.used_resources() {
if resource.is_unique() {
temp_suspected
.textures
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.views.used_resources() {
if resource.is_unique() {
temp_suspected
.texture_views
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.bind_groups.used_resources() {
if resource.is_unique() {
temp_suspected
.bind_groups
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.samplers.used_resources() {
if resource.is_unique() {
temp_suspected
.samplers
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.compute_pipelines.used_resources() {
if resource.is_unique() {
temp_suspected
.compute_pipelines
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.render_pipelines.used_resources() {
if resource.is_unique() {
temp_suspected
.render_pipelines
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.query_sets.used_resources() {
if resource.is_unique() {
temp_suspected
.query_sets
.insert(resource.as_info().id(), resource.clone());
.insert(resource.as_info().tracker_index(), resource.clone());
}
}
}
@ -633,7 +645,10 @@ impl<A: HalApi> Device<A> {
initialization_status: RwLock::new(BufferInitTracker::new(aligned_size)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.buffers.clone()),
),
bind_groups: Mutex::new(Vec::new()),
})
}
@ -662,7 +677,10 @@ impl<A: HalApi> Device<A> {
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.textures.clone()),
),
clear_mode: RwLock::new(clear_mode),
views: Mutex::new(Vec::new()),
bind_groups: Mutex::new(Vec::new()),
@ -684,7 +702,10 @@ impl<A: HalApi> Device<A> {
initialization_status: RwLock::new(BufferInitTracker::new(0)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.buffers.clone()),
),
bind_groups: Mutex::new(Vec::new()),
}
}
@ -1262,7 +1283,10 @@ impl<A: HalApi> Device<A> {
render_extent,
samples: texture.desc.sample_count,
selector,
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.texture_views.clone()),
),
})
}
@ -1366,7 +1390,10 @@ impl<A: HalApi> Device<A> {
Ok(Sampler {
raw: Some(raw),
device: self.clone(),
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.samplers.clone()),
),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
@ -1559,7 +1586,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: Some(interface),
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
})
}
@ -1600,7 +1627,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: None,
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
})
}
@ -1704,10 +1731,23 @@ impl<A: HalApi> Device<A> {
BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled,
});
}
Bt::Texture { .. } => (
Some(wgt::Features::TEXTURE_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Texture {
multisampled,
view_dimension,
..
} => {
if multisampled && view_dimension != TextureViewDimension::D2 {
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: BindGroupLayoutEntryError::Non2DMultisampled(view_dimension),
});
}
(
Some(wgt::Features::TEXTURE_BINDING_ARRAY),
WritableStorage::No,
)
}
Bt::StorageTexture {
access,
view_dimension,
@ -1840,7 +1880,10 @@ impl<A: HalApi> Device<A> {
entries: entry_map,
origin,
binding_count_validator: count_validator,
info: ResourceInfo::new(label.unwrap_or("<BindGroupLayout>")),
info: ResourceInfo::new(
label.unwrap_or("<BindGroupLayout>"),
Some(self.tracker_indices.bind_group_layouts.clone()),
),
label: label.unwrap_or_default().to_string(),
})
}
@ -1905,7 +1948,7 @@ impl<A: HalApi> Device<A> {
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
check_buffer_usage(buffer.usage, pub_usage)?;
check_buffer_usage(bb.buffer_id, buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
.get(snatch_guard)
@ -2273,7 +2316,10 @@ impl<A: HalApi> Device<A> {
raw: Snatchable::new(raw),
device: self.clone(),
layout: layout.clone(),
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.bind_groups.clone()),
),
used,
used_buffer_ranges,
used_texture_ranges,
@ -2555,7 +2601,10 @@ impl<A: HalApi> Device<A> {
Ok(binding_model::PipelineLayout {
raw: Some(raw),
device: self.clone(),
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.pipeline_layouts.clone()),
),
bind_group_layouts,
push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
})
@ -2720,7 +2769,10 @@ impl<A: HalApi> Device<A> {
device: self.clone(),
_shader_module: shader_module,
late_sized_buffer_groups,
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.compute_pipelines.clone()),
),
};
Ok(pipeline)
}
@ -2749,11 +2801,12 @@ impl<A: HalApi> Device<A> {
let mut shader_binding_sizes = FastHashMap::default();
let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
if num_attachments > hal::MAX_COLOR_ATTACHMENTS {
let max_attachments = self.limits.max_color_attachments as usize;
if num_attachments > max_attachments {
return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
command::ColorAttachmentError::TooMany {
given: num_attachments,
limit: hal::MAX_COLOR_ATTACHMENTS,
limit: max_attachments,
},
));
}
@ -2959,6 +3012,7 @@ impl<A: HalApi> Device<A> {
}
}
}
break None;
};
if let Some(e) = error {
@ -2967,6 +3021,16 @@ impl<A: HalApi> Device<A> {
}
}
let limit = self.limits.max_color_attachment_bytes_per_sample;
let formats = color_targets
.iter()
.map(|cs| cs.as_ref().map(|cs| cs.format));
if let Err(total) = validate_color_attachment_bytes_per_sample(formats, limit) {
return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
command::ColorAttachmentError::TooManyBytesPerSample { total, limit },
));
}
if let Some(ds) = depth_stencil_state {
let error = loop {
let format_features = self.describe_format_features(adapter, ds.format)?;
@ -3302,7 +3366,10 @@ impl<A: HalApi> Device<A> {
strip_index_format: desc.primitive.strip_index_format,
vertex_steps,
late_sized_buffer_groups,
info: ResourceInfo::new(desc.label.borrow_or_default()),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.render_pipelines.clone()),
),
};
Ok(pipeline)
}
@ -3415,7 +3482,7 @@ impl<A: HalApi> Device<A> {
Ok(QuerySet {
raw: Some(unsafe { self.raw().create_query_set(&hal_desc).unwrap() }),
device: self.clone(),
info: ResourceInfo::new(""),
info: ResourceInfo::new("", Some(self.tracker_indices.query_sets.clone())),
desc: desc.map_label(|_| ()),
})
}

View file

@ -3,10 +3,17 @@ use wgt::Backend;
use crate::{
id::{Id, Marker},
Epoch, FastHashMap, Index,
Epoch, Index,
};
use std::{fmt::Debug, marker::PhantomData};
#[derive(Copy, Clone, Debug, PartialEq)]
enum IdSource {
External,
Allocated,
None,
}
/// A simple structure to allocate [`Id`] identifiers.
///
/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`]
@ -34,12 +41,15 @@ use std::{fmt::Debug, marker::PhantomData};
/// [`Backend`]: wgt::Backend;
/// [`alloc`]: IdentityManager::alloc
/// [`free`]: IdentityManager::free
#[derive(Debug, Default)]
#[derive(Debug)]
pub(super) struct IdentityValues {
free: Vec<(Index, Epoch)>,
//sorted by Index
used: FastHashMap<Epoch, Vec<Index>>,
next_index: Index,
count: usize,
// Sanity check: The allocation logic works under the assumption that we don't
// do a mix of allocating ids from here and providing ids manually for the same
// storage container.
id_source: IdSource,
}
impl IdentityValues {
@ -48,35 +58,41 @@ impl IdentityValues {
/// The backend is incorporated into the id, so that ids allocated with
/// different `backend` values are always distinct.
pub fn alloc<T: Marker>(&mut self, backend: Backend) -> Id<T> {
assert!(
self.id_source != IdSource::External,
"Mix of internally allocated and externally provided IDs"
);
self.id_source = IdSource::Allocated;
self.count += 1;
match self.free.pop() {
Some((index, epoch)) => Id::zip(index, epoch + 1, backend),
None => {
let index = self.next_index;
self.next_index += 1;
let epoch = 1;
let used = self.used.entry(epoch).or_insert_with(Default::default);
let index = if let Some(i) = used.iter().max_by_key(|v| *v) {
i + 1
} else {
0
};
used.push(index);
Id::zip(index, epoch, backend)
}
}
}
pub fn mark_as_used<T: Marker>(&mut self, id: Id<T>) -> Id<T> {
assert!(
self.id_source != IdSource::Allocated,
"Mix of internally allocated and externally provided IDs"
);
self.id_source = IdSource::External;
self.count += 1;
let (index, epoch, _backend) = id.unzip();
let used = self.used.entry(epoch).or_insert_with(Default::default);
used.push(index);
id
}
/// Free `id`. It will never be returned from `alloc` again.
pub fn release<T: Marker>(&mut self, id: Id<T>) {
let (index, epoch, _backend) = id.unzip();
self.free.push((index, epoch));
if let IdSource::Allocated = self.id_source {
let (index, epoch, _backend) = id.unzip();
self.free.push((index, epoch));
}
self.count -= 1;
}
@ -106,7 +122,12 @@ impl<T: Marker> IdentityManager<T> {
impl<T: Marker> IdentityManager<T> {
pub fn new() -> Self {
Self {
values: Mutex::new(IdentityValues::default()),
values: Mutex::new(IdentityValues {
free: Vec::new(),
next_index: 0,
count: 0,
id_source: IdSource::None,
}),
_phantom: PhantomData,
}
}
@ -115,15 +136,11 @@ impl<T: Marker> IdentityManager<T> {
#[test]
fn test_epoch_end_of_life() {
use crate::id;
let man = IdentityManager::<id::markers::Buffer>::new();
let forced_id = man.mark_as_used(id::BufferId::zip(0, 1, Backend::Empty));
assert_eq!(forced_id.unzip().0, 0);
let id1 = man.process(Backend::Empty);
assert_eq!(id1.unzip().0, 1);
assert_eq!(id1.unzip(), (0, 1, Backend::Empty));
man.free(id1);
let id2 = man.process(Backend::Empty);
// confirm that the epoch 1 is no longer re-used
assert_eq!(id2.unzip().0, 1);
assert_eq!(id2.unzip().1, 2);
assert_eq!(id2.unzip(), (0, 2, Backend::Empty));
}

View file

@ -198,7 +198,7 @@ impl<A: HalApi> Adapter<A> {
Self {
raw,
info: ResourceInfo::new("<Adapter>"),
info: ResourceInfo::new("<Adapter>", None),
}
}
@ -303,7 +303,7 @@ impl<A: HalApi> Adapter<A> {
let queue = Queue {
device: None,
raw: Some(hal_device.queue),
info: ResourceInfo::new("<Queue>"),
info: ResourceInfo::new("<Queue>", None),
};
return Ok((device, queue));
}
@ -521,7 +521,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
info: ResourceInfo::new("<Surface>"),
info: ResourceInfo::new("<Surface>", None),
raw: hal_surface,
};
@ -542,7 +542,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
info: ResourceInfo::new("<Surface>"),
info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@ -575,7 +575,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
info: ResourceInfo::new("<Surface>"),
info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@ -604,7 +604,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
info: ResourceInfo::new("<Surface>"),
info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@ -633,7 +633,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
info: ResourceInfo::new("<Surface>"),
info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@ -1072,10 +1072,10 @@ impl Global {
let device = hub.devices.get(device_id).unwrap();
queue.device = Some(device.clone());
let (queue_id, _) = queue_fid.assign(queue);
let (queue_id, queue) = queue_fid.assign(queue);
resource_log!("Created Queue {:?}", queue_id);
device.queue_id.write().replace(queue_id);
device.set_queue(queue);
return (device_id, queue_id, None);
};
@ -1124,10 +1124,10 @@ impl Global {
let device = hub.devices.get(device_id).unwrap();
queue.device = Some(device.clone());
let (queue_id, _) = queues_fid.assign(queue);
let (queue_id, queue) = queues_fid.assign(queue);
resource_log!("Created Queue {:?}", queue_id);
device.queue_id.write().replace(queue_id);
device.set_queue(queue);
return (device_id, queue_id, None);
};

View file

@ -3,36 +3,8 @@
//! into other language-specific user-friendly libraries.
//!
//! ## Feature flags
// NOTE: feature docs. below should be kept in sync. with `Cargo.toml`!
#![doc = document_features::document_features!()]
//!
//! - **`api_log_info`** --- Log all API entry points at info instead of trace level.
//! - **`resource_log_info`** --- Log resource lifecycle management at info instead of trace level.
//! - **`link`** _(enabled by default)_ --- Use static linking for libraries. Disable to manually
//! link. Enabled by default.
//! - **`renderdoc`** --- Support the Renderdoc graphics debugger:
//! [https://renderdoc.org/](https://renderdoc.org/)
//! - **`strict_asserts`** --- Apply run-time checks, even in release builds. These are in addition
//! to the validation carried out at public APIs in all builds.
//! - **`serde`** --- Enables serialization via `serde` on common wgpu types.
//! - **`trace`** --- Enable API tracing.
//! - **`replay`** --- Enable API replaying
//! - **`wgsl`** --- Enable `ShaderModuleSource::Wgsl`
//! - **`fragile-send-sync-non-atomic-wasm`** --- Implement `Send` and `Sync` on Wasm, but only if
//! atomics are not enabled.
//!
//! WebGL/WebGPU objects can not be shared between threads. However, it can be useful to
//! artificially mark them as `Send` and `Sync` anyways to make it easier to write cross-platform
//! code. This is technically _very_ unsafe in a multithreaded environment, but on a wasm binary
//! compiled without atomics we know we are definitely not in a multithreaded environment.
//!
//! ### Backends, passed through to wgpu-hal
//!
//! - **`metal`** --- Enable the `metal` backend.
//! - **`vulkan`** --- Enable the `vulkan` backend.
//! - **`gles`** --- Enable the `GLES` backend.
//!
//! This is used for all of GLES, OpenGL, and WebGL.
//! - **`dx12`** --- Enable the `dx12` backend.
// When we have no backends, we end up with a lot of dead or otherwise unreachable code.
#![cfg_attr(

View file

@ -9,10 +9,7 @@ When this texture is presented, we remove it from the device tracker as well as
extract it from the hub.
!*/
use std::{
borrow::Borrow,
sync::atomic::{AtomicBool, Ordering},
};
use std::borrow::Borrow;
#[cfg(feature = "trace")]
use crate::device::trace::Action;
@ -213,7 +210,6 @@ impl Global {
inner: Snatchable::new(resource::TextureInner::Surface {
raw: Some(ast.texture),
parent_id: surface_id,
has_work: AtomicBool::new(false),
}),
device: device.clone(),
desc: texture_desc,
@ -224,7 +220,7 @@ impl Global {
layers: 0..1,
mips: 0..1,
},
info: ResourceInfo::new("<Surface>"),
info: ResourceInfo::new("<Surface>", None),
clear_mode: RwLock::new(resource::TextureClearMode::Surface {
clear_view: Some(clear_view),
}),
@ -240,7 +236,7 @@ impl Global {
let mut trackers = device.trackers.lock();
trackers
.textures
.insert_single(id, resource, hal::TextureUses::UNINITIALIZED);
.insert_single(resource, hal::TextureUses::UNINITIALIZED);
}
if present.acquired_texture.is_some() {
@ -298,8 +294,7 @@ impl Global {
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
let queue_id = device.queue_id.read().unwrap();
let queue = hub.queues.get(queue_id).unwrap();
let queue = device.get_queue().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -318,10 +313,13 @@ impl Global {
"Removing swapchain texture {:?} from the device tracker",
texture_id
);
device.trackers.lock().textures.remove(texture_id);
let texture = hub.textures.unregister(texture_id);
if let Some(texture) = texture {
device
.trackers
.lock()
.textures
.remove(texture.info.tracker_index());
let mut exclusive_snatch_guard = device.snatchable_lock.write();
let suf = A::get_surface(&surface);
let mut inner = texture.inner_mut(&mut exclusive_snatch_guard);
@ -331,15 +329,10 @@ impl Global {
resource::TextureInner::Surface {
ref mut raw,
ref parent_id,
ref has_work,
} => {
if surface_id != *parent_id {
log::error!("Presented frame is from a different surface");
Err(hal::SurfaceError::Lost)
} else if !has_work.load(Ordering::Relaxed) {
log::error!("No work has been submitted for this frame");
unsafe { suf.unwrap().discard_texture(raw.take().unwrap()) };
Err(hal::SurfaceError::Outdated)
} else {
unsafe {
queue
@ -413,18 +406,19 @@ impl Global {
"Removing swapchain texture {:?} from the device tracker",
texture_id
);
device.trackers.lock().textures.remove(texture_id);
let texture = hub.textures.unregister(texture_id);
if let Some(texture) = texture {
device
.trackers
.lock()
.textures
.remove(texture.info.tracker_index());
let suf = A::get_surface(&surface);
let exclusive_snatch_guard = device.snatchable_lock.write();
match texture.inner.snatch(exclusive_snatch_guard).unwrap() {
resource::TextureInner::Surface {
mut raw,
parent_id,
has_work: _,
} => {
resource::TextureInner::Surface { mut raw, parent_id } => {
if surface_id == parent_id {
unsafe { suf.unwrap().discard_texture(raw.take().unwrap()) };
} else {

View file

@ -60,7 +60,6 @@ impl<T: Resource> Registry<T> {
#[must_use]
pub(crate) struct FutureId<'a, T: Resource> {
id: Id<T::Marker>,
identity: Arc<IdentityManager<T::Marker>>,
data: &'a RwLock<Storage<T>>,
}
@ -75,7 +74,7 @@ impl<T: Resource> FutureId<'_, T> {
}
pub fn init(&self, mut value: T) -> Arc<T> {
value.as_info_mut().set_id(self.id, &self.identity);
value.as_info_mut().set_id(self.id);
Arc::new(value)
}
@ -117,7 +116,6 @@ impl<T: Resource> Registry<T> {
}
None => self.identity.process(self.backend),
},
identity: self.identity.clone(),
data: &self.storage,
}
}
@ -125,7 +123,6 @@ impl<T: Resource> Registry<T> {
pub(crate) fn request(&self) -> FutureId<T> {
FutureId {
id: self.identity.process(self.backend),
identity: self.identity.clone(),
data: &self.storage,
}
}
@ -142,11 +139,12 @@ impl<T: Resource> Registry<T> {
self.storage.write()
}
pub fn unregister_locked(&self, id: Id<T::Marker>, storage: &mut Storage<T>) -> Option<Arc<T>> {
self.identity.free(id);
storage.remove(id)
}
pub fn force_replace(&self, id: Id<T::Marker>, mut value: T) {
let mut storage = self.storage.write();
value.as_info_mut().set_id(id, &self.identity);
value.as_info_mut().set_id(id);
storage.force_replace(id, value)
}
pub fn force_replace_with_error(&self, id: Id<T::Marker>, label: &str) {
@ -155,6 +153,7 @@ impl<T: Resource> Registry<T> {
storage.insert_error(id, label);
}
pub(crate) fn unregister(&self, id: Id<T::Marker>) -> Option<Arc<T>> {
self.identity.free(id);
let value = self.storage.write().remove(id);
//Returning None is legal if it's an error ID
value

View file

@ -9,11 +9,10 @@ use crate::{
global::Global,
hal_api::HalApi,
id::{AdapterId, BufferId, DeviceId, Id, Marker, SurfaceId, TextureId},
identity::IdentityManager,
init_tracker::{BufferInitTracker, TextureInitTracker},
resource, resource_log,
snatch::{ExclusiveSnatchGuard, SnatchGuard, Snatchable},
track::TextureSelector,
track::{SharedTrackerIndexAllocator, TextureSelector, TrackerIndex},
validation::MissingBufferUsageError,
Label, SubmissionIndex,
};
@ -31,7 +30,7 @@ use std::{
ops::Range,
ptr::NonNull,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
atomic::{AtomicUsize, Ordering},
Arc, Weak,
},
};
@ -58,7 +57,8 @@ use std::{
#[derive(Debug)]
pub struct ResourceInfo<T: Resource> {
id: Option<Id<T::Marker>>,
identity: Option<Arc<IdentityManager<T::Marker>>>,
tracker_index: TrackerIndex,
tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
/// The index of the last queue submission in which the resource
/// was used.
///
@ -74,19 +74,26 @@ pub struct ResourceInfo<T: Resource> {
impl<T: Resource> Drop for ResourceInfo<T> {
fn drop(&mut self) {
if let Some(identity) = self.identity.as_ref() {
let id = self.id.as_ref().unwrap();
identity.free(*id);
if let Some(indices) = &self.tracker_indices {
indices.free(self.tracker_index);
}
}
}
impl<T: Resource> ResourceInfo<T> {
#[allow(unused_variables)]
pub(crate) fn new(label: &str) -> Self {
pub(crate) fn new(
label: &str,
tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
) -> Self {
let tracker_index = tracker_indices
.as_ref()
.map(|indices| indices.alloc())
.unwrap_or(TrackerIndex::INVALID);
Self {
id: None,
identity: None,
tracker_index,
tracker_indices,
submission_index: AtomicUsize::new(0),
label: label.to_string(),
}
@ -111,9 +118,13 @@ impl<T: Resource> ResourceInfo<T> {
self.id.unwrap()
}
pub(crate) fn set_id(&mut self, id: Id<T::Marker>, identity: &Arc<IdentityManager<T::Marker>>) {
pub(crate) fn tracker_index(&self) -> TrackerIndex {
debug_assert!(self.tracker_index != TrackerIndex::INVALID);
self.tracker_index
}
pub(crate) fn set_id(&mut self, id: Id<T::Marker>) {
self.id = Some(id);
self.identity = Some(identity.clone());
}
/// Record that this resource will be used by the queue submission with the
@ -551,6 +562,7 @@ impl<A: HalApi> Buffer<A> {
device: Arc::clone(&self.device),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
tracker_index: self.info.tracker_index(),
label: self.info.label.clone(),
bind_groups,
}))
@ -611,6 +623,7 @@ pub struct DestroyedBuffer<A: HalApi> {
device: Arc<Device<A>>,
label: String,
pub(crate) id: BufferId,
pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
bind_groups: Vec<Weak<BindGroup<A>>>,
}
@ -717,7 +730,6 @@ pub(crate) enum TextureInner<A: HalApi> {
Surface {
raw: Option<A::SurfaceTexture>,
parent_id: SurfaceId,
has_work: AtomicBool,
},
}
@ -886,6 +898,7 @@ impl<A: HalApi> Texture<A> {
views,
bind_groups,
device: Arc::clone(&self.device),
tracker_index: self.info.tracker_index(),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
label: self.info.label.clone(),
@ -1003,6 +1016,7 @@ pub struct DestroyedTexture<A: HalApi> {
device: Arc<Device<A>>,
label: String,
pub(crate) id: TextureId,
pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
}

View file

@ -7,7 +7,7 @@
use std::{borrow::Cow, marker::PhantomData, sync::Arc};
use super::{PendingTransition, ResourceTracker};
use super::{PendingTransition, ResourceTracker, TrackerIndex};
use crate::{
hal_api::HalApi,
id::BufferId,
@ -64,16 +64,16 @@ impl<A: HalApi> BufferBindGroupState<A> {
#[allow(clippy::pattern_type_mismatch)]
pub(crate) fn optimize(&self) {
let mut buffers = self.buffers.lock();
buffers.sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0);
buffers.sort_unstable_by_key(|(b, _)| b.as_info().tracker_index());
}
/// Returns a list of all buffers tracked. May contain duplicates.
#[allow(clippy::pattern_type_mismatch)]
pub fn used_ids(&self) -> impl Iterator<Item = BufferId> + '_ {
pub fn used_tracker_indices(&self) -> impl Iterator<Item = TrackerIndex> + '_ {
let buffers = self.buffers.lock();
buffers
.iter()
.map(|(ref b, _)| b.as_info().id())
.map(|(ref b, _)| b.as_info().tracker_index())
.collect::<Vec<_>>()
.into_iter()
}
@ -149,20 +149,6 @@ impl<A: HalApi> BufferUsageScope<A> {
resources.into_iter()
}
pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
let index = id.unzip().0 as usize;
if index > self.metadata.size() {
return None;
}
self.tracker_assert_in_bounds(index);
unsafe {
if self.metadata.contains_unchecked(index) {
return Some(self.metadata.get_resource_unchecked(index));
}
}
None
}
/// Merge the list of buffer states in the given bind group into this usage scope.
///
/// If any of the resulting states is invalid, stops the merge and returns a usage
@ -181,7 +167,7 @@ impl<A: HalApi> BufferUsageScope<A> {
) -> Result<(), UsageConflict> {
let buffers = bind_group.buffers.lock();
for &(ref resource, state) in &*buffers {
let index = resource.as_info().id().unzip().0 as usize;
let index = resource.as_info().tracker_index().as_usize();
unsafe {
insert_or_merge(
@ -255,7 +241,7 @@ impl<A: HalApi> BufferUsageScope<A> {
.get(id)
.map_err(|_| UsageConflict::BufferInvalid { id })?;
let index = id.unzip().0 as usize;
let index = buffer.info.tracker_index().as_usize();
self.allow_index(index);
@ -292,7 +278,7 @@ pub(crate) struct BufferTracker<A: HalApi> {
temp: Vec<PendingTransition<BufferUses>>,
}
impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
impl<A: HalApi> ResourceTracker for BufferTracker<A> {
/// Try to remove the buffer `id` from this tracker if it is otherwise unused.
///
/// A buffer is 'otherwise unused' when the only references to it are:
@ -313,8 +299,8 @@ impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
/// [`Device::trackers`]: crate::device::Device
/// [`self.metadata`]: BufferTracker::metadata
/// [`Hub::buffers`]: crate::hub::Hub::buffers
fn remove_abandoned(&mut self, id: BufferId) -> bool {
let index = id.unzip().0 as usize;
fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
let index = index.as_usize();
if index > self.metadata.size() {
return false;
@ -329,16 +315,10 @@ impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
//so it's already been released from user and so it's not inside Registry\Storage
if existing_ref_count <= 2 {
self.metadata.remove(index);
log::trace!("Buffer {:?} is not tracked anymore", id,);
return true;
} else {
log::trace!(
"Buffer {:?} is still referenced from {}",
id,
existing_ref_count
);
return false;
}
return false;
}
}
true
@ -404,8 +384,8 @@ impl<A: HalApi> BufferTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn insert_single(&mut self, id: BufferId, resource: Arc<Buffer<A>>, state: BufferUses) {
let index = id.unzip().0 as usize;
pub fn insert_single(&mut self, resource: Arc<Buffer<A>>, state: BufferUses) {
let index = resource.info.tracker_index().as_usize();
self.allow_index(index);
@ -440,7 +420,7 @@ impl<A: HalApi> BufferTracker<A> {
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_single(&mut self, buffer: &Arc<Buffer<A>>, state: BufferUses) -> SetSingleResult<A> {
let index: usize = buffer.as_info().id().unzip().0 as usize;
let index: usize = buffer.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -561,16 +541,15 @@ impl<A: HalApi> BufferTracker<A> {
pub unsafe fn set_and_remove_from_usage_scope_sparse(
&mut self,
scope: &mut BufferUsageScope<A>,
id_source: impl IntoIterator<Item = BufferId>,
index_source: impl IntoIterator<Item = TrackerIndex>,
) {
let incoming_size = scope.state.len();
if incoming_size > self.start.len() {
self.set_size(incoming_size);
}
for id in id_source {
let (index32, _, _) = id.unzip();
let index = index32 as usize;
for index in index_source {
let index = index.as_usize();
scope.tracker_assert_in_bounds(index);
@ -599,8 +578,8 @@ impl<A: HalApi> BufferTracker<A> {
}
#[allow(dead_code)]
pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
let index = id.unzip().0 as usize;
pub fn get(&self, index: TrackerIndex) -> Option<&Arc<Buffer<A>>> {
let index = index.as_usize();
if index > self.metadata.size() {
return None;
}
@ -785,11 +764,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_buffer(
BufferId::zip(
index32,
unsafe { metadata_provider.get_epoch(index) },
A::VARIANT,
),
unsafe { metadata_provider.get_own(index).info.id() },
*current_state,
new_state,
));

View file

@ -1,6 +1,6 @@
//! The `ResourceMetadata` type.
use crate::{resource::Resource, Epoch};
use crate::resource::Resource;
use bit_vec::BitVec;
use std::{borrow::Cow, mem, sync::Arc};
use wgt::strict_assert;
@ -194,15 +194,6 @@ impl<T: Resource> ResourceMetadataProvider<'_, T> {
}
}
}
/// Get the epoch from this.
///
/// # Safety
///
/// - The index must be in bounds of the metadata tracker if this uses an indirect source.
#[inline(always)]
pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch {
unsafe { self.get_own(index).as_info().id().unzip().1 }
}
}
/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is.

View file

@ -102,16 +102,11 @@ mod stateless;
mod texture;
use crate::{
binding_model, command, conv,
hal_api::HalApi,
id::{self, Id},
pipeline, resource,
snatch::SnatchGuard,
storage::Storage,
binding_model, command, conv, hal_api::HalApi, id, pipeline, resource, snatch::SnatchGuard,
};
use parking_lot::RwLock;
use std::{fmt, ops};
use parking_lot::{Mutex, RwLock};
use std::{fmt, ops, sync::Arc};
use thiserror::Error;
pub(crate) use buffer::{BufferBindGroupState, BufferTracker, BufferUsageScope};
@ -122,6 +117,130 @@ pub(crate) use texture::{
};
use wgt::strict_assert_ne;
#[repr(transparent)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub(crate) struct TrackerIndex(u32);
impl TrackerIndex {
/// A dummy value to place in ResourceInfo for resources that are never tracked.
pub const INVALID: Self = TrackerIndex(u32::MAX);
pub fn as_usize(self) -> usize {
debug_assert!(self != Self::INVALID);
self.0 as usize
}
}
/// wgpu-core internally use some array-like storage for tracking resources.
/// To that end, there needs to be a uniquely assigned index for each live resource
/// of a certain type. This index is separate from the resource ID for various reasons:
/// - There can be multiple resource IDs pointing the the same resource.
/// - IDs of dead handles can be recycled while resources are internally held alive (and tracked).
/// - The plan is to remove IDs in the long run (https://github.com/gfx-rs/wgpu/issues/5121).
/// In order to produce these tracker indices, there is a shared TrackerIndexAllocator
/// per resource type. Indices have the same lifetime as the internal resource they
/// are associated to (alloc happens when creating the resource and free is called when
/// the resource is dropped).
struct TrackerIndexAllocator {
unused: Vec<TrackerIndex>,
next_index: TrackerIndex,
}
impl TrackerIndexAllocator {
pub fn new() -> Self {
TrackerIndexAllocator {
unused: Vec::new(),
next_index: TrackerIndex(0),
}
}
pub fn alloc(&mut self) -> TrackerIndex {
if let Some(index) = self.unused.pop() {
return index;
}
let index = self.next_index;
self.next_index.0 += 1;
index
}
pub fn free(&mut self, index: TrackerIndex) {
self.unused.push(index);
}
// This is used to pre-allocate the tracker storage.
pub fn size(&self) -> usize {
self.next_index.0 as usize
}
}
impl std::fmt::Debug for TrackerIndexAllocator {
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
Ok(())
}
}
/// See TrackerIndexAllocator.
#[derive(Debug)]
pub(crate) struct SharedTrackerIndexAllocator {
inner: Mutex<TrackerIndexAllocator>,
}
impl SharedTrackerIndexAllocator {
pub fn new() -> Self {
SharedTrackerIndexAllocator {
inner: Mutex::new(TrackerIndexAllocator::new()),
}
}
pub fn alloc(&self) -> TrackerIndex {
self.inner.lock().alloc()
}
pub fn free(&self, index: TrackerIndex) {
self.inner.lock().free(index);
}
pub fn size(&self) -> usize {
self.inner.lock().size()
}
}
pub(crate) struct TrackerIndexAllocators {
pub buffers: Arc<SharedTrackerIndexAllocator>,
pub staging_buffers: Arc<SharedTrackerIndexAllocator>,
pub textures: Arc<SharedTrackerIndexAllocator>,
pub texture_views: Arc<SharedTrackerIndexAllocator>,
pub samplers: Arc<SharedTrackerIndexAllocator>,
pub bind_groups: Arc<SharedTrackerIndexAllocator>,
pub bind_group_layouts: Arc<SharedTrackerIndexAllocator>,
pub compute_pipelines: Arc<SharedTrackerIndexAllocator>,
pub render_pipelines: Arc<SharedTrackerIndexAllocator>,
pub pipeline_layouts: Arc<SharedTrackerIndexAllocator>,
pub bundles: Arc<SharedTrackerIndexAllocator>,
pub query_sets: Arc<SharedTrackerIndexAllocator>,
}
impl TrackerIndexAllocators {
pub fn new() -> Self {
TrackerIndexAllocators {
buffers: Arc::new(SharedTrackerIndexAllocator::new()),
staging_buffers: Arc::new(SharedTrackerIndexAllocator::new()),
textures: Arc::new(SharedTrackerIndexAllocator::new()),
texture_views: Arc::new(SharedTrackerIndexAllocator::new()),
samplers: Arc::new(SharedTrackerIndexAllocator::new()),
bind_groups: Arc::new(SharedTrackerIndexAllocator::new()),
bind_group_layouts: Arc::new(SharedTrackerIndexAllocator::new()),
compute_pipelines: Arc::new(SharedTrackerIndexAllocator::new()),
render_pipelines: Arc::new(SharedTrackerIndexAllocator::new()),
pipeline_layouts: Arc::new(SharedTrackerIndexAllocator::new()),
bundles: Arc::new(SharedTrackerIndexAllocator::new()),
query_sets: Arc::new(SharedTrackerIndexAllocator::new()),
}
}
}
/// A structure containing all the information about a particular resource
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
@ -359,31 +478,14 @@ pub(crate) struct RenderBundleScope<A: HalApi> {
impl<A: HalApi> RenderBundleScope<A> {
/// Create the render bundle scope and pull the maximum IDs from the hubs.
pub fn new(
buffers: &Storage<resource::Buffer<A>>,
textures: &Storage<resource::Texture<A>>,
bind_groups: &Storage<binding_model::BindGroup<A>>,
render_pipelines: &Storage<pipeline::RenderPipeline<A>>,
query_sets: &Storage<resource::QuerySet<A>>,
) -> Self {
let value = Self {
pub fn new() -> Self {
Self {
buffers: RwLock::new(BufferUsageScope::new()),
textures: RwLock::new(TextureUsageScope::new()),
bind_groups: RwLock::new(StatelessTracker::new()),
render_pipelines: RwLock::new(StatelessTracker::new()),
query_sets: RwLock::new(StatelessTracker::new()),
};
value.buffers.write().set_size(buffers.len());
value.textures.write().set_size(textures.len());
value.bind_groups.write().set_size(bind_groups.len());
value
.render_pipelines
.write()
.set_size(render_pipelines.len());
value.query_sets.write().set_size(query_sets.len());
value
}
}
/// Merge the inner contents of a bind group into the render bundle tracker.
@ -420,17 +522,14 @@ pub(crate) struct UsageScope<A: HalApi> {
impl<A: HalApi> UsageScope<A> {
/// Create the render bundle scope and pull the maximum IDs from the hubs.
pub fn new(
buffers: &Storage<resource::Buffer<A>>,
textures: &Storage<resource::Texture<A>>,
) -> Self {
pub fn new(tracker_indices: &TrackerIndexAllocators) -> Self {
let mut value = Self {
buffers: BufferUsageScope::new(),
textures: TextureUsageScope::new(),
};
value.buffers.set_size(buffers.len());
value.textures.set_size(textures.len());
value.buffers.set_size(tracker_indices.buffers.size());
value.textures.set_size(tracker_indices.textures.size());
value
}
@ -478,11 +577,8 @@ impl<A: HalApi> UsageScope<A> {
}
}
pub(crate) trait ResourceTracker<R>
where
R: resource::Resource,
{
fn remove_abandoned(&mut self, id: Id<R::Marker>) -> bool;
/// Common interface for trackers that can drop their hold on a resource
/// once the tracker itself owns the last remaining strong reference.
pub(crate) trait ResourceTracker {
    /// Try to remove the resource at `index` from this tracker.
    ///
    /// Returns `true` if the resource was removed or was not present in the
    /// tracker's metadata; returns `false` if the resource is still
    /// referenced elsewhere, or if `index` is beyond the tracker's internal
    /// storage (implementations bounds-check and bail out).
    fn remove_abandoned(&mut self, index: TrackerIndex) -> bool;
}
/// A full double sided tracker used by CommandBuffers and the Device.
@ -513,48 +609,6 @@ impl<A: HalApi> Tracker<A> {
}
}
/// Pull the maximum IDs from the hubs.
pub fn set_size(
&mut self,
buffers: Option<&Storage<resource::Buffer<A>>>,
textures: Option<&Storage<resource::Texture<A>>>,
views: Option<&Storage<resource::TextureView<A>>>,
samplers: Option<&Storage<resource::Sampler<A>>>,
bind_groups: Option<&Storage<binding_model::BindGroup<A>>>,
compute_pipelines: Option<&Storage<pipeline::ComputePipeline<A>>>,
render_pipelines: Option<&Storage<pipeline::RenderPipeline<A>>>,
bundles: Option<&Storage<command::RenderBundle<A>>>,
query_sets: Option<&Storage<resource::QuerySet<A>>>,
) {
if let Some(buffers) = buffers {
self.buffers.set_size(buffers.len());
};
if let Some(textures) = textures {
self.textures.set_size(textures.len());
};
if let Some(views) = views {
self.views.set_size(views.len());
};
if let Some(samplers) = samplers {
self.samplers.set_size(samplers.len());
};
if let Some(bind_groups) = bind_groups {
self.bind_groups.set_size(bind_groups.len());
};
if let Some(compute_pipelines) = compute_pipelines {
self.compute_pipelines.set_size(compute_pipelines.len());
}
if let Some(render_pipelines) = render_pipelines {
self.render_pipelines.set_size(render_pipelines.len());
};
if let Some(bundles) = bundles {
self.bundles.set_size(bundles.len());
};
if let Some(query_sets) = query_sets {
self.query_sets.set_size(query_sets.len());
};
}
/// Iterates through all resources in the given bind group and adopts
/// the state given for those resources in the UsageScope. It also
/// removes all touched resources from the usage scope.
@ -585,7 +639,7 @@ impl<A: HalApi> Tracker<A> {
unsafe {
self.buffers.set_and_remove_from_usage_scope_sparse(
&mut scope.buffers,
bind_group.buffers.used_ids(),
bind_group.buffers.used_tracker_indices(),
)
};
unsafe {

View file

@ -10,7 +10,7 @@ use parking_lot::Mutex;
use crate::{id::Id, resource::Resource, resource_log, storage::Storage, track::ResourceMetadata};
use super::ResourceTracker;
use super::{ResourceTracker, TrackerIndex};
/// Satisfy clippy.
type Pair<T> = (Id<<T as Resource>::Marker>, Arc<T>);
@ -74,7 +74,7 @@ pub(crate) struct StatelessTracker<T: Resource> {
metadata: ResourceMetadata<T>,
}
impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
impl<T: Resource> ResourceTracker for StatelessTracker<T> {
/// Try to remove the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
@ -82,14 +82,14 @@ impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
fn remove_abandoned(&mut self, id: Id<T::Marker>) -> bool {
let index = id.unzip().0 as usize;
fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
let index = index.as_usize();
if index >= self.metadata.size() {
return false;
}
resource_log!("StatelessTracker::remove_abandoned {id:?}");
resource_log!("StatelessTracker::remove_abandoned {index:?}");
self.tracker_assert_in_bounds(index);
@ -100,17 +100,10 @@ impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
//so it's already been released from user and so it's not inside Registry\Storage
if existing_ref_count <= 2 {
self.metadata.remove(index);
log::trace!("{} {:?} is not tracked anymore", T::TYPE, id,);
return true;
} else {
log::trace!(
"{} {:?} is still referenced from {}",
T::TYPE,
id,
existing_ref_count
);
return false;
}
return false;
}
}
true
@ -160,9 +153,8 @@ impl<T: Resource> StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn insert_single(&mut self, id: Id<T::Marker>, resource: Arc<T>) {
let (index32, _epoch, _) = id.unzip();
let index = index32 as usize;
pub fn insert_single(&mut self, resource: Arc<T>) {
let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -184,8 +176,7 @@ impl<T: Resource> StatelessTracker<T> {
) -> Option<&'a Arc<T>> {
let resource = storage.get(id).ok()?;
let (index32, _epoch, _) = id.unzip();
let index = index32 as usize;
let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -221,18 +212,4 @@ impl<T: Resource> StatelessTracker<T> {
}
}
}
pub fn get(&self, id: Id<T::Marker>) -> Option<&Arc<T>> {
let index = id.unzip().0 as usize;
if index > self.metadata.size() {
return None;
}
self.tracker_assert_in_bounds(index);
unsafe {
if self.metadata.contains_unchecked(index) {
return Some(self.metadata.get_resource_unchecked(index));
}
}
None
}
}

View file

@ -19,10 +19,11 @@
* will treat the contents as junk.
!*/
use super::{range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker};
use super::{
range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker, TrackerIndex,
};
use crate::{
hal_api::HalApi,
id::TextureId,
resource::{Resource, Texture, TextureInner},
snatch::SnatchGuard,
track::{
@ -173,7 +174,7 @@ impl<A: HalApi> TextureBindGroupState<A> {
/// accesses will be in a constant ascending order.
pub(crate) fn optimize(&self) {
let mut textures = self.textures.lock();
textures.sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0);
textures.sort_unstable_by_key(|v| v.texture.as_info().tracker_index());
}
/// Returns a list of all textures tracked. May contain duplicates.
@ -359,7 +360,7 @@ impl<A: HalApi> TextureUsageScope<A> {
selector: Option<TextureSelector>,
new_state: TextureUses,
) -> Result<(), UsageConflict> {
let index = texture.as_info().id().unzip().0 as usize;
let index = texture.as_info().tracker_index().as_usize();
self.tracker_assert_in_bounds(index);
@ -393,7 +394,7 @@ pub(crate) struct TextureTracker<A: HalApi> {
_phantom: PhantomData<A>,
}
impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
impl<A: HalApi> ResourceTracker for TextureTracker<A> {
/// Try to remove the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
@ -401,10 +402,10 @@ impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
fn remove_abandoned(&mut self, id: TextureId) -> bool {
let index = id.unzip().0 as usize;
fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
let index = index.as_usize();
if index > self.metadata.size() {
if index >= self.metadata.size() {
return false;
}
@ -419,16 +420,10 @@ impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
self.start_set.complex.remove(&index);
self.end_set.complex.remove(&index);
self.metadata.remove(index);
log::trace!("Texture {:?} is not tracked anymore", id,);
return true;
} else {
log::trace!(
"Texture {:?} is still referenced from {}",
id,
existing_ref_count
);
return false;
}
return false;
}
}
true
@ -518,8 +513,8 @@ impl<A: HalApi> TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn insert_single(&mut self, id: TextureId, resource: Arc<Texture<A>>, usage: TextureUses) {
let index = id.unzip().0 as usize;
pub fn insert_single(&mut self, resource: Arc<Texture<A>>, usage: TextureUses) {
let index = resource.info.tracker_index().as_usize();
self.allow_index(index);
@ -560,7 +555,7 @@ impl<A: HalApi> TextureTracker<A> {
selector: TextureSelector,
new_state: TextureUses,
) -> Option<Drain<'_, PendingTransition<TextureUses>>> {
let index = texture.as_info().id().unzip().0 as usize;
let index = texture.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -694,7 +689,7 @@ impl<A: HalApi> TextureTracker<A> {
let textures = bind_group_state.textures.lock();
for t in textures.iter() {
let index = t.texture.as_info().id().unzip().0 as usize;
let index = t.texture.as_info().tracker_index().as_usize();
scope.tracker_assert_in_bounds(index);
if unsafe { !scope.metadata.contains_unchecked(index) } {
@ -727,10 +722,10 @@ impl<A: HalApi> TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
pub fn remove(&mut self, id: TextureId) -> bool {
let index = id.unzip().0 as usize;
pub fn remove(&mut self, index: TrackerIndex) -> bool {
let index = index.as_usize();
if index > self.metadata.size() {
if index >= self.metadata.size() {
return false;
}
@ -1080,11 +1075,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
TextureId::zip(
index as _,
unsafe { metadata_provider.get_epoch(index) },
A::VARIANT,
),
unsafe { metadata_provider.get_own(index).info.id() },
texture_selector.clone(),
*current_simple,
new_simple,
@ -1111,11 +1102,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
TextureId::zip(
index as _,
unsafe { metadata_provider.get_epoch(index) },
A::VARIANT,
),
unsafe { metadata_provider.get_own(index).info.id() },
selector,
*current_simple,
new_state,
@ -1156,11 +1143,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
TextureId::zip(
index as _,
unsafe { metadata_provider.get_epoch(index) },
A::VARIANT,
),
unsafe { metadata_provider.get_own(index).info.id() },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),
@ -1201,11 +1184,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
TextureId::zip(
index as _,
unsafe { metadata_provider.get_epoch(index) },
A::VARIANT,
),
unsafe { metadata_provider.get_own(index).info.id() },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),

View file

@ -1,4 +1,8 @@
use crate::{device::bgl, FastHashMap, FastHashSet};
use crate::{
device::bgl,
id::{markers::Buffer, Id},
FastHashMap, FastHashSet,
};
use arrayvec::ArrayVec;
use std::{collections::hash_map::Entry, fmt};
use thiserror::Error;
@ -134,8 +138,11 @@ pub struct Interface {
}
#[derive(Clone, Debug, Error)]
#[error("Buffer usage is {actual:?} which does not contain required usage {expected:?}")]
#[error(
"Usage flags {actual:?} for buffer {id:?} do not contain required usage flags {expected:?}"
)]
pub struct MissingBufferUsageError {
pub(crate) id: Id<Buffer>,
pub(crate) actual: wgt::BufferUsages,
pub(crate) expected: wgt::BufferUsages,
}
@ -143,11 +150,16 @@ pub struct MissingBufferUsageError {
/// Checks that the given buffer usage contains the required buffer usage,
/// returns an error otherwise.
pub fn check_buffer_usage(
id: Id<Buffer>,
actual: wgt::BufferUsages,
expected: wgt::BufferUsages,
) -> Result<(), MissingBufferUsageError> {
if !actual.contains(expected) {
Err(MissingBufferUsageError { actual, expected })
Err(MissingBufferUsageError {
id,
actual,
expected,
})
} else {
Ok(())
}
@ -892,9 +904,15 @@ impl Interface {
class,
},
naga::TypeInner::Sampler { comparison } => ResourceType::Sampler { comparison },
naga::TypeInner::Array { stride, .. } => ResourceType::Buffer {
size: wgt::BufferSize::new(stride as u64).unwrap(),
},
naga::TypeInner::Array { stride, size, .. } => {
let size = match size {
naga::ArraySize::Constant(size) => size.get() * stride,
naga::ArraySize::Dynamic => stride,
};
ResourceType::Buffer {
size: wgt::BufferSize::new(size as u64).unwrap(),
}
}
ref other => ResourceType::Buffer {
size: wgt::BufferSize::new(other.size(module.to_ctx()) as u64).unwrap(),
},
@ -1246,3 +1264,29 @@ impl Interface {
.map(|ep| ep.dual_source_blending)
}
}
// https://gpuweb.github.io/gpuweb/#abstract-opdef-calculating-color-attachment-bytes-per-sample
/// Validate the combined per-sample byte cost of a render target layout.
///
/// Walks `attachment_formats` (unused slots are `None` and contribute
/// nothing), accumulating each format's target byte cost after first
/// rounding the running total up to that format's required component
/// alignment, exactly as the WebGPU spec's
/// "calculating color attachment bytes per sample" algorithm prescribes.
///
/// Returns `Ok(())` when the total fits within `limit`, otherwise
/// `Err(total)` carrying the computed total so callers can report it.
pub fn validate_color_attachment_bytes_per_sample(
    attachment_formats: impl Iterator<Item = Option<wgt::TextureFormat>>,
    limit: u32,
) -> Result<(), u32> {
    let mut total: u32 = 0;
    // `flatten()` skips the `None` (unused) attachment slots.
    for format in attachment_formats.flatten() {
        let byte_cost = format.target_pixel_byte_cost().unwrap();
        let alignment = format.target_component_alignment().unwrap();
        // Round the running total up to this attachment's alignment
        // before adding its cost, per the spec algorithm.
        let misalignment = total % alignment;
        if misalignment != 0 {
            total += alignment - misalignment;
        }
        total += byte_cost;
    }
    if total <= limit {
        Ok(())
    } else {
        Err(total)
    }
}

File diff suppressed because one or more lines are too long

View file

@ -95,6 +95,7 @@ cfg_aliases = "0.1"
[features]
default = ["link"]
device_lost_panic = []
dx12 = [
"naga/hlsl-out",
"d3d12",
@ -117,11 +118,13 @@ gles = [
"khronos-egl",
"libloading",
]
internal_error_panic = []
link = ["metal/link"]
metal = [
"naga/msl-out",
"block",
]
oom_panic = []
renderdoc = [
"libloading",
"renderdoc-sys",

View file

@ -21,8 +21,26 @@ impl HResult<()> for i32 {
Err(Cow::Borrowed(description))
}
fn into_device_result(self, description: &str) -> Result<(), crate::DeviceError> {
#![allow(unreachable_code)]
self.into_result().map_err(|err| {
log::error!("{} failed: {}", description, err);
match self {
winerror::E_OUTOFMEMORY => {
#[cfg(feature = "oom_panic")]
panic!("{description} failed: Out of memory");
}
winerror::DXGI_ERROR_DEVICE_RESET | winerror::DXGI_ERROR_DEVICE_REMOVED => {
#[cfg(feature = "device_lost_panic")]
panic!("{description} failed: Device lost ({err})");
}
_ => {
#[cfg(feature = "internal_error_panic")]
panic!("{description} failed: {err}");
}
}
if self == winerror::E_OUTOFMEMORY {
crate::DeviceError::OutOfMemory
} else {

View file

@ -242,6 +242,7 @@ impl super::Adapter {
| wgt::Features::POLYGON_MODE_LINE
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| wgt::Features::TIMESTAMP_QUERY
| wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS
| wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES
| wgt::Features::TEXTURE_COMPRESSION_BC
| wgt::Features::CLEAR_TEXTURE
@ -307,6 +308,12 @@ impl super::Adapter {
downlevel.flags -=
wgt::DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW;
// See https://learn.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels#feature-level-support
let max_color_attachments = 8;
// TODO: determine this programmatically if possible.
// https://github.com/gpuweb/gpuweb/issues/2965#issuecomment-1361315447
let max_color_attachment_bytes_per_sample = 64;
Some(crate::ExposedAdapter {
adapter: super::Adapter {
raw: adapter,
@ -377,6 +384,8 @@ impl super::Adapter {
d3d12_ty::D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT,
min_storage_buffer_offset_alignment: 4,
max_inter_stage_shader_components: base.max_inter_stage_shader_components,
max_color_attachments,
max_color_attachment_bytes_per_sample,
max_compute_workgroup_storage_size: base.max_compute_workgroup_storage_size, //TODO?
max_compute_invocations_per_workgroup:
d3d12_ty::D3D12_CS_4_X_THREAD_GROUP_MAX_THREADS_PER_GROUP,

View file

@ -56,6 +56,13 @@ impl super::Temp {
}
}
impl Drop for super::CommandEncoder {
    /// Discard any command list still under construction when the encoder
    /// is dropped, so its recording state is torn down rather than leaked.
    fn drop(&mut self) {
        // Bring the hal trait into scope so `discard_encoding` resolves.
        use crate::CommandEncoder;
        // NOTE(review): the `CommandEncoder` trait documents
        // `discard_encoding` as requiring the "recording" state; this drop
        // runs unconditionally, so the backend presumably tolerates being
        // asked to discard when nothing is being recorded — confirm.
        unsafe { self.discard_encoding() }
    }
}
impl super::CommandEncoder {
unsafe fn begin_pass(&mut self, kind: super::PassKind, label: crate::Label) {
let list = self.list.as_ref().unwrap();

View file

@ -663,11 +663,7 @@ impl crate::Device<super::Api> for super::Device {
end_of_pass_timer_query: None,
})
}
unsafe fn destroy_command_encoder(&self, encoder: super::CommandEncoder) {
if let Some(list) = encoder.list {
list.close();
}
}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}
unsafe fn create_bind_group_layout(
&self,

View file

@ -472,6 +472,7 @@ impl super::Adapter {
features.set(wgt::Features::SHADER_UNUSED_VERTEX_OUTPUT, true);
if extensions.contains("GL_ARB_timer_query") {
features.set(wgt::Features::TIMESTAMP_QUERY, true);
features.set(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS, true);
features.set(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES, true);
}
let gl_bcn_exts = [
@ -652,6 +653,15 @@ impl super::Adapter {
0
};
let max_color_attachments = unsafe {
gl.get_parameter_i32(glow::MAX_COLOR_ATTACHMENTS)
.min(gl.get_parameter_i32(glow::MAX_DRAW_BUFFERS))
.min(crate::MAX_COLOR_ATTACHMENTS as i32) as u32
};
// TODO: programmatically determine this.
let max_color_attachment_bytes_per_sample = 32;
let limits = wgt::Limits {
max_texture_dimension_1d: max_texture_size,
max_texture_dimension_2d: max_texture_size,
@ -722,6 +732,8 @@ impl super::Adapter {
max_inter_stage_shader_components: unsafe {
gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS)
} as u32,
max_color_attachments,
max_color_attachment_bytes_per_sample,
max_compute_workgroup_storage_size: if supports_work_group_params {
(unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) } as u32)
} else {

View file

@ -93,6 +93,13 @@ impl super::CommandBuffer {
}
}
impl Drop for super::CommandEncoder {
    /// Discard any in-progress command recording when the encoder is
    /// dropped, releasing whatever the pending command list holds.
    fn drop(&mut self) {
        // Bring the hal trait into scope so `discard_encoding` resolves.
        use crate::CommandEncoder;
        // NOTE(review): the `CommandEncoder` trait marks `discard_encoding`
        // as valid only in the "recording" state, yet this runs on every
        // drop — presumably the gles implementation is a no-op when the
        // encoder is already closed; confirm.
        unsafe { self.discard_encoding() }
    }
}
impl super::CommandEncoder {
fn rebind_stencil_func(&mut self) {
fn make(s: &super::StencilSide, face: u32) -> C {

View file

@ -1194,13 +1194,16 @@ impl crate::Device<super::Api> for super::Device {
let sampler = desc.samplers[entry.resource_index as usize];
super::RawBinding::Sampler(sampler.raw)
}
wgt::BindingType::Texture { .. } => {
wgt::BindingType::Texture { view_dimension, .. } => {
let view = desc.textures[entry.resource_index as usize].view;
if view.array_layers.start != 0 {
log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
"This is an implementation problem of wgpu-hal/gles backend.")
}
let (raw, target) = view.inner.as_native();
super::Texture::log_failing_target_heuristics(view_dimension, target);
super::RawBinding::Texture {
raw,
target,

View file

@ -161,7 +161,7 @@ impl Drop for DisplayOwner {
fn open_x_display() -> Option<DisplayOwner> {
log::debug!("Loading X11 library to get the current display");
unsafe {
let library = libloading::Library::new("libX11.so").ok()?;
let library = find_library(&["libX11.so.6", "libX11.so"])?;
let func: libloading::Symbol<XOpenDisplayFun> = library.get(b"XOpenDisplay").unwrap();
let result = func(ptr::null());
ptr::NonNull::new(result).map(|ptr| DisplayOwner {

View file

@ -366,6 +366,8 @@ impl Texture {
/// Returns the `target`, whether the image is 3d and whether the image is a cubemap.
fn get_info_from_desc(desc: &TextureDescriptor) -> u32 {
match desc.dimension {
// WebGL (1 and 2) as well as some GLES versions do not have 1D textures, so we are
// doing `TEXTURE_2D` instead
wgt::TextureDimension::D1 => glow::TEXTURE_2D,
wgt::TextureDimension::D2 => {
// HACK: detect a cube map; forces cube compatible textures to be cube textures
@ -379,6 +381,43 @@ impl Texture {
wgt::TextureDimension::D3 => glow::TEXTURE_3D,
}
}
/// Log an error when the GL texture `target` chosen by wgpu-hal's
/// creation-time heuristics does not match the `view_dimension` the
/// binding actually requires.
///
/// The gles backend has to pick a GL target when the texture is created,
/// before any view dimension is known, so it guesses from
/// `depth_or_array_layers` (see the rules listed in the log message).
/// This function only reports the mismatch; it does not correct it.
///
/// More information can be found in issues #1614 and #1574
fn log_failing_target_heuristics(view_dimension: wgt::TextureViewDimension, target: u32) {
    // The GL target we would need for the requested view dimension.
    // D1 maps to TEXTURE_2D because this backend emulates 1D textures
    // with 2D ones (GLES/WebGL have no 1D textures).
    let expected_target = match view_dimension {
        wgt::TextureViewDimension::D1 => glow::TEXTURE_2D,
        wgt::TextureViewDimension::D2 => glow::TEXTURE_2D,
        wgt::TextureViewDimension::D2Array => glow::TEXTURE_2D_ARRAY,
        wgt::TextureViewDimension::Cube => glow::TEXTURE_CUBE_MAP,
        wgt::TextureViewDimension::CubeArray => glow::TEXTURE_CUBE_MAP_ARRAY,
        wgt::TextureViewDimension::D3 => glow::TEXTURE_3D,
    };

    // Heuristic agreed with the actual target: nothing to report.
    if expected_target == target {
        return;
    }

    // `buffer` outlives the match so `&buffer` can be borrowed for an
    // unrecognized numeric target in the catch-all arm.
    let buffer;
    let got = match target {
        glow::TEXTURE_2D => "D2",
        glow::TEXTURE_2D_ARRAY => "D2Array",
        glow::TEXTURE_CUBE_MAP => "Cube",
        glow::TEXTURE_CUBE_MAP_ARRAY => "CubeArray",
        glow::TEXTURE_3D => "D3",
        target => {
            buffer = target.to_string();
            &buffer
        }
    };

    log::error!(
        "wgpu-hal heuristics assumed that the view dimension will be equal to `{got}` rather than `{view_dimension:?}`.\n{}\n{}\n{}\n{}",
        "`D2` textures with `depth_or_array_layers == 1` are assumed to have view dimension `D2`",
        "`D2` textures with `depth_or_array_layers > 1` are assumed to have view dimension `D2Array`",
        "`D2` textures with `depth_or_array_layers == 6` are assumed to have view dimension `Cube`",
        "`D2` textures with `depth_or_array_layers > 6 && depth_or_array_layers % 6 == 0` are assumed to have view dimension `CubeArray`",
    );
}
}
#[derive(Clone, Debug)]

View file

@ -329,6 +329,9 @@ pub trait Device<A: Api>: WasmNotSendSync {
unsafe fn create_sampler(&self, desc: &SamplerDescriptor) -> Result<A::Sampler, DeviceError>;
unsafe fn destroy_sampler(&self, sampler: A::Sampler);
/// Create a fresh [`CommandEncoder`].
///
/// The new `CommandEncoder` is in the "closed" state.
unsafe fn create_command_encoder(
&self,
desc: &CommandEncoderDescriptor<A>,
@ -429,19 +432,95 @@ pub trait Queue<A: Api>: WasmNotSendSync {
unsafe fn get_timestamp_period(&self) -> f32;
}
/// Encoder for commands in command buffers.
/// Serves as a parent for all the encoded command buffers.
/// Works in bursts of action: one or more command buffers are recorded,
/// then submitted to a queue, and then it needs to be `reset_all()`.
/// Encoder and allocation pool for `CommandBuffer`.
///
/// The life cycle of a `CommandBuffer` is as follows:
///
/// - Call [`Device::create_command_encoder`] to create a new
/// `CommandEncoder`, in the "closed" state.
///
/// - Call `begin_encoding` on a closed `CommandEncoder` to begin
/// recording commands. This puts the `CommandEncoder` in the
/// "recording" state.
///
/// - Call methods like `copy_buffer_to_buffer`, `begin_render_pass`,
/// etc. on a "recording" `CommandEncoder` to add commands to the
/// list.
///
/// - Call `end_encoding` on a recording `CommandEncoder` to close the
/// encoder and construct a fresh `CommandBuffer` consisting of the
/// list of commands recorded up to that point.
///
/// - Call `discard_encoding` on a recording `CommandEncoder` to drop
/// the commands recorded thus far and close the encoder.
///
/// - Call `reset_all` on a closed `CommandEncoder`, passing all the
/// live `CommandBuffers` built from it. All the `CommandBuffer`s
/// are destroyed, and their resources are freed.
///
/// # Safety
///
/// - The `CommandEncoder` must be in the states described above to
/// make the given calls.
///
/// - A `CommandBuffer` that has been submitted for execution on the
/// GPU must live until its execution is complete.
///
/// - A `CommandBuffer` must not outlive the `CommandEncoder` that
/// built it.
///
/// - A `CommandEncoder` must not outlive its `Device`.
pub trait CommandEncoder<A: Api>: WasmNotSendSync + fmt::Debug {
/// Begin encoding a new command buffer.
///
/// This puts this `CommandEncoder` in the "recording" state.
///
/// # Safety
///
/// This `CommandEncoder` must be in the "closed" state.
unsafe fn begin_encoding(&mut self, label: Label) -> Result<(), DeviceError>;
/// Discard currently recorded list, if any.
/// Discard the command list under construction, if any.
///
/// This puts this `CommandEncoder` in the "closed" state.
///
/// # Safety
///
/// This `CommandEncoder` must be in the "recording" state.
unsafe fn discard_encoding(&mut self);
/// Return a fresh [`CommandBuffer`] holding the recorded commands.
///
/// The returned [`CommandBuffer`] holds all the commands recorded
/// on this `CommandEncoder` since the last call to
/// [`begin_encoding`].
///
/// This puts this `CommandEncoder` in the "closed" state.
///
/// # Safety
///
/// This `CommandEncoder` must be in the "recording" state.
///
/// The returned [`CommandBuffer`] must not outlive this
/// `CommandEncoder`. Implementations are allowed to build
/// `CommandBuffer`s that depend on storage owned by this
/// `CommandEncoder`.
///
/// [`CommandBuffer`]: Api::CommandBuffer
/// [`begin_encoding`]: CommandEncoder::begin_encoding
unsafe fn end_encoding(&mut self) -> Result<A::CommandBuffer, DeviceError>;
/// Reclaims all resources that are allocated for this encoder.
/// Must get all of the produced command buffers back,
/// and they must not be used by GPU at this moment.
/// Reclaim all resources belonging to this `CommandEncoder`.
///
/// # Safety
///
/// This `CommandEncoder` must be in the "closed" state.
///
/// The `command_buffers` iterator must produce all the live
/// [`CommandBuffer`]s built using this `CommandEncoder` --- that
/// is, every extant `CommandBuffer` returned from `end_encoding`.
///
/// [`CommandBuffer`]: Api::CommandBuffer
unsafe fn reset_all<I>(&mut self, command_buffers: I)
where
I: Iterator<Item = A::CommandBuffer>;

View file

@ -731,6 +731,12 @@ impl super::PrivateCapabilities {
} else {
4
},
// Per https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
max_color_attachment_bytes_per_sample: if device.supports_family(MTLGPUFamily::Apple4) {
64
} else {
32
},
max_varying_components: if device
.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1)
{
@ -833,7 +839,7 @@ impl super::PrivateCapabilities {
self.indirect_draw_dispatch,
);
features.set(
F::TIMESTAMP_QUERY,
F::TIMESTAMP_QUERY | F::TIMESTAMP_QUERY_INSIDE_ENCODERS,
self.timestamp_query_support
.contains(TimestampQuerySupport::STAGE_BOUNDARIES),
);
@ -940,6 +946,10 @@ impl super::PrivateCapabilities {
min_uniform_buffer_offset_alignment: self.buffer_alignment as u32,
min_storage_buffer_offset_alignment: self.buffer_alignment as u32,
max_inter_stage_shader_components: self.max_varying_components,
max_color_attachments: (self.max_color_render_targets as u32)
.min(crate::MAX_COLOR_ATTACHMENTS as u32),
max_color_attachment_bytes_per_sample: self.max_color_attachment_bytes_per_sample
as u32,
max_compute_workgroup_storage_size: self.max_total_threadgroup_memory,
max_compute_invocations_per_workgroup: self.max_threads_per_group,
max_compute_workgroup_size_x: self.max_threads_per_group,

View file

@ -248,6 +248,7 @@ struct PrivateCapabilities {
max_texture_layers: u64,
max_fragment_input_components: u64,
max_color_render_targets: u8,
max_color_attachment_bytes_per_sample: u8,
max_varying_components: u32,
max_threads_per_group: u32,
max_total_threadgroup_memory: u32,

View file

@ -369,6 +369,7 @@ impl PhysicalDeviceFeatures {
| F::ADDRESS_MODE_CLAMP_TO_BORDER
| F::ADDRESS_MODE_CLAMP_TO_ZERO
| F::TIMESTAMP_QUERY
| F::TIMESTAMP_QUERY_INSIDE_ENCODERS
| F::TIMESTAMP_QUERY_INSIDE_PASSES
| F::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| F::CLEAR_TEXTURE;
@ -827,6 +828,11 @@ impl PhysicalDeviceCapabilities {
u64::MAX
};
// TODO: programmatically determine this, if possible. It's unclear whether we can
// as of https://github.com/gpuweb/gpuweb/issues/2965#issuecomment-1361315447.
// We could increase the limit when we aren't on a tiled GPU.
let max_color_attachment_bytes_per_sample = 32;
wgt::Limits {
max_texture_dimension_1d: limits.max_image_dimension1_d,
max_texture_dimension_2d: limits.max_image_dimension2_d,
@ -862,6 +868,10 @@ impl PhysicalDeviceCapabilities {
max_inter_stage_shader_components: limits
.max_vertex_output_components
.min(limits.max_fragment_input_components),
max_color_attachments: limits
.max_color_attachments
.min(crate::MAX_COLOR_ATTACHMENTS as u32),
max_color_attachment_bytes_per_sample,
max_compute_workgroup_storage_size: limits.max_compute_shared_memory_size,
max_compute_invocations_per_workgroup: limits.max_compute_work_group_invocations,
max_compute_workgroup_size_x: max_compute_workgroup_sizes[0],

View file

@ -6,6 +6,7 @@ use std::{
thread,
};
use arrayvec::ArrayVec;
use ash::{
extensions::{ext, khr},
vk,
@ -211,6 +212,22 @@ impl super::Instance {
&self.shared
}
/// List the Vulkan instance extensions available from `entry`, wrapping
/// `vkEnumerateInstanceExtensionProperties`.
///
/// When `layer_name` is `Some`, the query is scoped to the extensions
/// provided by that layer (used to probe layer-provided extensions such
/// as those of the validation layer); `None` queries the implementation
/// and implicitly enabled layers.
///
/// Any Vulkan error is converted into a `crate::InstanceError` with the
/// original result attached as its source.
fn enumerate_instance_extension_properties(
    entry: &ash::Entry,
    layer_name: Option<&CStr>,
) -> Result<Vec<vk::ExtensionProperties>, crate::InstanceError> {
    let instance_extensions = {
        // Profiling scope named after the underlying Vulkan call.
        profiling::scope!("vkEnumerateInstanceExtensionProperties");
        entry.enumerate_instance_extension_properties(layer_name)
    };

    instance_extensions.map_err(|e| {
        crate::InstanceError::with_source(
            String::from("enumerate_instance_extension_properties() failed"),
            e,
        )
    })
}
/// Return the instance extension names wgpu would like to enable.
///
/// Return a vector of the names of instance extensions actually available
@ -229,16 +246,7 @@ impl super::Instance {
_instance_api_version: u32,
flags: wgt::InstanceFlags,
) -> Result<Vec<&'static CStr>, crate::InstanceError> {
let instance_extensions = {
profiling::scope!("vkEnumerateInstanceExtensionProperties");
entry.enumerate_instance_extension_properties(None)
};
let instance_extensions = instance_extensions.map_err(|e| {
crate::InstanceError::with_source(
String::from("enumerate_instance_extension_properties() failed"),
e,
)
})?;
let instance_extensions = Self::enumerate_instance_extension_properties(entry, None)?;
// Check our extensions against the available extensions
let mut extensions: Vec<&'static CStr> = Vec::new();
@ -643,6 +651,31 @@ impl crate::Instance<super::Api> for super::Instance {
.find(|inst_layer| cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(name))
}
let validation_layer_name =
CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap();
let validation_layer_properties = find_layer(&instance_layers, validation_layer_name);
// Determine if VK_EXT_validation_features is available, so we can enable
// GPU assisted validation and synchronization validation.
let validation_features_are_enabled = if validation_layer_properties.is_some() {
// Get the all the instance extension properties.
let exts =
Self::enumerate_instance_extension_properties(&entry, Some(validation_layer_name))?;
// Convert all the names of the extensions into an iterator of CStrs.
let mut ext_names = exts
.iter()
.filter_map(|ext| cstr_from_bytes_until_nul(&ext.extension_name));
// Find the validation features extension.
ext_names.any(|ext_name| ext_name == vk::ExtValidationFeaturesFn::name())
} else {
false
};
let should_enable_gpu_based_validation = desc
.flags
.intersects(wgt::InstanceFlags::GPU_BASED_VALIDATION)
&& validation_features_are_enabled;
let nv_optimus_layer = CStr::from_bytes_with_nul(b"VK_LAYER_NV_optimus\0").unwrap();
let has_nv_optimus = find_layer(&instance_layers, nv_optimus_layer).is_some();
@ -653,10 +686,10 @@ impl crate::Instance<super::Api> for super::Instance {
// Request validation layer if asked.
let mut debug_utils = None;
if desc.flags.intersects(wgt::InstanceFlags::VALIDATION) {
let validation_layer_name =
CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap();
if let Some(layer_properties) = find_layer(&instance_layers, validation_layer_name) {
if desc.flags.intersects(wgt::InstanceFlags::VALIDATION)
|| should_enable_gpu_based_validation
{
if let Some(layer_properties) = validation_layer_properties {
layers.push(validation_layer_name);
if extensions.contains(&ext::DebugUtils::name()) {
@ -756,6 +789,28 @@ impl crate::Instance<super::Api> for super::Instance {
create_info = create_info.push_next(vk_create_info);
}
// Enable explicit validation features if available
let mut validation_features;
let mut validation_feature_list: ArrayVec<_, 3>;
if validation_features_are_enabled {
validation_feature_list = ArrayVec::new();
// Always enable synchronization validation
validation_feature_list
.push(vk::ValidationFeatureEnableEXT::SYNCHRONIZATION_VALIDATION);
// Only enable GPU assisted validation if requested.
if should_enable_gpu_based_validation {
validation_feature_list.push(vk::ValidationFeatureEnableEXT::GPU_ASSISTED);
validation_feature_list
.push(vk::ValidationFeatureEnableEXT::GPU_ASSISTED_RESERVE_BINDING_SLOT);
}
validation_features = vk::ValidationFeaturesEXT::builder()
.enabled_validation_features(&validation_feature_list);
create_info = create_info.push_next(&mut validation_features);
}
unsafe {
profiling::scope!("vkCreateInstance");
entry.create_instance(&create_info, None)

View file

@ -724,13 +724,25 @@ impl crate::Queue<Api> for Queue {
impl From<vk::Result> for crate::DeviceError {
fn from(result: vk::Result) -> Self {
#![allow(unreachable_code)]
match result {
vk::Result::ERROR_OUT_OF_HOST_MEMORY | vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
#[cfg(feature = "oom_panic")]
panic!("Out of memory ({result:?})");
Self::OutOfMemory
}
vk::Result::ERROR_DEVICE_LOST => Self::Lost,
vk::Result::ERROR_DEVICE_LOST => {
#[cfg(feature = "device_lost_panic")]
panic!("Device lost");
Self::Lost
}
_ => {
log::warn!("Unrecognized device error {:?}", result);
#[cfg(feature = "internal_error_panic")]
panic!("Internal error: {result:?}");
log::warn!("Unrecognized device error {result:?}");
Self::Lost
}
}

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"6b0d7ddecc26e3b72cb6d47793770203147f851f048da8d1f5d8f508e40d4f82","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"33e1cf343a848c5deecbac6949d5a1378a70da0a48b2120fc62d600ce98a2da2","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
{"files":{"Cargo.toml":"6b0d7ddecc26e3b72cb6d47793770203147f851f048da8d1f5d8f508e40d4f82","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"7e6e5bf95d87689caf5b6aea92ab9ed889728301fac787437d137bd4f8bbaa60","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}

View file

@ -267,15 +267,74 @@ bitflags::bitflags! {
///
/// This is a web and native feature.
const DEPTH_CLIP_CONTROL = 1 << 0;
/// Allows for explicit creation of textures of format [`TextureFormat::Depth32FloatStencil8`]
///
/// Supported platforms:
/// - Vulkan (mostly)
/// - DX12
/// - Metal
///
/// This is a web and native feature.
const DEPTH32FLOAT_STENCIL8 = 1 << 1;
/// Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
/// with 8 or 16 bytes per block.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
/// bandwidth usage.
///
/// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for BCn formats.
/// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
///
/// Supported Platforms:
/// - desktops
///
/// This is a web and native feature.
const TEXTURE_COMPRESSION_BC = 1 << 2;
/// Enables ETC family of compressed textures. All ETC textures use 4x4 pixel blocks.
/// ETC2 RGB and RGBA1 are 8 bytes per block. RTC2 RGBA8 and EAC are 16 bytes per block.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
/// bandwidth usage.
///
/// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ETC2 formats.
/// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
///
/// Supported Platforms:
/// - Vulkan on Intel
/// - Mobile (some)
///
/// This is a web and native feature.
const TEXTURE_COMPRESSION_ETC2 = 1 << 3;
/// Enables ASTC family of compressed textures. ASTC textures use pixel blocks varying from 4x4 to 12x12.
/// Blocks are always 16 bytes.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
/// bandwidth usage.
///
/// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ASTC formats with Unorm/UnormSrgb channel type.
/// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
///
/// Supported Platforms:
/// - Vulkan on Intel
/// - Mobile (some)
///
/// This is a web and native feature.
const TEXTURE_COMPRESSION_ASTC = 1 << 4;
/// Enables use of Timestamp Queries. These queries tell the current gpu timestamp when
/// all work before the query is finished.
///
/// This feature allows the use of
/// - [`CommandEncoder::write_timestamp`]
/// - [`RenderPassDescriptor::timestamp_writes`]
/// - [`ComputePassDescriptor::timestamp_writes`]
/// to write out timestamps.
/// For timestamps within passes refer to [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`]
///
/// For arbitrary timestamp write commands on encoders refer to [`Features::TIMESTAMP_QUERY_INSIDE_ENCODERS`].
/// For arbitrary timestamp write commands on passes refer to [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`].
///
/// They must be resolved using [`CommandEncoder::resolve_query_sets`] into a buffer,
/// then the result must be multiplied by the timestamp period [`Queue::get_timestamp_period`]
@ -288,7 +347,8 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a web and native feature.
const TIMESTAMP_QUERY = 1 << 1;
const TIMESTAMP_QUERY = 1 << 5;
/// Allows non-zero value for the `first_instance` member in indirect draw calls.
///
/// If this feature is not enabled, and the `first_instance` member is non-zero, the behavior may be:
@ -306,11 +366,7 @@ bitflags::bitflags! {
/// - OpenGL ES / WebGL
///
/// This is a web and native feature.
const INDIRECT_FIRST_INSTANCE = 1 << 2;
// 3..8 available
// Shader:
const INDIRECT_FIRST_INSTANCE = 1 << 6;
/// Allows shaders to acquire the FP16 ability
///
@ -321,18 +377,18 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a web and native feature.
const SHADER_F16 = 1 << 8;
const SHADER_F16 = 1 << 7;
// 9..14 available
// Texture Formats:
// The features starting with a ? are features that might become part of the spec or
// at the very least we can implement as native features; since they should cover all
// possible formats and capabilities across backends.
//
// ? const FORMATS_TIER_1 = 1 << 14; (https://github.com/gpuweb/gpuweb/issues/3837)
// ? const RW_STORAGE_TEXTURE_TIER_1 = 1 << 15; (https://github.com/gpuweb/gpuweb/issues/3838)
/// Allows for usage of textures of format [`TextureFormat::Rg11b10Float`] as a render target
///
/// Supported platforms:
/// - Vulkan
/// - DX12
/// - Metal
///
/// This is a web and native feature.
const RG11B10UFLOAT_RENDERABLE = 1 << 8;
/// Allows the [`wgpu::TextureUsages::STORAGE_BINDING`] usage on textures with format [`TextureFormat::Bgra8unorm`]
///
@ -342,10 +398,8 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a web and native feature.
const BGRA8UNORM_STORAGE = 1 << 16;
const BGRA8UNORM_STORAGE = 1 << 9;
// ? const NORM16_FILTERABLE = 1 << 17; (https://github.com/gpuweb/gpuweb/issues/3839)
// ? const NORM16_RESOLVE = 1 << 18; (https://github.com/gpuweb/gpuweb/issues/3839)
/// Allows textures with formats "r32float", "rg32float", and "rgba32float" to be filterable.
///
@ -356,81 +410,11 @@ bitflags::bitflags! {
/// - GL with one of `GL_ARB_color_buffer_float`/`GL_EXT_color_buffer_float`/`OES_texture_float_linear`
///
/// This is a web and native feature.
const FLOAT32_FILTERABLE = 1 << 19;
const FLOAT32_FILTERABLE = 1 << 10;
// ? const FLOAT32_BLENDABLE = 1 << 20; (https://github.com/gpuweb/gpuweb/issues/3556)
// ? const 32BIT_FORMAT_MULTISAMPLE = 1 << 21; (https://github.com/gpuweb/gpuweb/issues/3844)
// ? const 32BIT_FORMAT_RESOLVE = 1 << 22; (https://github.com/gpuweb/gpuweb/issues/3844)
/// Allows for usage of textures of format [`TextureFormat::Rg11b10Float`] as a render target
///
/// Supported platforms:
/// - Vulkan
/// - DX12
/// - Metal
///
/// This is a web and native feature.
const RG11B10UFLOAT_RENDERABLE = 1 << 23;
/// Allows for explicit creation of textures of format [`TextureFormat::Depth32FloatStencil8`]
///
/// Supported platforms:
/// - Vulkan (mostly)
/// - DX12
/// - Metal
///
/// This is a web and native feature.
const DEPTH32FLOAT_STENCIL8 = 1 << 24;
/// Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
/// with 8 or 16 bytes per block.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
/// bandwidth usage.
///
/// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for BCn formats.
/// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
///
/// Supported Platforms:
/// - desktops
///
/// This is a web and native feature.
const TEXTURE_COMPRESSION_BC = 1 << 25;
/// Enables ETC family of compressed textures. All ETC textures use 4x4 pixel blocks.
/// ETC2 RGB and RGBA1 are 8 bytes per block. RTC2 RGBA8 and EAC are 16 bytes per block.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
/// bandwidth usage.
///
/// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ETC2 formats.
/// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
///
/// Supported Platforms:
/// - Vulkan on Intel
/// - Mobile (some)
///
/// This is a web and native feature.
const TEXTURE_COMPRESSION_ETC2 = 1 << 26;
/// Enables ASTC family of compressed textures. ASTC textures use pixel blocks varying from 4x4 to 12x12.
/// Blocks are always 16 bytes.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
/// bandwidth usage.
///
/// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ASTC formats with Unorm/UnormSrgb channel type.
/// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
///
/// Supported Platforms:
/// - Vulkan on Intel
/// - Mobile (some)
///
/// This is a web and native feature.
const TEXTURE_COMPRESSION_ASTC = 1 << 27;
// ? const TEXTURE_COMPRESSION_ASTC_HDR = 1 << 28; (https://github.com/gpuweb/gpuweb/issues/3856)
// 29..32 should be available but are for now occupied by native only texture related features
// TEXTURE_FORMAT_16BIT_NORM & TEXTURE_COMPRESSION_ASTC_HDR will most likely become web features as well
// TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES might not be necessary if we have all the texture features implemented
// Bits 11-19 available for webgpu features. Should you chose to use some of them for
// for native features, don't forget to update `all_webgpu_mask` and `all_native_mask`
// accordingly.
//
// ---- Restart Numbering for Native Features ---
@ -438,6 +422,21 @@ bitflags::bitflags! {
// Native Features:
//
// The features starting with a ? are features that might become part of the spec or
// at the very least we can implement as native features; since they should cover all
// possible formats and capabilities across backends.
//
// ? const FORMATS_TIER_1 = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3837)
// ? const RW_STORAGE_TEXTURE_TIER_1 = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3838)
// ? const NORM16_FILTERABLE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3839)
// ? const NORM16_RESOLVE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3839)
// ? const FLOAT32_BLENDABLE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3556)
// ? const 32BIT_FORMAT_MULTISAMPLE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3844)
// ? const 32BIT_FORMAT_RESOLVE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3844)
// ? const TEXTURE_COMPRESSION_ASTC_HDR = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3856)
// TEXTURE_FORMAT_16BIT_NORM & TEXTURE_COMPRESSION_ASTC_HDR will most likely become web features as well
// TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES might not be necessary if we have all the texture features implemented
// Texture Formats:
/// Enables normalized `16-bit` texture formats.
@ -448,7 +447,7 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a native only feature.
const TEXTURE_FORMAT_16BIT_NORM = 1 << 29;
const TEXTURE_FORMAT_16BIT_NORM = 1 << 20;
/// Enables ASTC HDR family of compressed textures.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
@ -463,7 +462,7 @@ bitflags::bitflags! {
/// - OpenGL
///
/// This is a native only feature.
const TEXTURE_COMPRESSION_ASTC_HDR = 1 << 30;
const TEXTURE_COMPRESSION_ASTC_HDR = 1 << 21;
/// Enables device specific texture format features.
///
/// See `TextureFormatFeatures` for a listing of the features in question.
@ -475,7 +474,7 @@ bitflags::bitflags! {
/// This extension does not enable additional formats.
///
/// This is a native only feature.
const TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES = 1 << 31;
const TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES = 1 << 22;
// API:
@ -491,11 +490,25 @@ bitflags::bitflags! {
/// - DX12
///
/// This is a native only feature with a [proposal](https://github.com/gpuweb/gpuweb/blob/0008bd30da2366af88180b511a5d0d0c1dffbc36/proposals/pipeline-statistics-query.md) for the web.
const PIPELINE_STATISTICS_QUERY = 1 << 32;
/// Allows for timestamp queries inside render passes.
const PIPELINE_STATISTICS_QUERY = 1 << 23;
/// Allows for timestamp queries directly on command encoders.
///
/// Implies [`Features::TIMESTAMP_QUERY`] is supported.
///
/// Additionally allows for timestamp writes on command encoders
/// using [`CommandEncoder::write_timestamp`].
///
/// Supported platforms:
/// - Vulkan
/// - DX12
/// - Metal
///
/// This is a native only feature.
const TIMESTAMP_QUERY_INSIDE_ENCODERS = 1 << 24;
/// Allows for timestamp queries directly on command encoders.
///
/// Implies [`Features::TIMESTAMP_QUERY`] & [`Features::TIMESTAMP_QUERY_INSIDE_ENCODERS`] is supported.
///
/// Additionally allows for timestamp queries to be used inside render & compute passes using:
/// - [`RenderPassEncoder::write_timestamp`]
/// - [`ComputePassEncoder::write_timestamp`]
@ -508,7 +521,7 @@ bitflags::bitflags! {
/// This is generally not available on tile-based rasterization GPUs.
///
/// This is a native only feature with a [proposal](https://github.com/gpuweb/gpuweb/blob/0008bd30da2366af88180b511a5d0d0c1dffbc36/proposals/timestamp-query-inside-passes.md) for the web.
const TIMESTAMP_QUERY_INSIDE_PASSES = 1 << 33;
const TIMESTAMP_QUERY_INSIDE_PASSES = 1 << 25;
/// Webgpu only allows the MAP_READ and MAP_WRITE buffer usage to be matched with
/// COPY_DST and COPY_SRC respectively. This removes this requirement.
///
@ -522,7 +535,7 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a native only feature.
const MAPPABLE_PRIMARY_BUFFERS = 1 << 34;
const MAPPABLE_PRIMARY_BUFFERS = 1 << 26;
/// Allows the user to create uniform arrays of textures in shaders:
///
/// ex.
@ -545,7 +558,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const TEXTURE_BINDING_ARRAY = 1 << 35;
const TEXTURE_BINDING_ARRAY = 1 << 27;
/// Allows the user to create arrays of buffers in shaders:
///
/// ex.
@ -567,7 +580,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const BUFFER_BINDING_ARRAY = 1 << 36;
const BUFFER_BINDING_ARRAY = 1 << 28;
/// Allows the user to create uniform arrays of storage buffers or textures in shaders,
/// if resp. [`Features::BUFFER_BINDING_ARRAY`] or [`Features::TEXTURE_BINDING_ARRAY`]
/// is supported.
@ -580,7 +593,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const STORAGE_RESOURCE_BINDING_ARRAY = 1 << 37;
const STORAGE_RESOURCE_BINDING_ARRAY = 1 << 29;
/// Allows shaders to index sampled texture and storage buffer resource arrays with dynamically non-uniform values:
///
/// ex. `texture_array[vertex_data]`
@ -605,7 +618,7 @@ bitflags::bitflags! {
/// - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderSampledImageArrayNonUniformIndexing & shaderStorageBufferArrayNonUniformIndexing feature)
///
/// This is a native only feature.
const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 1 << 38;
const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 1 << 30;
/// Allows shaders to index uniform buffer and storage texture resource arrays with dynamically non-uniform values:
///
/// ex. `texture_array[vertex_data]`
@ -630,11 +643,11 @@ bitflags::bitflags! {
/// - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderUniformBufferArrayNonUniformIndexing & shaderStorageTextureArrayNonUniformIndexing feature)
///
/// This is a native only feature.
const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 1 << 39;
const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 1 << 31;
/// Allows the user to create bind groups containing arrays with less bindings than the BindGroupLayout.
///
/// This is a native only feature.
const PARTIALLY_BOUND_BINDING_ARRAY = 1 << 40;
const PARTIALLY_BOUND_BINDING_ARRAY = 1 << 32;
/// Allows the user to call [`RenderPass::multi_draw_indirect`] and [`RenderPass::multi_draw_indexed_indirect`].
///
/// Allows multiple indirect calls to be dispatched from a single buffer.
@ -648,7 +661,7 @@ bitflags::bitflags! {
///
/// [`RenderPass::multi_draw_indirect`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indirect
/// [`RenderPass::multi_draw_indexed_indirect`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indexed_indirect
const MULTI_DRAW_INDIRECT = 1 << 41;
const MULTI_DRAW_INDIRECT = 1 << 33;
/// Allows the user to call [`RenderPass::multi_draw_indirect_count`] and [`RenderPass::multi_draw_indexed_indirect_count`].
///
/// This allows the use of a buffer containing the actual number of draw calls.
@ -661,7 +674,7 @@ bitflags::bitflags! {
///
/// [`RenderPass::multi_draw_indirect_count`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indirect_count
/// [`RenderPass::multi_draw_indexed_indirect_count`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indexed_indirect_count
const MULTI_DRAW_INDIRECT_COUNT = 1 << 42;
const MULTI_DRAW_INDIRECT_COUNT = 1 << 34;
/// Allows the use of push constants: small, fast bits of memory that can be updated
/// inside a [`RenderPass`].
///
@ -681,7 +694,7 @@ bitflags::bitflags! {
/// [`RenderPass`]: ../wgpu/struct.RenderPass.html
/// [`PipelineLayoutDescriptor`]: ../wgpu/struct.PipelineLayoutDescriptor.html
/// [`RenderPass::set_push_constants`]: ../wgpu/struct.RenderPass.html#method.set_push_constants
const PUSH_CONSTANTS = 1 << 43;
const PUSH_CONSTANTS = 1 << 35;
/// Allows the use of [`AddressMode::ClampToBorder`] with a border color
/// of [`SamplerBorderColor::Zero`].
///
@ -692,7 +705,7 @@ bitflags::bitflags! {
/// - OpenGL
///
/// This is a native only feature.
const ADDRESS_MODE_CLAMP_TO_ZERO = 1 << 44;
const ADDRESS_MODE_CLAMP_TO_ZERO = 1 << 36;
/// Allows the use of [`AddressMode::ClampToBorder`] with a border color
/// other than [`SamplerBorderColor::Zero`].
///
@ -703,7 +716,7 @@ bitflags::bitflags! {
/// - OpenGL
///
/// This is a native only feature.
const ADDRESS_MODE_CLAMP_TO_BORDER = 1 << 45;
const ADDRESS_MODE_CLAMP_TO_BORDER = 1 << 37;
/// Allows the user to set [`PolygonMode::Line`] in [`PrimitiveState::polygon_mode`]
///
/// This allows drawing polygons/triangles as lines (wireframe) instead of filled
@ -714,7 +727,7 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a native only feature.
const POLYGON_MODE_LINE = 1 << 46;
const POLYGON_MODE_LINE = 1 << 38;
/// Allows the user to set [`PolygonMode::Point`] in [`PrimitiveState::polygon_mode`]
///
/// This allows only drawing the vertices of polygons/triangles instead of filled
@ -723,7 +736,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const POLYGON_MODE_POINT = 1 << 47;
const POLYGON_MODE_POINT = 1 << 39;
/// Allows the user to set a overestimation-conservative-rasterization in [`PrimitiveState::conservative`]
///
/// Processing of degenerate triangles/lines is hardware specific.
@ -733,7 +746,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const CONSERVATIVE_RASTERIZATION = 1 << 48;
const CONSERVATIVE_RASTERIZATION = 1 << 40;
/// Enables bindings of writable storage buffers and textures visible to vertex shaders.
///
/// Note: some (tiled-based) platforms do not support vertex shaders with any side-effects.
@ -742,14 +755,14 @@ bitflags::bitflags! {
/// - All
///
/// This is a native only feature.
const VERTEX_WRITABLE_STORAGE = 1 << 49;
const VERTEX_WRITABLE_STORAGE = 1 << 41;
/// Enables clear to zero for textures.
///
/// Supported platforms:
/// - All
///
/// This is a native only feature.
const CLEAR_TEXTURE = 1 << 50;
const CLEAR_TEXTURE = 1 << 42;
/// Enables creating shader modules from SPIR-V binary data (unsafe).
///
/// SPIR-V data is not parsed or interpreted in any way; you can use
@ -761,7 +774,7 @@ bitflags::bitflags! {
/// Vulkan implementation.
///
/// This is a native only feature.
const SPIRV_SHADER_PASSTHROUGH = 1 << 51;
const SPIRV_SHADER_PASSTHROUGH = 1 << 43;
/// Enables multiview render passes and `builtin(view_index)` in vertex shaders.
///
/// Supported platforms:
@ -769,7 +782,7 @@ bitflags::bitflags! {
/// - OpenGL (web only)
///
/// This is a native only feature.
const MULTIVIEW = 1 << 52;
const MULTIVIEW = 1 << 44;
/// Enables using 64-bit types for vertex attributes.
///
/// Requires SHADER_FLOAT64.
@ -777,7 +790,7 @@ bitflags::bitflags! {
/// Supported Platforms: N/A
///
/// This is a native only feature.
const VERTEX_ATTRIBUTE_64BIT = 1 << 53;
const VERTEX_ATTRIBUTE_64BIT = 1 << 45;
/// Allows vertex shaders to have outputs which are not consumed
/// by the fragment shader.
///
@ -785,7 +798,7 @@ bitflags::bitflags! {
/// - Vulkan
/// - Metal
/// - OpenGL
const SHADER_UNUSED_VERTEX_OUTPUT = 1 << 54;
const SHADER_UNUSED_VERTEX_OUTPUT = 1 << 46;
/// Allows for creation of textures of format [`TextureFormat::NV12`]
///
/// Supported platforms:
@ -793,16 +806,14 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const TEXTURE_FORMAT_NV12 = 1 << 55;
const TEXTURE_FORMAT_NV12 = 1 << 47;
/// Allows for the creation of ray-tracing acceleration structures.
///
/// Supported platforms:
/// - Vulkan
///
/// This is a native-only feature.
const RAY_TRACING_ACCELERATION_STRUCTURE = 1 << 56;
// 57 available
const RAY_TRACING_ACCELERATION_STRUCTURE = 1 << 48;
// Shader:
@ -812,7 +823,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native-only feature.
const RAY_QUERY = 1 << 58;
const RAY_QUERY = 1 << 49;
/// Enables 64-bit floating point types in SPIR-V shaders.
///
/// Note: even when supported by GPU hardware, 64-bit floating point operations are
@ -822,14 +833,14 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
const SHADER_F64 = 1 << 59;
const SHADER_F64 = 1 << 50;
/// Allows shaders to use i16. Not currently supported in `naga`, only available through `spirv-passthrough`.
///
/// Supported platforms:
/// - Vulkan
///
/// This is a native only feature.
const SHADER_I16 = 1 << 60;
const SHADER_I16 = 1 << 51;
/// Enables `builtin(primitive_index)` in fragment shaders.
///
/// Note: enables geometry processing for pipelines using the builtin.
@ -843,14 +854,14 @@ bitflags::bitflags! {
/// - OpenGL (some)
///
/// This is a native only feature.
const SHADER_PRIMITIVE_INDEX = 1 << 61;
const SHADER_PRIMITIVE_INDEX = 1 << 52;
/// Allows shaders to use the `early_depth_test` attribute.
///
/// Supported platforms:
/// - GLES 3.1+
///
/// This is a native only feature.
const SHADER_EARLY_DEPTH_TEST = 1 << 62;
const SHADER_EARLY_DEPTH_TEST = 1 << 53;
/// Allows two outputs from a shader to be used for blending.
/// Note that dual-source blending doesn't support multiple render targets.
///
@ -861,7 +872,7 @@ bitflags::bitflags! {
/// - Metal (with MSL 1.2+)
/// - Vulkan (with dualSrcBlend)
/// - DX12
const DUAL_SOURCE_BLENDING = 1 << 63;
const DUAL_SOURCE_BLENDING = 1 << 54;
}
}
@ -870,12 +881,12 @@ impl_bitflags!(Features);
impl Features {
/// Mask of all features which are part of the upstream WebGPU standard.
pub const fn all_webgpu_mask() -> Self {
Self::from_bits_truncate(0x0000_0000_0000_FFFF)
Self::from_bits_truncate(0xFFFFF)
}
/// Mask of all features that are only available when targeting native (not web).
pub const fn all_native_mask() -> Self {
Self::from_bits_truncate(0xFFFF_FFFF_FFFF_0000)
Self::from_bits_truncate(!Self::all_webgpu_mask().bits())
}
}
@ -904,13 +915,15 @@ bitflags::bitflags! {
/// This mainly applies to a Vulkan driver's compliance version. If the major compliance version
/// is `0`, then the driver is ignored. This flag allows that driver to be enabled for testing.
const ALLOW_UNDERLYING_NONCOMPLIANT_ADAPTER = 1 << 3;
/// Enable GPU-based validation. Currently, this only changes behavior on the DX12
/// backend.
/// Enable GPU-based validation. Implies [`Self::VALIDATION`]. Currently, this only changes
/// behavior on the DX12 and Vulkan backends.
///
/// Supported platforms:
///
/// - D3D12; called ["GPU-based validation", or
/// "GBV"](https://web.archive.org/web/20230206120404/https://learn.microsoft.com/en-us/windows/win32/direct3d12/using-d3d12-debug-layer-gpu-based-validation)
/// - Vulkan, via the `VK_LAYER_KHRONOS_validation` layer; called ["GPU-Assisted
/// Validation"](https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/e45aeb85079e0835694cb8f03e6681fd18ae72c9/docs/gpu_validation.md#gpu-assisted-validation)
const GPU_BASED_VALIDATION = 1 << 4;
}
}
@ -924,7 +937,12 @@ impl Default for InstanceFlags {
impl InstanceFlags {
/// Enable recommended debugging and validation flags.
pub fn debugging() -> Self {
InstanceFlags::DEBUG | InstanceFlags::VALIDATION | InstanceFlags::GPU_BASED_VALIDATION
InstanceFlags::DEBUG | InstanceFlags::VALIDATION
}
/// Enable advanced debugging and validation flags (potentially very slow).
pub fn advanced_debugging() -> Self {
Self::debugging() | InstanceFlags::GPU_BASED_VALIDATION
}
/// Infer good defaults from the build type
@ -1078,6 +1096,11 @@ pub struct Limits {
/// inter-stage communication (vertex outputs to fragment inputs). Defaults to 60.
/// Higher is "better".
pub max_inter_stage_shader_components: u32,
/// The maximum allowed number of color attachments.
pub max_color_attachments: u32,
/// The maximum number of bytes necessary to hold one sample (pixel or subpixel) of render
/// pipeline output data, across all color attachments.
pub max_color_attachment_bytes_per_sample: u32,
/// Maximum number of bytes used for workgroup memory in a compute entry point. Defaults to
/// 16352. Higher is "better".
pub max_compute_workgroup_storage_size: u32,
@ -1139,6 +1162,8 @@ impl Default for Limits {
min_uniform_buffer_offset_alignment: 256,
min_storage_buffer_offset_alignment: 256,
max_inter_stage_shader_components: 60,
max_color_attachments: 8,
max_color_attachment_bytes_per_sample: 32,
max_compute_workgroup_storage_size: 16384,
max_compute_invocations_per_workgroup: 256,
max_compute_workgroup_size_x: 256,
@ -1180,6 +1205,8 @@ impl Limits {
/// min_uniform_buffer_offset_alignment: 256,
/// min_storage_buffer_offset_alignment: 256,
/// max_inter_stage_shader_components: 60,
/// max_color_attachments: 8,
/// max_color_attachment_bytes_per_sample: 32,
/// max_compute_workgroup_storage_size: 16352,
/// max_compute_invocations_per_workgroup: 256,
/// max_compute_workgroup_size_x: 256,
@ -1214,6 +1241,8 @@ impl Limits {
min_uniform_buffer_offset_alignment: 256,
min_storage_buffer_offset_alignment: 256,
max_inter_stage_shader_components: 60,
max_color_attachments: 8,
max_color_attachment_bytes_per_sample: 32,
max_compute_workgroup_storage_size: 16352,
max_compute_invocations_per_workgroup: 256,
max_compute_workgroup_size_x: 256,
@ -1254,6 +1283,8 @@ impl Limits {
/// min_uniform_buffer_offset_alignment: 256,
/// min_storage_buffer_offset_alignment: 256,
/// max_inter_stage_shader_components: 31,
/// max_color_attachments: 8,
/// max_color_attachment_bytes_per_sample: 32,
/// max_compute_workgroup_storage_size: 0, // +
/// max_compute_invocations_per_workgroup: 0, // +
/// max_compute_workgroup_size_x: 0, // +
@ -3522,6 +3553,87 @@ impl TextureFormat {
}
}
/// The number of bytes occupied per pixel in a color attachment
/// <https://gpuweb.github.io/gpuweb/#render-target-pixel-byte-cost>
pub fn target_pixel_byte_cost(&self) -> Option<u32> {
match *self {
Self::R8Unorm | Self::R8Uint | Self::R8Sint => Some(1),
Self::Rg8Unorm
| Self::Rg8Uint
| Self::Rg8Sint
| Self::R16Uint
| Self::R16Sint
| Self::R16Float => Some(2),
Self::Rgba8Uint
| Self::Rgba8Sint
| Self::Rg16Uint
| Self::Rg16Sint
| Self::Rg16Float
| Self::R32Uint
| Self::R32Sint
| Self::R32Float => Some(4),
Self::Rgba8Unorm
| Self::Rgba8UnormSrgb
| Self::Bgra8Unorm
| Self::Bgra8UnormSrgb
| Self::Rgba16Uint
| Self::Rgba16Sint
| Self::Rgba16Float
| Self::Rg32Uint
| Self::Rg32Sint
| Self::Rg32Float
| Self::Rgb10a2Uint
| Self::Rgb10a2Unorm
| Self::Rg11b10Float => Some(8),
Self::Rgba32Uint | Self::Rgba32Sint | Self::Rgba32Float => Some(16),
Self::Rgba8Snorm | Self::Rg8Snorm | Self::R8Snorm => None,
_ => None,
}
}
/// See <https://gpuweb.github.io/gpuweb/#render-target-component-alignment>
pub fn target_component_alignment(&self) -> Option<u32> {
match self {
Self::R8Unorm
| Self::R8Snorm
| Self::R8Uint
| Self::R8Sint
| Self::Rg8Unorm
| Self::Rg8Snorm
| Self::Rg8Uint
| Self::Rg8Sint
| Self::Rgba8Unorm
| Self::Rgba8UnormSrgb
| Self::Rgba8Snorm
| Self::Rgba8Uint
| Self::Rgba8Sint
| Self::Bgra8Unorm
| Self::Bgra8UnormSrgb => Some(1),
Self::R16Uint
| Self::R16Sint
| Self::R16Float
| Self::Rg16Uint
| Self::Rg16Sint
| Self::Rg16Float
| Self::Rgba16Uint
| Self::Rgba16Sint
| Self::Rgba16Float => Some(2),
Self::R32Uint
| Self::R32Sint
| Self::R32Float
| Self::Rg32Uint
| Self::Rg32Sint
| Self::Rg32Float
| Self::Rgba32Uint
| Self::Rgba32Sint
| Self::Rgba32Float
| Self::Rgb10a2Uint
| Self::Rgb10a2Unorm
| Self::Rg11b10Float => Some(4),
_ => None,
}
}
/// Returns the number of components this format has.
pub fn components(&self) -> u8 {
self.components_with_aspect(TextureAspect::All)