Bug 1884879 - [css-syntax] Implement recent syntax spec change to avoid parsing custom properties that look like selectors. r=firefox-style-system-reviewers,zrhoffman,supply-chain-reviewers,glandium

This implements the following step:

> If the first two non-<whitespace-token> values of rule’s prelude are
> an <ident-token> whose value starts with "--" followed by a
> <colon-token>, then...

From https://drafts.csswg.org/css-syntax/#consume-qualified-rule
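
For illustration (an editorial sketch, not part of the upstream commit message),
consider a nested construct such as:

    div {
      --x:hover { }
      .b { }
    }

Before this change, a parser consuming a qualified rule could treat the
selector-looking prelude `--x:hover` as the start of a nested rule. With the
new check, an <ident-token> starting with "--" followed by a <colon-token> can
never begin a rule prelude, so the construct is handled as a custom property
declaration instead; the updated web-platform test below expects `--x` to hold
the value `hover { }\n .b { }`.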

Differential Revision: https://phabricator.services.mozilla.com/D207796
Author: Emilio Cobos Álvarez
Date:   2024-04-30 23:49:44 +00:00
parent dd51a800c4
commit 13f6ad5ef7
32 changed files with 287 additions and 363 deletions

@ -105,11 +105,6 @@ git = "https://github.com/seanmonstar/warp"
rev = "9d081461ae1167eb321585ce424f4fef6cf0092b"
replace-with = "vendored-sources"
[source."git+https://github.com/servo/rust-cssparser?rev=aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0"]
git = "https://github.com/servo/rust-cssparser"
rev = "aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0"
replace-with = "vendored-sources"
[source."git+https://github.com/servo/unicode-bidi?rev=ca612daf1c08c53abe07327cb3e6ef6e0a760f0c"]
git = "https://github.com/servo/unicode-bidi"
rev = "ca612daf1c08c53abe07327cb3e6ef6e0a760f0c"

Cargo.lock generated
@ -1105,8 +1105,9 @@ dependencies = [
[[package]]
name = "cssparser"
version = "0.33.0"
source = "git+https://github.com/servo/rust-cssparser?rev=aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0#aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0"
version = "0.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7c66d1cd8ed61bf80b38432613a7a2f09401ab8d0501110655f8b341484a3e3"
dependencies = [
"cssparser-macros",
"dtoa-short",
@ -1118,7 +1119,8 @@ dependencies = [
[[package]]
name = "cssparser-macros"
version = "0.6.1"
source = "git+https://github.com/servo/rust-cssparser?rev=aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0#aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331"
dependencies = [
"quote",
"syn",

@ -188,8 +188,6 @@ moz_asserts = { path = "mozglue/static/rust/moz_asserts" }
rure = { path = "third_party/rust/rure" }
# To-be-published changes.
cssparser = { git = "https://github.com/servo/rust-cssparser", rev = "aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0" }
cssparser-macros = { git = "https://github.com/servo/rust-cssparser", rev = "aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0" }
unicode-bidi = { git = "https://github.com/servo/unicode-bidi", rev = "ca612daf1c08c53abe07327cb3e6ef6e0a760f0c" }
# Other overrides

@ -31,7 +31,7 @@ accountable-refcell = { version = "0.2.0", optional = true }
app_units = "0.7"
content-security-policy = { version = "0.4.0", features = ["serde"], optional = true }
crossbeam-channel = { version = "0.4", optional = true }
cssparser = "0.33"
cssparser = "0.34"
dom = { path = "../../../dom/base/rust" }
euclid = "0.22"
hyper = { version = "0.12", optional = true }

@ -19,7 +19,7 @@ bench = []
[dependencies]
bitflags = "2"
cssparser = "0.33"
cssparser = "0.34"
derive_more = { version = "0.99", default-features = false, features = ["add", "add_assign"] }
fxhash = "0.2"
log = "0.4"

@ -30,7 +30,7 @@ arrayvec = "0.7"
atomic_refcell = "0.1"
bitflags = "2"
byteorder = "1.0"
cssparser = "0.33"
cssparser = "0.34"
derive_more = { version = "0.99", default-features = false, features = ["add", "add_assign", "deref", "deref_mut", "from"] }
dom = { path = "../../../dom/base/rust" }
new_debug_unreachable = "1.0"

@ -25,7 +25,6 @@ use cssparser::{
color::{clamp_floor_256_f32, clamp_unit_f32, parse_hash_color, PredefinedColorSpace, OPAQUE},
match_ignore_ascii_case, CowRcStr, Parser, Token,
};
use std::str::FromStr;
use style_traits::{ParseError, StyleParseErrorKind};
/// Returns true if the relative color syntax pref is enabled.
@ -411,14 +410,7 @@ fn parse_color_with_color_space<'i, 't>(
component_parser: &ComponentParser<'_, '_>,
arguments: &mut Parser<'i, 't>,
) -> Result<ColorFunction, ParseError<'i>> {
let color_space = {
let location = arguments.current_source_location();
let ident = arguments.expect_ident()?;
PredefinedColorSpace::from_str(ident)
.map_err(|_| location.new_unexpected_token_error(Token::Ident(ident.clone())))?
};
let color_space = PredefinedColorSpace::parse(arguments)?;
let component_parser = ComponentParser {
context: component_parser.context,
origin_color: component_parser
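
(Editorial note: `PredefinedColorSpace::parse`, now used above, consumes the
color-space ident inside the CSS color() function; an illustrative value it
would accept:

    .accent { color: color(display-p3 1 0 0 / 0.5); }

Anything other than a known color-space ident is reported as an unexpected
token error, per the cssparser change further down.)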

@ -16,7 +16,7 @@ gecko = ["nsstring"]
[dependencies]
app_units = "0.7"
bitflags = "2"
cssparser = "0.33"
cssparser = "0.34"
euclid = "0.22"
lazy_static = "1"
malloc_size_of = { path = "../malloc_size_of" }

@ -14,7 +14,7 @@ servo = ["cssparser/serde", "string_cache"]
gecko = []
[dependencies]
cssparser = "0.33"
cssparser = "0.34"
servo_arc = { path = "../servo_arc" }
smallbitvec = "2.1.1"
smallvec = "1.0"

@ -15,7 +15,7 @@ gecko_refcount_logging = ["style/gecko_refcount_logging", "servo_arc/gecko_refco
[dependencies]
atomic_refcell = "0.1"
bincode = "1.0"
cssparser = "0.33"
cssparser = "0.34"
cstr = "0.2"
dom = { path = "../../../dom/base/rust" }
gecko-profiler = { path = "../../../tools/profiler/rust-api" }

@ -12,7 +12,7 @@ doctest = false
[dependencies]
byteorder = "1.0"
app_units = "0.7"
cssparser = "0.33"
cssparser = "0.34"
euclid = "0.22"
html5ever = "0.22"
parking_lot = "0.10"

@ -1191,6 +1191,12 @@ criteria = "safe-to-deploy"
delta = "0.33.0 -> 0.33.0@git:aaa966d9d6ae70c4b8a62bb5e3a14c068bb7dff0"
notes = "Only one minimal change exposing a previously-private enumeration."
[[audits.cssparser]]
who = "Emilio Cobos Álvarez <emilio@crisal.io>"
criteria = "safe-to-deploy"
delta = "0.33.0 -> 0.34.0"
notes = "I'm the publisher of the crate, and either myself or other Mozilla folks have been authors or reviewers of all the changes."
[[audits.cssparser-color]]
who = "Emilio Cobos Álvarez <emilio@crisal.io>"
criteria = "safe-to-deploy"

@ -43,14 +43,6 @@ notes = "This is a pinned version of the upstream code, presumably to get a fix
audit-as-crates-io = true
notes = "This is upstream plus a warning fix from bug 1823866."
[policy.cssparser]
audit-as-crates-io = true
notes = "Upstream release plus a couple unpublished changes"
[policy.cssparser-macros]
audit-as-crates-io = true
notes = "Upstream release plus a couple unpublished changes"
[policy.d3d12]
audit-as-crates-io = true
notes = "Part of the wgpu repository, pinned as the rest of wgpu crates."

@ -1,12 +0,0 @@
[custom-property-rule-ambiguity.html]
[Rule that looks like a custom property declaration is ignored]
expected: FAIL
[Rule that looks like an invalid custom property declaration is ignored]
expected: FAIL
[Nested rule that looks like a custom property declaration]
expected: FAIL
[Nested rule that looks like an invalid custom property declaration]
expected: FAIL

@ -47,7 +47,7 @@
assert_equals(rules[0].selectorText, 'div');
let div = rules[0];
let x = div.style.getPropertyValue('--x');
assert_equals(x, 'hover { }\n .b { }');
assert_equals(x.trim(), 'hover { }\n .b { }');
let childRules = div.cssRules;
assert_equals(childRules.length, 1);
assert_equals(childRules[0].selectorText, '& .a');

@ -1 +1 @@
{"files":{"Cargo.toml":"b3a51cde73d95cac878371677b7e3a847b8726b49ab61204682c691dc1b1b81c","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","lib.rs":"10e68d5a92a053ff498cb1caa8290e508f691e32b73222a5a4737ee9a4097ce2"},"package":null}
{"files":{"Cargo.toml":"d4a43ad31d5048cf19ee80ec38de90fa98b9b9902b97d61e4edc940246806295","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","lib.rs":"10e68d5a92a053ff498cb1caa8290e508f691e32b73222a5a4737ee9a4097ce2"},"package":"13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331"}

@ -23,8 +23,8 @@ repository = "https://github.com/servo/rust-cssparser"
path = "lib.rs"
proc-macro = true
[dependencies]
quote = "1"
[dependencies.quote]
version = "1"
[dependencies.syn]
version = "2"

@ -1 +1 @@
{"files":{".github/workflows/main.yml":"9fb6be1c14d9107ac4613e660d111d469722839ddf8a59e781c54a3607676e9e","Cargo.toml":"2c12f0dd7e94af4ca4ae29a741d2de2447c705f83fec0ab601b3548d2b7c64f4","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"53a6805edd80f642473514cb93f1f4197e17a911d66a2dfcefc3dc5e82bac206","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"eedf03d8ba8ca54a744617fdd945c80cbae73f99b6dff06f43a39764a93a3ac5","src/cow_rc_str.rs":"4d172d3633ef55af815784fbaee03cbcf85796a380765a0af09bbb6ca5b6fbab","src/from_bytes.rs":"b1cf15c4e975523fef46b575598737a39f3c63e5ce0b2bfd6ec627c69c6ea54a","src/lib.rs":"13be989c091fb59ecab3e855e76e7c3468f465f63e7391303fa51f251441916a","src/macros.rs":"c6e06fd014ee8c6212c72928e8b474fb1cd13a0b604055e9943ed05179a0e63b","src/nth.rs":"2fc26915f0a36cb22ac45dd9a7ecbdc64c327b2ec135370258ec3db9f9985460","src/parser.rs":"51d86df7f788da4ee6bdef8e92474bf118ac26f8954f82a14d11f1f578b6998e","src/rules_and_declarations.rs":"180c797c75a1f7298c4e47dc819cd5f8c8d911d20492eac88f10d910fd5258d4","src/serializer.rs":"b3d59a3b72a67f7bcd0f949497445d756f584661424682d03a3a1030ed4862b1","src/size_of_tests.rs":"da0cbcaa304f7800e9122e2bce0a11d42a70b9012e646a723cb23ee74a6b858c","src/tests.rs":"aa67c41be76b2a944d4d6dd162c3e8a77be1f877e94ac62e8f065adb5407a669","src/tokenizer.rs":"1f690582d4cdba930a379e5808d54f4085e3c6b60345e55c1141df7e263c722a","src/unicode_range.rs":"20d96f06fbb73921e308cc340c9fe065e27f19843005689fb259007a6a372bcc"},"package":null}
{"files":{"Cargo.toml":"f93c7e90c8e06349e2c3faee56f48c9121ab0a1571db502143c8c50df75c98a4","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"95e81e8f22062ba196eb8229a749327c063620ccf31ce1dd01b7ea0529840280","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"5edc02f840f6837580e800c860e91c8ea28c77f0dd157bffdf648827c476d01c","src/cow_rc_str.rs":"9bb6f4ca76ec51bcf85ec3ff23b80c76a0113df0856a60f34fbcd43e869a19ad","src/from_bytes.rs":"b1cf15c4e975523fef46b575598737a39f3c63e5ce0b2bfd6ec627c69c6ea54a","src/lib.rs":"13be989c091fb59ecab3e855e76e7c3468f465f63e7391303fa51f251441916a","src/macros.rs":"64ad9e506e5cea52767a5177779ac4a1cbdac1b2188abaa1291e9feb8f1653bf","src/nth.rs":"972cc94275126e747c95621e8c5f56ce5d869924e60bb0dc990f4c98f3d74890","src/parser.rs":"beb4327ada3ae9f0f6cef29a88ef6d210b9942dc459369f7ffc4529a5f413f47","src/rules_and_declarations.rs":"4b16d61e017de50c81ac3aa7ff78eeb186af1e233bbd1e93b31c2c3aff944ddc","src/serializer.rs":"807ae7f49abd6a0a83172321ec95624de2266f6caa687b014c58e9f9660b629a","src/size_of_tests.rs":"da0cbcaa304f7800e9122e2bce0a11d42a70b9012e646a723cb23ee74a6b858c","src/tests.rs":"00c370284ff862faec40e580507593dad51bff608360f8562634fb6948eee2f0","src/tokenizer.rs":"99977cf09f2e8d1b45fe98a4db2eda89defd64cb99c948885c0cec2122951b41","src/unicode_range.rs":"db0217629bf70dafef5cc93a9615d54dd0f2a5bfd19d31e1d06bf4c7b006dd1e"},"package":"b7c66d1cd8ed61bf80b38432613a7a2f09401ab8d0501110655f8b341484a3e3"}

@ -1,78 +0,0 @@
name: CI
on:
push:
branches: [master]
pull_request:
workflow_dispatch:
merge_group:
types: [checks_requested]
jobs:
linux-ci:
name: Linux
runs-on: ubuntu-latest
strategy:
matrix:
toolchain:
- nightly
- beta
- stable
- 1.63.0
features:
-
- --features dummy_match_byte
include:
- toolchain: nightly
features: --features bench
- toolchain: nightly
features: --features bench,dummy_match_byte
steps:
- uses: actions/checkout@v2
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.toolchain }}
override: true
components: ${{ matrix.toolchain == 'nightly' && 'miri,rust-src' || '' }}
- name: Cargo build
run: cargo build ${{ matrix.features }}
- name: Cargo doc
run: cargo doc ${{ matrix.features }}
- name: Cargo test
run: cargo test ${{ matrix.features }}
- name: macros build
run: cargo build
working-directory: macros
- name: Color build
run: cargo build
working-directory: color
- name: Color test
run: cargo test
working-directory: color
- name: Cargo miri test
if: "matrix.toolchain == 'nightly'"
run: cargo miri test --features skip_long_tests ${{ matrix.features }}
build_result:
name: Result
runs-on: ubuntu-latest
needs:
- "linux-ci"
steps:
- name: Mark the job as successful
run: exit 0
if: success()
- name: Mark the job as unsuccessful
run: exit 1
if: "!success()"

@ -13,7 +13,7 @@
edition = "2018"
rust-version = "1.63"
name = "cssparser"
version = "0.33.0"
version = "0.34.0"
authors = ["Simon Sapin <simon.sapin@exyr.org>"]
exclude = [
"src/css-parsing-tests/**",
@ -30,36 +30,41 @@ keywords = [
license = "MPL-2.0"
repository = "https://github.com/servo/rust-cssparser"
[dependencies]
dtoa-short = "0.3"
itoa = "1.0"
smallvec = "1.0"
[profile.profiling]
debug = 2
inherits = "release"
[dependencies.cssparser-macros]
version = "0.6.1"
path = "./macros"
[dependencies.dtoa-short]
version = "0.3"
[dependencies.itoa]
version = "1.0"
[dependencies.phf]
version = ">=0.8,<=0.11"
version = "0.11.2"
features = ["macros"]
[dependencies.serde]
version = "1.0"
features = ["derive"]
optional = true
[dev-dependencies]
difference = "2.0"
encoding_rs = "0.8"
serde_json = "1.0"
[dependencies.smallvec]
version = "1.0"
[dev-dependencies.difference]
version = "2.0"
[dev-dependencies.encoding_rs]
version = "0.8"
[dev-dependencies.serde_json]
version = "1.0.25"
[features]
bench = []
dummy_match_byte = []
skip_long_tests = []
[workspace]
members = [
".",
"./macros",
"./color",
]

@ -3,7 +3,7 @@ rust-cssparser
[![Build Status](https://github.com/servo/rust-cssparser/actions/workflows/main.yml/badge.svg)](https://github.com/servo/rust-cssparser/actions)
[Documentation](https://docs.rs/cssparser/)
[Documentation](https://docs.rs/cssparser)
Rust implementation of
[CSS Syntax Module Level 3](https://drafts.csswg.org/css-syntax/)
@ -53,5 +53,5 @@ Parsing CSS involves a series of steps:
It does however provide some helper functions to parse [CSS colors](src/color.rs)
and [An+B](src/nth.rs) (the argument to `:nth-child()` and related selectors).
See [Servo's `style` crate](https://github.com/servo/servo/tree/master/components/style)
See [Servo's `style` crate](https://github.com/servo/stylo/tree/main/style)
for an example of a parser based on rust-cssparser.

@ -14,9 +14,8 @@
/// The opaque alpha value of 1.0.
pub const OPAQUE: f32 = 1.0;
use crate::ToCss;
use crate::{BasicParseError, Parser, ToCss, Token};
use std::fmt;
use std::str::FromStr;
/// Clamp a 0..1 number to a 0..255 range to u8.
///
@ -76,7 +75,9 @@ pub fn serialize_color_alpha(
/// A Predefined color space specified in:
/// <https://drafts.csswg.org/css-color-4/#predefined>
#[derive(Clone, Copy, PartialEq, Debug)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(tag = "type"))]
pub enum PredefinedColorSpace {
/// <https://drafts.csswg.org/css-color-4/#predefined-sRGB>
Srgb,
@ -97,36 +98,21 @@ pub enum PredefinedColorSpace {
}
impl PredefinedColorSpace {
/// Returns the string value of the predefined color space.
pub fn as_str(&self) -> &str {
match self {
PredefinedColorSpace::Srgb => "srgb",
PredefinedColorSpace::SrgbLinear => "srgb-linear",
PredefinedColorSpace::DisplayP3 => "display-p3",
PredefinedColorSpace::A98Rgb => "a98-rgb",
PredefinedColorSpace::ProphotoRgb => "prophoto-rgb",
PredefinedColorSpace::Rec2020 => "rec2020",
PredefinedColorSpace::XyzD50 => "xyz-d50",
PredefinedColorSpace::XyzD65 => "xyz-d65",
}
}
}
/// Parse a PredefinedColorSpace from the given input.
pub fn parse<'i>(input: &mut Parser<'i, '_>) -> Result<Self, BasicParseError<'i>> {
let location = input.current_source_location();
impl FromStr for PredefinedColorSpace {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match_ignore_ascii_case! { s,
"srgb" => PredefinedColorSpace::Srgb,
"srgb-linear" => PredefinedColorSpace::SrgbLinear,
"display-p3" => PredefinedColorSpace::DisplayP3,
"a98-rgb" => PredefinedColorSpace::A98Rgb,
"prophoto-rgb" => PredefinedColorSpace::ProphotoRgb,
"rec2020" => PredefinedColorSpace::Rec2020,
"xyz-d50" => PredefinedColorSpace::XyzD50,
"xyz" | "xyz-d65" => PredefinedColorSpace::XyzD65,
_ => return Err(()),
let ident = input.expect_ident()?;
Ok(match_ignore_ascii_case! { ident,
"srgb" => Self::Srgb,
"srgb-linear" => Self::SrgbLinear,
"display-p3" => Self::DisplayP3,
"a98-rgb" => Self::A98Rgb,
"prophoto-rgb" => Self::ProphotoRgb,
"rec2020" => Self::Rec2020,
"xyz-d50" => Self::XyzD50,
"xyz" | "xyz-d65" => Self::XyzD65,
_ => return Err(location.new_basic_unexpected_token_error(Token::Ident(ident.clone()))),
})
}
}
@ -136,11 +122,21 @@ impl ToCss for PredefinedColorSpace {
where
W: fmt::Write,
{
dest.write_str(self.as_str())
dest.write_str(match self {
Self::Srgb => "srgb",
Self::SrgbLinear => "srgb-linear",
Self::DisplayP3 => "display-p3",
Self::A98Rgb => "a98-rgb",
Self::ProphotoRgb => "prophoto-rgb",
Self::Rec2020 => "rec2020",
Self::XyzD50 => "xyz-d50",
Self::XyzD65 => "xyz-d65",
})
}
}
/// Parse a color hash, without the leading '#' character.
#[allow(clippy::result_unit_err)]
#[inline]
pub fn parse_hash_color(value: &[u8]) -> Result<(u8, u8, u8, f32), ()> {
Ok(match value.len() {
@ -328,6 +324,7 @@ ascii_case_insensitive_phf_map! {
/// Returns the named color with the given name.
/// <https://drafts.csswg.org/css-color-4/#typedef-named-color>
#[allow(clippy::result_unit_err)]
#[inline]
pub fn parse_named_color(ident: &str) -> Result<(u8, u8, u8), ()> {
named_colors::get(ident).copied().ok_or(())
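
(Editorial note: the two helpers above implement the hex and named color fast
paths; illustrative CSS values they correspond to, assuming standard CSS hex
color forms:

    #f09          /* 3 digits, parse_hash_color expands to rgb(255 0 153) */
    #ff0099aa     /* 8 digits, the last two are the alpha channel */
    rebeccapurple /* looked up via parse_named_color */
)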

@ -4,7 +4,7 @@
use std::borrow::{Borrow, Cow};
use std::rc::Rc;
use std::{cmp, fmt, hash, marker, mem, ops, slice, str, ptr};
use std::{cmp, fmt, hash, marker, mem, ops, ptr, slice, str};
/// A string that is either shared (heap-allocated and reference-counted) or borrowed.
///
@ -23,9 +23,9 @@ pub struct CowRcStr<'a> {
phantom: marker::PhantomData<Result<&'a str, Rc<String>>>,
}
fn _static_assert_same_size<'a>() {
fn _static_assert_same_size() {
// "Instantiate" the generic function without calling it.
let _ = mem::transmute::<CowRcStr<'a>, Option<CowRcStr<'a>>>;
let _ = mem::transmute::<CowRcStr<'_>, Option<CowRcStr<'_>>>;
}
impl<'a> From<Cow<'a, str>> for CowRcStr<'a> {

@ -182,7 +182,7 @@ pub fn _cssparser_internal_to_lowercase<'a>(
let input_bytes =
unsafe { &*(input.as_bytes() as *const [u8] as *const [MaybeUninit<u8>]) };
buffer.copy_from_slice(&*input_bytes);
buffer.copy_from_slice(input_bytes);
// Same as above re layout, plus these bytes have been initialized:
let buffer = unsafe { &mut *(buffer as *mut [MaybeUninit<u8>] as *mut [u8]) };
@ -195,7 +195,7 @@ pub fn _cssparser_internal_to_lowercase<'a>(
}
Some(
match input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) {
match input.bytes().position(|byte| byte.is_ascii_uppercase()) {
Some(first_uppercase) => make_ascii_lowercase(buffer, input, first_uppercase),
// common case: input is already lower-case
None => input,

@ -7,8 +7,8 @@ use super::{BasicParseError, Parser, ParserInput, Token};
/// Parse the *An+B* notation, as found in the `:nth-child()` selector.
/// The input is typically the arguments of a function,
/// in which case the caller needs to check if the arguments parser is exhausted.
/// Return `Ok((A, B))`, or `Err(())` for a syntax error.
pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> {
/// Return `Ok((A, B))`, or an `Err(..)` for a syntax error.
pub fn parse_nth<'i>(input: &mut Parser<'i, '_>) -> Result<(i32, i32), BasicParseError<'i>> {
match *input.next()? {
Token::Number {
int_value: Some(b), ..
@ -22,7 +22,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
unit,
"n" => Ok(parse_b(input, a)?),
"n-" => Ok(parse_signless_b(input, a, -1)?),
_ => match parse_n_dash_digits(&*unit) {
_ => match parse_n_dash_digits(unit) {
Ok(b) => Ok((a, b)),
Err(()) => {
let unit = unit.clone();
@ -40,8 +40,8 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
"n-" => Ok(parse_signless_b(input, 1, -1)?),
"-n-" => Ok(parse_signless_b(input, -1, -1)?),
_ => {
let (slice, a) = if value.starts_with("-") {
(&value[1..], -1)
let (slice, a) = if let Some(stripped) = value.strip_prefix('-') {
(stripped, -1)
} else {
(&**value, 1)
};
@ -81,7 +81,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
}
}
fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
fn parse_b<'i>(input: &mut Parser<'i, '_>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
let start = input.state();
match input.next() {
Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1),
@ -98,8 +98,8 @@ fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), Bas
}
}
fn parse_signless_b<'i, 't>(
input: &mut Parser<'i, 't>,
fn parse_signless_b<'i>(
input: &mut Parser<'i, '_>,
a: i32,
b_sign: i32,
) -> Result<(i32, i32), BasicParseError<'i>> {
@ -118,7 +118,7 @@ fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
let bytes = string.as_bytes();
if bytes.len() >= 3
&& bytes[..2].eq_ignore_ascii_case(b"n-")
&& bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
&& bytes[2..].iter().all(|&c| c.is_ascii_digit())
{
Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign
} else {
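
(For context, `parse_nth` above handles the An+B microsyntax; illustrative
selectors and the (A, B) pairs they parse to, assuming standard An+B rules:

    li:nth-child(2n+1)  /* (2, 1)  */
    li:nth-child(odd)   /* (2, 1)  */
    li:nth-child(n-2)   /* (1, -2), via parse_n_dash_digits */
    li:nth-child(-n+3)  /* (-1, 3) */
)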

@ -53,7 +53,7 @@ impl ParserState {
///
/// Would need to scan the whole {} block to find a semicolon, only for parsing getting restarted
/// as a qualified rule later.
#[derive(Clone, Copy, Debug, PartialEq)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ParseUntilErrorBehavior {
/// Consume until we see the relevant delimiter or the end of the stream.
Consume,
@ -116,18 +116,30 @@ impl<'i, T> From<BasicParseError<'i>> for ParseError<'i, T> {
impl SourceLocation {
/// Create a new BasicParseError at this location for an unexpected token
#[inline]
pub fn new_basic_unexpected_token_error<'i>(self, token: Token<'i>) -> BasicParseError<'i> {
pub fn new_basic_unexpected_token_error(self, token: Token<'_>) -> BasicParseError<'_> {
self.new_basic_error(BasicParseErrorKind::UnexpectedToken(token))
}
/// Create a new BasicParseError at this location
#[inline]
pub fn new_basic_error(self, kind: BasicParseErrorKind<'_>) -> BasicParseError<'_> {
BasicParseError {
kind: BasicParseErrorKind::UnexpectedToken(token),
kind,
location: self,
}
}
/// Create a new ParseError at this location for an unexpected token
#[inline]
pub fn new_unexpected_token_error<'i, E>(self, token: Token<'i>) -> ParseError<'i, E> {
pub fn new_unexpected_token_error<E>(self, token: Token<'_>) -> ParseError<'_, E> {
self.new_error(BasicParseErrorKind::UnexpectedToken(token))
}
/// Create a new basic ParseError at the current location
#[inline]
pub fn new_error<E>(self, kind: BasicParseErrorKind<'_>) -> ParseError<'_, E> {
ParseError {
kind: ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(token)),
kind: ParseErrorKind::Basic(kind),
location: self,
}
}
@ -450,19 +462,13 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// Create a new BasicParseError at the current location
#[inline]
pub fn new_basic_error(&self, kind: BasicParseErrorKind<'i>) -> BasicParseError<'i> {
BasicParseError {
kind,
location: self.current_source_location(),
}
self.current_source_location().new_basic_error(kind)
}
/// Create a new basic ParseError at the current location
#[inline]
pub fn new_error<E>(&self, kind: BasicParseErrorKind<'i>) -> ParseError<'i, E> {
ParseError {
kind: ParseErrorKind::Basic(kind),
location: self.current_source_location(),
}
self.current_source_location().new_error(kind)
}
/// Create a new custom BasicParseError at the current location
@ -606,6 +612,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// See the `Parser::parse_nested_block` method to parse the content of functions or blocks.
///
/// This only returns a closing token when it is unmatched (and therefore an error).
#[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
self.skip_whitespace();
self.next_including_whitespace_and_comments()
@ -652,9 +659,8 @@ impl<'i: 't, 't> Parser<'i, 't> {
let token = if using_cached_token {
let cached_token = self.input.cached_token.as_ref().unwrap();
self.input.tokenizer.reset(&cached_token.end_state);
match cached_token.token {
Token::Function(ref name) => self.input.tokenizer.see_function(name),
_ => {}
if let Token::Function(ref name) = cached_token.token {
self.input.tokenizer.see_function(name)
}
&cached_token.token
} else {
@ -678,7 +684,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
}
/// Have the given closure parse something, then check that the input is exhausted.
/// The result is overridden to `Err(())` if some input remains.
/// The result is overridden to an `Err(..)` if some input remains.
///
/// This can help tell e.g. `color: green;` from `color: green 4px;`
#[inline]
@ -699,7 +705,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
///
/// Successful results are accumulated in a vector.
///
/// This method returns `Err(())` the first time that a closure call does,
/// This method returns an `Err(..)` the first time that a closure call does,
/// or if a closure call leaves some input before the next comma or the end
/// of the input.
#[inline]
@ -748,7 +754,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
match self.parse_until_before(Delimiter::Comma, &mut parse_one) {
Ok(v) => values.push(v),
Err(e) if !ignore_errors => return Err(e),
Err(_) => {},
Err(_) => {}
}
match self.next() {
Err(_) => return Ok(values),
@ -768,7 +774,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// The given closure is called with a "delimited" parser
/// that stops at the end of the block or function (at the matching closing token).
///
/// The result is overridden to `Err(())` if the closure leaves some input before that point.
/// The result is overridden to an `Err(..)` if the closure leaves some input before that point.
#[inline]
pub fn parse_nested_block<F, T, E>(&mut self, parse: F) -> Result<T, ParseError<'i, E>>
where
@ -784,7 +790,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// that stops before the first character at this block/function nesting level
/// that matches the given set of delimiters, or at the end of the input.
///
/// The result is overridden to `Err(())` if the closure leaves some input before that point.
/// The result is overridden to an `Err(..)` if the closure leaves some input before that point.
#[inline]
pub fn parse_until_before<F, T, E>(
&mut self,
@ -835,7 +841,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// expect_ident, but clone the CowRcStr
#[inline]
pub fn expect_ident_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
self.expect_ident().map(|s| s.clone())
self.expect_ident().cloned()
}
/// Parse a <ident-token> whose unescaped value is an ASCII-insensitive match for the given value.
@ -860,7 +866,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// expect_string, but clone the CowRcStr
#[inline]
pub fn expect_string_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
self.expect_string().map(|s| s.clone())
self.expect_string().cloned()
}
/// Parse either a <ident-token> or a <string-token>, and return the unescaped value.
@ -879,7 +885,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
Token::UnquotedUrl(ref value) => Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {
self.parse_nested_block(|input| {
input.expect_string().map_err(Into::into).map(|s| s.clone())
input.expect_string().map_err(Into::into).cloned()
})
.map_err(ParseError::<()>::basic)
}
@ -894,7 +900,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
Token::QuotedString(ref value) => Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {
self.parse_nested_block(|input| {
input.expect_string().map_err(Into::into).map(|s| s.clone())
input.expect_string().map_err(Into::into).cloned()
})
.map_err(ParseError::<()>::basic)
}

@ -4,9 +4,7 @@
// https://drafts.csswg.org/css-syntax/#parsing
use super::{
BasicParseError, BasicParseErrorKind, Delimiter, Delimiters, ParseError, Parser, Token,
};
use super::{BasicParseError, BasicParseErrorKind, Delimiter, ParseError, Parser, Token};
use crate::cow_rc_str::CowRcStr;
use crate::parser::{parse_nested_block, parse_until_after, ParseUntilErrorBehavior, ParserState};
@ -14,7 +12,7 @@ use crate::parser::{parse_nested_block, parse_until_after, ParseUntilErrorBehavi
///
/// Typical usage is `input.try_parse(parse_important).is_ok()`
/// at the end of a `DeclarationParser::parse_value` implementation.
pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
pub fn parse_important<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> {
input.expect_delim('!')?;
input.expect_ident_matching("important")
}
@ -34,7 +32,7 @@ pub trait DeclarationParser<'i> {
///
/// Return the finished representation for the declaration
/// as returned by `DeclarationListParser::next`,
/// or `Err(())` to ignore the entire declaration as invalid.
/// or an `Err(..)` to ignore the entire declaration as invalid.
///
/// Declaration name matching should be case-insensitive in the ASCII range.
/// This can be done with `std::ascii::Ascii::eq_ignore_ascii_case`,
@ -78,7 +76,7 @@ pub trait AtRuleParser<'i> {
/// Parse the prelude of an at-rule with the given `name`.
///
/// Return the representation of the prelude and the type of at-rule,
/// or `Err(())` to ignore the entire at-rule as invalid.
/// or an `Err(..)` to ignore the entire at-rule as invalid.
///
/// The prelude is the part after the at-keyword
/// and before the `;` semicolon or `{ /* ... */ }` block.
@ -106,6 +104,7 @@ pub trait AtRuleParser<'i> {
/// This is only called when `parse_prelude` returned `WithoutBlock`, and
/// either the `;` semicolon indeed follows the prelude, or parser is at
/// the end of the input.
#[allow(clippy::result_unit_err)]
fn rule_without_block(
&mut self,
prelude: Self::Prelude,
@ -122,7 +121,7 @@ pub trait AtRuleParser<'i> {
///
/// Return the finished representation of the at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`,
/// or `Err(())` to ignore the entire at-rule as invalid.
/// or an `Err(..)` to ignore the entire at-rule as invalid.
///
/// This is only called when `parse_prelude` returned `WithBlock`, and a block
/// was indeed found following the prelude.
@ -161,7 +160,7 @@ pub trait QualifiedRuleParser<'i> {
/// Parse the prelude of a qualified rule. For style rules, this is as Selector list.
///
/// Return the representation of the prelude,
/// or `Err(())` to ignore the entire at-rule as invalid.
/// or an `Err(..)` to ignore the entire at-rule as invalid.
///
/// The prelude is the part before the `{ /* ... */ }` block.
///
@ -180,7 +179,7 @@ pub trait QualifiedRuleParser<'i> {
///
/// Return the finished representation of the qualified rule
/// as returned by `RuleListParser::next`,
/// or `Err(())` to ignore the entire at-rule as invalid.
/// or an `Err(..)` to ignore the entire at-rule as invalid.
fn parse_block<'t>(
&mut self,
prelude: Self::Prelude,
@ -253,10 +252,10 @@ where
self.input.skip_whitespace();
let start = self.input.state();
match self.input.next_including_whitespace_and_comments().ok()? {
Token::CloseCurlyBracket |
Token::WhiteSpace(..) |
Token::Semicolon |
Token::Comment(..) => continue,
Token::CloseCurlyBracket
| Token::WhiteSpace(..)
| Token::Semicolon
| Token::Comment(..) => continue,
Token::AtKeyword(ref name) => {
let name = name.clone();
return Some(parse_at_rule(&start, name, self.input, &mut *self.parser));
@ -292,9 +291,9 @@ where
&start,
self.input,
&mut *self.parser,
Delimiter::Semicolon | Delimiter::CurlyBracketBlock,
/* nested = */ true,
) {
return Some(Ok(qual))
return Some(Ok(qual));
}
}
@ -303,12 +302,8 @@ where
token => {
let result = if self.parser.parse_qualified() {
self.input.reset(&start);
let delimiters = if self.parser.parse_declarations() {
Delimiter::Semicolon | Delimiter::CurlyBracketBlock
} else {
Delimiter::CurlyBracketBlock
};
parse_qualified_rule(&start, self.input, &mut *self.parser, delimiters)
let nested = self.parser.parse_declarations();
parse_qualified_rule(&start, self.input, &mut *self.parser, nested)
} else {
let token = token.clone();
self.input.parse_until_after(Delimiter::Semicolon, |_| {
@ -353,7 +348,7 @@ where
}
}
/// `RuleListParser` is an iterator that yields `Ok(_)` for a rule or `Err(())` for an invalid one.
/// `RuleListParser` is an iterator that yields `Ok(_)` for a rule or an `Err(..)` for an invalid one.
impl<'i, 't, 'a, R, P, E: 'i> Iterator for StyleSheetParser<'i, 't, 'a, P>
where
P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E>
@ -367,7 +362,7 @@ where
let start = self.input.state();
let at_keyword = match self.input.next_byte()? {
b'@' => match self.input.next_including_whitespace_and_comments() {
Ok(&Token::AtKeyword(ref name)) => Some(name.clone()),
Ok(Token::AtKeyword(name)) => Some(name.clone()),
_ => {
self.input.reset(&start);
None
@ -397,7 +392,7 @@ where
&start,
self.input,
&mut *self.parser,
Delimiter::CurlyBracketBlock,
/* nested = */ false,
);
return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))));
}
@ -450,7 +445,7 @@ where
if let Some(name) = at_keyword {
parse_at_rule(&start, name, input, parser).map_err(|e| e.0)
} else {
parse_qualified_rule(&start, input, parser, Delimiter::CurlyBracketBlock)
parse_qualified_rule(&start, input, parser, /* nested = */ false)
}
})
}
@ -490,18 +485,54 @@ where
}
}
// If the first two non-<whitespace-token> values of rule's prelude are an <ident-token> whose
// value starts with "--" followed by a <colon-token>, then...
fn looks_like_a_custom_property(input: &mut Parser) -> bool {
let ident = match input.expect_ident() {
Ok(i) => i,
Err(..) => return false,
};
ident.starts_with("--") && input.expect_colon().is_ok()
}
// https://drafts.csswg.org/css-syntax/#consume-a-qualified-rule
fn parse_qualified_rule<'i, 't, P, E>(
start: &ParserState,
input: &mut Parser<'i, 't>,
parser: &mut P,
delimiters: Delimiters,
nested: bool,
) -> Result<<P as QualifiedRuleParser<'i>>::QualifiedRule, ParseError<'i, E>>
where
P: QualifiedRuleParser<'i, Error = E>,
{
let prelude = input.parse_until_before(delimiters, |input| parser.parse_prelude(input));
input.skip_whitespace();
let prelude = {
let state = input.state();
if looks_like_a_custom_property(input) {
// If nested is true, consume the remnants of a bad declaration from input, with
// nested set to true, and return nothing.
// If nested is false, consume a block from input, and return nothing.
let delimiters = if nested {
Delimiter::Semicolon
} else {
Delimiter::CurlyBracketBlock
};
let _: Result<(), ParseError<()>> = input.parse_until_after(delimiters, |_| Ok(()));
return Err(state
.source_location()
.new_error(BasicParseErrorKind::QualifiedRuleInvalid));
}
let delimiters = if nested {
Delimiter::Semicolon | Delimiter::CurlyBracketBlock
} else {
Delimiter::CurlyBracketBlock
};
input.reset(&state);
input.parse_until_before(delimiters, |input| parser.parse_prelude(input))
};
input.expect_curly_bracket_block()?;
// Do this here so that we consume the `{` even if the prelude is `Err`.
let prelude = prelude?;
parse_nested_block(input, |input| parser.parse_block(prelude, &start, input))
parse_nested_block(input, |input| parser.parse_block(prelude, start, input))
}
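
(Editorial sketch of the recovery behavior implemented above, assuming the
qualified-rule code path is reached; illustrative CSS, not from the diff:

    /* nested = true: consume the remnants of a bad declaration,
       so parsing resumes after the ';' */
    div {
      --x:hover { };
      color: green;  /* still parsed */
    }

    /* nested = false: consume a whole block and return nothing,
       so parsing resumes at the next rule */
    --x:hover { color: red; }
    div { color: green; }  /* still parsed */
)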

@ -3,8 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::match_byte;
use dtoa_short::{self, Notation};
use itoa;
use dtoa_short::Notation;
use std::fmt::{self, Write};
use std::str;
@ -49,10 +48,9 @@ where
dtoa_short::write(dest, value)?
};
if int_value.is_none() && value.fract() == 0. {
if !notation.decimal_point && !notation.scientific {
dest.write_str(".0")?;
}
if int_value.is_none() && value.fract() == 0. && !notation.decimal_point && !notation.scientific
{
dest.write_str(".0")?;
}
Ok(())
}
@ -63,10 +61,10 @@ impl<'a> ToCss for Token<'a> {
W: fmt::Write,
{
match *self {
Token::Ident(ref value) => serialize_identifier(&**value, dest)?,
Token::Ident(ref value) => serialize_identifier(value, dest)?,
Token::AtKeyword(ref value) => {
dest.write_str("@")?;
serialize_identifier(&**value, dest)?;
serialize_identifier(value, dest)?;
}
Token::Hash(ref value) => {
dest.write_str("#")?;
@ -74,12 +72,12 @@ impl<'a> ToCss for Token<'a> {
}
Token::IDHash(ref value) => {
dest.write_str("#")?;
serialize_identifier(&**value, dest)?;
serialize_identifier(value, dest)?;
}
Token::QuotedString(ref value) => serialize_string(&**value, dest)?,
Token::QuotedString(ref value) => serialize_string(value, dest)?,
Token::UnquotedUrl(ref value) => {
dest.write_str("url(")?;
serialize_unquoted_url(&**value, dest)?;
serialize_unquoted_url(value, dest)?;
dest.write_str(")")?;
}
Token::Delim(value) => dest.write_char(value)?,
@ -134,7 +132,7 @@ impl<'a> ToCss for Token<'a> {
Token::CDC => dest.write_str("-->")?,
Token::Function(ref name) => {
serialize_identifier(&**name, dest)?;
serialize_identifier(name, dest)?;
dest.write_str("(")?;
}
Token::ParenthesisBlock => dest.write_str("(")?,
@ -167,7 +165,7 @@ fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
where
W: fmt::Write,
{
static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef";
static HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";
let b3;
let b4;
let bytes = if ascii_byte > 0x0F {
@ -179,7 +177,7 @@ where
b3 = [b'\\', HEX_DIGITS[ascii_byte as usize], b' '];
&b3[..]
};
dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
dest.write_str(unsafe { str::from_utf8_unchecked(bytes) })
}
fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
@ -199,9 +197,9 @@ where
return Ok(());
}
if value.starts_with("--") {
if let Some(value) = value.strip_prefix("--") {
dest.write_str("--")?;
serialize_name(&value[2..], dest)
serialize_name(value, dest)
} else if value == "-" {
dest.write_str("\\-")
} else {
@ -240,7 +238,7 @@ where
dest.write_str(&value[chunk_start..i])?;
if let Some(escaped) = escaped {
dest.write_str(escaped)?;
} else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' {
} else if (b'\x01'..=b'\x1F').contains(&b) || b == b'\x7F' {
hex_escape(b, dest)?;
} else {
char_escape(b, dest)?;
@ -340,7 +338,7 @@ where
macro_rules! impl_tocss_for_int {
($T: ty) => {
impl<'a> ToCss for $T {
impl ToCss for $T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where
W: fmt::Write,
@ -363,7 +361,7 @@ impl_tocss_for_int!(u64);
macro_rules! impl_tocss_for_float {
($T: ty) => {
impl<'a> ToCss for $T {
impl ToCss for $T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where
W: fmt::Write,

@ -5,8 +5,7 @@
#[cfg(feature = "bench")]
extern crate test;
use encoding_rs;
use serde_json::{self, json, Map, Value};
use serde_json::{json, Map, Value};
#[cfg(feature = "bench")]
use self::test::Bencher;
@ -25,25 +24,23 @@ macro_rules! JArray {
}
fn almost_equals(a: &Value, b: &Value) -> bool {
match (a, b) {
(&Value::Number(ref a), &Value::Number(ref b)) => {
(Value::Number(a), Value::Number(b)) => {
let a = a.as_f64().unwrap();
let b = b.as_f64().unwrap();
(a - b).abs() <= a.abs() * 1e-6
}
(&Value::Bool(a), &Value::Bool(b)) => a == b,
(&Value::String(ref a), &Value::String(ref b)) => a == b,
(&Value::Array(ref a), &Value::Array(ref b)) => {
a.len() == b.len()
&& a.iter()
.zip(b.iter())
.all(|(ref a, ref b)| almost_equals(*a, *b))
(Value::String(a), Value::String(b)) => a == b,
(Value::Array(a), Value::Array(b)) => {
a.len() == b.len() && a.iter().zip(b.iter()).all(|(a, b)| almost_equals(a, b))
}
(&Value::Object(_), &Value::Object(_)) => panic!("Not implemented"),
(&Value::Null, &Value::Null) => true,
_ => false,
}
}
fn normalize(json: &mut Value) {
@ -77,7 +74,7 @@ fn assert_json_eq(results: Value, mut expected: Value, message: &str) {
}
}
fn run_raw_json_tests<F: Fn(Value, Value) -> ()>(json_data: &str, run: F) {
fn run_raw_json_tests<F: Fn(Value, Value)>(json_data: &str, run: F) {
let items = match serde_json::from_str(json_data) {
Ok(Value::Array(items)) => items,
other => panic!("Invalid JSON: {:?}", other),
@ -242,7 +239,7 @@ fn stylesheet_from_bytes() {
fn get_string<'a>(map: &'a Map<String, Value>, key: &str) -> Option<&'a str> {
match map.get(key) {
Some(&Value::String(ref s)) => Some(s),
Some(Value::String(s)) => Some(s),
Some(&Value::Null) => None,
None => None,
_ => panic!("Unexpected JSON"),
@ -393,7 +390,7 @@ fn unicode_range() {
if input.is_exhausted() {
Ok(result)
} else {
while let Ok(_) = input.next() {}
while input.next().is_ok() {}
Ok(None)
}
});
@ -433,11 +430,9 @@ fn serializer(preserve_comments: bool) {
preserve_comments: bool,
) {
while let Ok(token) = if preserve_comments {
input
.next_including_whitespace_and_comments()
.map(|t| t.clone())
input.next_including_whitespace_and_comments().cloned()
} else {
input.next_including_whitespace().map(|t| t.clone())
input.next_including_whitespace().cloned()
} {
let token_type = token.serialization_type();
if !preserve_comments && previous_token.needs_separator_when_before(token_type)
@ -593,8 +588,6 @@ fn line_numbers() {
#[test]
fn overflow() {
use std::iter::repeat;
let css = r"
2147483646
2147483647
@ -619,7 +612,7 @@ fn overflow() {
-3.402824e+38
"
.replace("{309 zeros}", &repeat('0').take(309).collect::<String>());
.replace("{309 zeros}", &"0".repeat(309));
let mut input = ParserInput::new(&css);
let mut input = Parser::new(&mut input);
@ -637,15 +630,13 @@ fn overflow() {
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_number(), Ok(3.30282347e+38));
assert_eq!(input.expect_number(), Ok(3.302_823_5e38));
assert_eq!(input.expect_number(), Ok(f32::MAX));
assert_eq!(input.expect_number(), Ok(f32::INFINITY));
assert!(f32::MAX != f32::INFINITY);
assert_eq!(input.expect_number(), Ok(-3.30282347e+38));
assert_eq!(input.expect_number(), Ok(-3.302_823_5e38));
assert_eq!(input.expect_number(), Ok(f32::MIN));
assert_eq!(input.expect_number(), Ok(f32::NEG_INFINITY));
assert!(f32::MIN != f32::NEG_INFINITY);
}
#[test]
@ -784,7 +775,7 @@ where
impl<'a> ToJson for CowRcStr<'a> {
fn to_json(&self) -> Value {
let s: &str = &*self;
let s: &str = self;
s.to_json()
}
}
@ -847,7 +838,7 @@ fn no_stack_overflow_multiple_nested_blocks() {
}
let mut input = ParserInput::new(&input);
let mut input = Parser::new(&mut input);
while let Ok(..) = input.next() {}
while input.next().is_ok() {}
}
impl<'i> DeclarationParser<'i> for JsonParser {
@ -863,18 +854,16 @@ impl<'i> DeclarationParser<'i> for JsonParser {
let mut important = false;
loop {
let start = input.state();
if let Ok(mut token) = input.next_including_whitespace().map(|t| t.clone()) {
if let Ok(mut token) = input.next_including_whitespace().cloned() {
// Hack to deal with css-parsing-tests assuming that
// `!important` in the middle of a declaration value is OK.
// This can never happen per spec
// (even CSS Variables forbid top-level `!`)
if token == Token::Delim('!') {
input.reset(&start);
if parse_important(input).is_ok() {
if input.is_exhausted() {
important = true;
break;
}
if parse_important(input).is_ok() && input.is_exhausted() {
important = true;
break;
}
input.reset(&start);
token = input.next_including_whitespace().unwrap().clone();
@ -905,7 +894,7 @@ impl<'i> AtRuleParser<'i> for JsonParser {
];
match_ignore_ascii_case! { &*name,
"charset" => {
Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone()).into()))
Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone())))
},
_ => Ok(prelude),
}
@ -968,7 +957,7 @@ impl<'i> RuleBodyItemParser<'i, Value, ()> for JsonParser {
fn component_values_to_json(input: &mut Parser) -> Vec<Value> {
let mut values = vec![];
while let Ok(token) = input.next_including_whitespace().map(|t| t.clone()) {
while let Ok(token) = input.next_including_whitespace().cloned() {
values.push(one_component_value_to_json(token, input));
}
values
@ -978,9 +967,9 @@ fn one_component_value_to_json(token: Token, input: &mut Parser) -> Value {
fn numeric(value: f32, int_value: Option<i32>, has_sign: bool) -> Vec<Value> {
vec![
Token::Number {
value: value,
int_value: int_value,
has_sign: has_sign,
value,
int_value,
has_sign,
}
.to_css_string()
.to_json(),
@ -1137,7 +1126,7 @@ fn parse_until_before_stops_at_delimiter_or_end_of_input() {
let ox = ix.next();
let oy = iy.next();
assert_eq!(ox, oy);
if let Err(_) = ox {
if ox.is_err() {
break;
}
}
@ -1223,7 +1212,7 @@ fn parse_sourcemapping_comments() {
for test in tests {
let mut input = ParserInput::new(test.0);
let mut parser = Parser::new(&mut input);
while let Ok(_) = parser.next_including_whitespace() {}
while parser.next_including_whitespace().is_ok() {}
assert_eq!(parser.current_source_map_url(), test.1);
}
}
@ -1247,7 +1236,7 @@ fn parse_sourceurl_comments() {
for test in tests {
let mut input = ParserInput::new(test.0);
let mut parser = Parser::new(&mut input);
while let Ok(_) = parser.next_including_whitespace() {}
while parser.next_including_whitespace().is_ok() {}
assert_eq!(parser.current_source_url(), test.1);
}
}
@ -1321,7 +1310,8 @@ fn utf16_columns() {
break;
}
Err(_) => {
assert!(false);
unreachable!();
}
Ok(_) => {}
};

@ -255,10 +255,10 @@ impl<'a> Tokenizer<'a> {
#[inline]
pub fn see_function(&mut self, name: &str) {
if self.var_or_env_functions == SeenStatus::LookingForThem {
if name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env") {
self.var_or_env_functions = SeenStatus::SeenAtLeastOne;
}
if self.var_or_env_functions == SeenStatus::LookingForThem
&& (name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env"))
{
self.var_or_env_functions = SeenStatus::SeenAtLeastOne;
}
}
@ -322,10 +322,12 @@ impl<'a> Tokenizer<'a> {
pub fn current_source_line(&self) -> &'a str {
let current = self.position();
let start = self.slice(SourcePosition(0)..current)
let start = self
.slice(SourcePosition(0)..current)
.rfind(|c| matches!(c, '\r' | '\n' | '\x0C'))
.map_or(0, |start| start + 1);
let end = self.slice(current..SourcePosition(self.input.len()))
let end = self
.slice(current..SourcePosition(self.input.len()))
.find(|c| matches!(c, '\r' | '\n' | '\x0C'))
.map_or(self.input.len(), |end| current.0 + end);
self.slice(SourcePosition(start)..SourcePosition(end))
@ -424,7 +426,10 @@ impl<'a> Tokenizer<'a> {
#[inline]
fn next_char(&self) -> char {
unsafe { self.input.get_unchecked(self.position().0..) }.chars().next().unwrap()
unsafe { self.input.get_unchecked(self.position().0..) }
.chars()
.next()
.unwrap()
}
// Given that a newline has been seen, advance over the newline
@ -561,11 +566,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
b'#' => {
tokenizer.advance(1);
if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
else if !tokenizer.is_eof() &&
matches!(tokenizer.next_byte_unchecked(), b'0'..=b'9' | b'-') {
// Any other valid case here already resulted in IDHash.
b'0'..=b'9' | b'-' => true,
_ => false,
} { Hash(consume_name(tokenizer)) }
Hash(consume_name(tokenizer))
}
else { Delim('#') }
},
b'$' => {
@ -582,11 +587,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
b'+' => {
if (
tokenizer.has_at_least(1)
&& matches!(tokenizer.byte_at(1), b'0'..=b'9')
&& tokenizer.byte_at(1).is_ascii_digit()
) || (
tokenizer.has_at_least(2)
&& tokenizer.byte_at(1) == b'.'
&& matches!(tokenizer.byte_at(2), b'0'..=b'9')
&& tokenizer.byte_at(2).is_ascii_digit()
) {
consume_numeric(tokenizer)
} else {
@ -598,11 +603,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
b'-' => {
if (
tokenizer.has_at_least(1)
&& matches!(tokenizer.byte_at(1), b'0'..=b'9')
&& tokenizer.byte_at(1).is_ascii_digit()
) || (
tokenizer.has_at_least(2)
&& tokenizer.byte_at(1) == b'.'
&& matches!(tokenizer.byte_at(2), b'0'..=b'9')
&& tokenizer.byte_at(2).is_ascii_digit()
) {
consume_numeric(tokenizer)
} else if tokenizer.starts_with(b"-->") {
@ -617,8 +622,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
},
b'.' => {
if tokenizer.has_at_least(1)
&& matches!(tokenizer.byte_at(1), b'0'..=b'9'
) {
&& tokenizer.byte_at(1).is_ascii_digit() {
consume_numeric(tokenizer)
} else {
tokenizer.advance(1);
@ -1001,7 +1005,7 @@ fn byte_to_hex_digit(b: u8) -> Option<u32> {
}
fn byte_to_decimal_digit(b: u8) -> Option<u32> {
if b >= b'0' && b <= b'9' {
if b.is_ascii_digit() {
Some((b - b'0') as u32)
} else {
None
@ -1038,7 +1042,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
let mut fractional_part: f64 = 0.;
if tokenizer.has_at_least(1)
&& tokenizer.next_byte_unchecked() == b'.'
&& matches!(tokenizer.byte_at(1), b'0'..=b'9')
&& tokenizer.byte_at(1).is_ascii_digit()
{
is_integer = false;
tokenizer.advance(1); // Consume '.'
@ -1055,32 +1059,32 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
let mut value = sign * (integral_part + fractional_part);
if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
if matches!(tokenizer.byte_at(1), b'0'..=b'9')
if tokenizer.has_at_least(1)
&& matches!(tokenizer.next_byte_unchecked(), b'e' | b'E')
&& (tokenizer.byte_at(1).is_ascii_digit()
|| (tokenizer.has_at_least(2)
&& matches!(tokenizer.byte_at(1), b'+' | b'-')
&& matches!(tokenizer.byte_at(2), b'0'..=b'9'))
{
is_integer = false;
&& tokenizer.byte_at(2).is_ascii_digit()))
{
is_integer = false;
tokenizer.advance(1);
let (has_sign, sign) = match tokenizer.next_byte_unchecked() {
b'-' => (true, -1.),
b'+' => (true, 1.),
_ => (false, 1.),
};
if has_sign {
tokenizer.advance(1);
let (has_sign, sign) = match tokenizer.next_byte_unchecked() {
b'-' => (true, -1.),
b'+' => (true, 1.),
_ => (false, 1.),
};
if has_sign {
tokenizer.advance(1);
}
let mut exponent: f64 = 0.;
while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
exponent = exponent * 10. + digit as f64;
tokenizer.advance(1);
if tokenizer.is_eof() {
break;
}
}
value *= f64::powf(10., sign * exponent);
}
let mut exponent: f64 = 0.;
while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
exponent = exponent * 10. + digit as f64;
tokenizer.advance(1);
if tokenizer.is_eof() {
break;
}
}
value *= f64::powf(10., sign * exponent);
}
let int_value = if is_integer {
@ -1339,7 +1343,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
}
// (value, number of digits up to 6)
fn consume_hex_digits<'a>(tokenizer: &mut Tokenizer<'a>) -> (u32, u32) {
fn consume_hex_digits(tokenizer: &mut Tokenizer<'_>) -> (u32, u32) {
let mut value = 0;
let mut digits = 0;
while digits < 6 && !tokenizer.is_eof() {

@ -24,7 +24,7 @@ pub struct UnicodeRange {
impl UnicodeRange {
/// https://drafts.csswg.org/css-syntax/#urange-syntax
pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, BasicParseError<'i>> {
pub fn parse<'i>(input: &mut Parser<'i, '_>) -> Result<Self, BasicParseError<'i>> {
// <urange> =
// u '+' <ident-token> '?'* |
// u <dimension-token> '?'* |
@ -57,7 +57,7 @@ impl UnicodeRange {
}
}
fn parse_tokens<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
fn parse_tokens<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> {
match input.next_including_whitespace()?.clone() {
Token::Delim('+') => {
match *input.next_including_whitespace()? {
@ -123,15 +123,13 @@ fn parse_concatenated(text: &[u8]) -> Result<UnicodeRange, ()> {
start: first_hex_value,
end: first_hex_value,
});
} else {
if let Some((&b'-', mut text)) = text.split_first() {
let (second_hex_value, hex_digit_count) = consume_hex(&mut text);
if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() {
return Ok(UnicodeRange {
start: first_hex_value,
end: second_hex_value,
});
}
} else if let Some((&b'-', mut text)) = text.split_first() {
let (second_hex_value, hex_digit_count) = consume_hex(&mut text);
if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() {
return Ok(UnicodeRange {
start: first_hex_value,
end: second_hex_value,
});
}
}
Err(())
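
(For context, `UnicodeRange::parse` above implements the <urange> grammar;
illustrative values, assuming standard urange semantics:

    unicode-range: U+26;        /* single code point */
    unicode-range: U+0-7F;      /* explicit range */
    unicode-range: U+4??;       /* wildcard, equivalent to U+400-4FF */
)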