Bug 1887638 - [devtools] Expose CSS token value in InspectorCSSToken. r=emilio.

In some cases it is more useful to get only the token value rather than the whole token text (e.g. for `Function` tokens, the value is the function name, while the text also includes the opening parenthesis).

Refactor `test_lexer` to better test the tokens we get, including their `value` property.

Differential Revision: https://phabricator.services.mozilla.com/D207400
parent f6bb5f8a6c
commit e6a3d588fb

4 changed files with 209 additions and 52 deletions
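For illustration, a minimal sketch (not part of the patch) of what the new `value` property gives a DevTools consumer. It assumes a parser-like object exposing nextToken(), as suggested by InspectorCSSParser::NextToken in the C++ change below; how such a parser is obtained is outside this diff, so `getInspectorCSSParserFor` is a hypothetical placeholder.

  // Hypothetical helper, not a real API: stands in for however the caller
  // obtains an InspectorCSSParser for the given CSS text.
  const parser = getInspectorCSSParserFor("rgb(1,2,3)");
  let token;
  while ((token = parser.nextToken())) {
    if (token.tokenType === "Function") {
      // `text` keeps the opening parenthesis, `value` is only the function name.
      console.log(token.text);  // "rgb("
      console.log(token.value); // "rgb"
    }
  }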
@@ -9,7 +9,11 @@ const jsLexer = require("resource://devtools/shared/css/lexer.js");

add_task(function test_lexer() {
  const LEX_TESTS = [
    ["simple", ["ident:simple"], ["Ident:simple"]],
    [
      "simple",
      ["ident:simple"],
      [{ tokenType: "Ident", text: "simple", value: "simple" }],
    ],
    [
      "simple: { hi; }",
      [
@@ -24,20 +28,32 @@ add_task(function test_lexer() {
        "symbol:}",
      ],
      [
        "Ident:simple",
        "Colon::",
        "WhiteSpace: ",
        "CurlyBracketBlock:{",
        "WhiteSpace: ",
        "Ident:hi",
        "Semicolon:;",
        "WhiteSpace: ",
        "CloseCurlyBracket:}",
        { tokenType: "Ident", text: "simple", value: "simple" },
        { tokenType: "Colon", text: ":" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CurlyBracketBlock", text: "{" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "Ident", text: "hi", value: "hi" },
        { tokenType: "Semicolon", text: ";" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CloseCurlyBracket", text: "}" },
      ],
    ],
    ["/* whatever */", ["comment"], ["Comment:/* whatever */"]],
    ["'string'", ["string:string"], ["QuotedString:'string'"]],
    ['"string"', ["string:string"], [`QuotedString:"string"`]],
    [
      "/* whatever */",
      ["comment"],
      [{ tokenType: "Comment", text: "/* whatever */", value: " whatever " }],
    ],
    [
      "'string'",
      ["string:string"],
      [{ tokenType: "QuotedString", text: "'string'", value: "string" }],
    ],
    [
      '"string"',
      ["string:string"],
      [{ tokenType: "QuotedString", text: `"string"`, value: "string" }],
    ],
    [
      "rgb(1,2,3)",
      [
@@ -50,67 +66,116 @@ add_task(function test_lexer() {
        "symbol:)",
      ],
      [
        "Function:rgb(",
        "Number:1",
        "Comma:,",
        "Number:2",
        "Comma:,",
        "Number:3",
        "CloseParenthesis:)",
        { tokenType: "Function", text: "rgb(", value: "rgb" },
        { tokenType: "Number", text: "1", number: 1 },
        { tokenType: "Comma", text: "," },
        { tokenType: "Number", text: "2", number: 2 },
        { tokenType: "Comma", text: "," },
        { tokenType: "Number", text: "3", number: 3 },
        { tokenType: "CloseParenthesis", text: ")" },
      ],
    ],
    ["@media", ["at:media"], ["AtKeyword:@media"]],
    ["#hibob", ["id:hibob"], ["IDHash:#hibob"]],
    ["#123", ["hash:123"], ["Hash:#123"]],
    ["23px", ["dimension:px"], ["Dimension:23px"]],
    ["23%", ["percentage"], ["Percentage:23%"]],
    [
      "@media",
      ["at:media"],
      [{ tokenType: "AtKeyword", text: "@media", value: "media" }],
    ],
    [
      "#hibob",
      ["id:hibob"],
      [{ tokenType: "IDHash", text: "#hibob", value: "hibob" }],
    ],
    ["#123", ["hash:123"], [{ tokenType: "Hash", text: "#123", value: "123" }]],
    [
      "23px",
      ["dimension:px"],
      [{ tokenType: "Dimension", text: "23px", number: 23, unit: "px" }],
    ],
    [
      "23%",
      ["percentage"],
      [{ tokenType: "Percentage", text: "23%", number: 0.23 }],
    ],
    [
      "url(http://example.com)",
      ["url:http://example.com"],
      ["UnquotedUrl:url(http://example.com)"],
      [
        {
          tokenType: "UnquotedUrl",
          text: "url(http://example.com)",
          value: "http://example.com",
        },
      ],
    ],
    [
      "url('http://example.com')",
      ["url:http://example.com"],
      [
        "Function:url(",
        "QuotedString:'http://example.com'",
        "CloseParenthesis:)",
        { tokenType: "Function", text: "url(", value: "url" },
        {
          tokenType: "QuotedString",
          text: "'http://example.com'",
          value: "http://example.com",
        },
        { tokenType: "CloseParenthesis", text: ")" },
      ],
    ],
    [
      "url( 'http://example.com' )",
      ["url:http://example.com"],
      [
        "Function:url(",
        "WhiteSpace: ",
        "QuotedString:'http://example.com'",
        "WhiteSpace: ",
        "CloseParenthesis:)",
        { tokenType: "Function", text: "url(", value: "url" },
        { tokenType: "WhiteSpace", text: " " },
        {
          tokenType: "QuotedString",
          text: "'http://example.com'",
          value: "http://example.com",
        },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CloseParenthesis", text: ")" },
      ],
    ],
    // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
    [
      "url(http://example.com",
      ["url:http://example.com"],
      ["UnquotedUrl:url(http://example.com"],
      [
        {
          tokenType: "UnquotedUrl",
          text: "url(http://example.com",
          value: "http://example.com",
        },
      ],
    ],
    [
      "url(http://example.com @",
      ["bad_url:http://example.com"],
      ["BadUrl:url(http://example.com @"],
      [
        {
          tokenType: "BadUrl",
          text: "url(http://example.com @",
          value: "http://example.com @",
        },
      ],
    ],
    [
      "quo\\ting",
      ["ident:quoting"],
      [{ tokenType: "Ident", text: "quo\\ting", value: "quoting" }],
    ],
    ["quo\\ting", ["ident:quoting"], ["Ident:quo\\ting"]],
    [
      "'bad string\n",
      ["bad_string:bad string", "whitespace"],
      ["BadString:'bad string", "WhiteSpace:\n"],
      [
        { tokenType: "BadString", text: "'bad string", value: "bad string" },
        { tokenType: "WhiteSpace", text: "\n" },
      ],
    ],
    ["~=", ["includes"], ["IncludeMatch:~="]],
    ["|=", ["dashmatch"], ["DashMatch:|="]],
    ["^=", ["beginsmatch"], ["PrefixMatch:^="]],
    ["$=", ["endsmatch"], ["SuffixMatch:$="]],
    ["*=", ["containsmatch"], ["SubstringMatch:*="]],
    ["~=", ["includes"], [{ tokenType: "IncludeMatch", text: "~=" }]],
    ["|=", ["dashmatch"], [{ tokenType: "DashMatch", text: "|=" }]],
    ["^=", ["beginsmatch"], [{ tokenType: "PrefixMatch", text: "^=" }]],
    ["$=", ["endsmatch"], [{ tokenType: "SuffixMatch", text: "$=" }]],
    ["*=", ["containsmatch"], [{ tokenType: "SubstringMatch", text: "*=" }]],

    [
      "<!-- html comment -->",
@@ -124,19 +189,23 @@ add_task(function test_lexer() {
        "htmlcomment",
      ],
      [
        "CDO:<!--",
        "WhiteSpace: ",
        "Ident:html",
        "WhiteSpace: ",
        "Ident:comment",
        "WhiteSpace: ",
        "CDC:-->",
        { tokenType: "CDO", text: "<!--" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "Ident", text: "html", value: "html" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "Ident", text: "comment", value: "comment" },
        { tokenType: "WhiteSpace", text: " " },
        { tokenType: "CDC", text: "-->" },
      ],
    ],

    // earlier versions of CSS had "bad comment" tokens, but in level 3,
    // unterminated comments are just comments.
    ["/* bad comment", ["comment"], ["Comment:/* bad comment"]],
    [
      "/* bad comment",
      ["comment"],
      [{ tokenType: "Comment", text: "/* bad comment", value: " bad comment" }],
    ],
  ];

  const test = (cssText, useInspectorCSSParser, tokenTypes) => {
@@ -150,7 +219,28 @@ add_task(function test_lexer() {
      if (token.text) {
        combined += ":" + token.text;
      }
      equal(combined, tokenTypes[i]);
      if (useInspectorCSSParser) {
        const expectedToken = tokenTypes[i];
        Assert.deepEqual(
          {
            tokenType: token.tokenType,
            text: token.text,
            value: token.value,
            number: token.number,
            unit: token.unit,
          },
          {
            tokenType: expectedToken.tokenType,
            text: expectedToken.text,
            value: expectedToken.value ?? null,
            number: expectedToken.number ?? null,
            unit: expectedToken.unit ?? null,
          },
          `Got expected token #${i} for "${cssText}"`
        );
      } else {
        equal(combined, tokenTypes[i]);
      }
      Assert.greater(token.endOffset, token.startOffset);
      equal(token.startOffset, lastTokenEnd);
      lastTokenEnd = token.endOffset;
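To make the test data above easier to read: each LEX_TESTS entry now has three elements — the CSS text to lex, the expectations for the legacy JS lexer (as "type" or "type:text" strings), and the expectations for InspectorCSSParser (as plain objects). A minimal sketch of one entry, taken from the data above; fields left out of an expectation (value, number, unit) are normalized to null by the deepEqual comparison in the helper:

  // One LEX_TESTS entry, as consumed by test(cssText, useInspectorCSSParser, tokenTypes).
  const entry = [
    "23px",           // input CSS text
    ["dimension:px"], // expected tokens from the JS lexer (strings)
    [{ tokenType: "Dimension", text: "23px", number: 23, unit: "px" }], // InspectorCSSParser tokens (objects)
  ];
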
@@ -235,6 +235,15 @@ dictionary InspectorCSSToken {
  // Text associated with the token.
  required UTF8String text;

  // Value of the token. Might differ from `text`:
  // - for `Function` tokens, text contains the opening paren, `value` does not (e.g. `var(` vs `var`)
  // - for `AtKeyword` tokens, text contains the leading `@`, `value` does not (e.g. `@media` vs `media`)
  // - for `Hash` and `IDHash` tokens, text contains the leading `#`, `value` does not (e.g. `#myid` vs `myid`)
  // - for `UnquotedUrl` tokens, text contains the `url(` parts, `value` only holds the url (e.g. `url(test.jpg)` vs `test.jpg`)
  // - for `QuotedString` tokens, text contains the wrapping quotes, `value` does not (e.g. `"hello"` vs `hello`)
  // - for `Comment` tokens, text contains leading `/*` and trailing `*/`, `value` does not (e.g. `/* yo */` vs ` yo `)
  required UTF8String? value;

  // Unit for Dimension tokens
  required UTF8String? unit;
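
Since `value` is nullable (tokens such as Colon, Comma or WhiteSpace carry no distinct value in this dictionary), a consumer that only needs something displayable can fall back to `text`. A minimal sketch, assuming a token object shaped like the dictionary above:

  // `value` is null when the token has no distinct value; fall back to `text`.
  function tokenLabel(token) {
    return token.value ?? token.text;
  }

  tokenLabel({ tokenType: "Function", text: "var(", value: "var" }); // "var"
  tokenLabel({ tokenType: "Colon", text: ":", value: null });        // ":"
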
@@ -48,6 +48,11 @@ void InspectorCSSParser::NextToken(Nullable<InspectorCSSToken>& aResult) {
  InspectorCSSToken& inspectorCssToken = aResult.SetValue();
  inspectorCssToken.mText.Append(cssToken.text);
  inspectorCssToken.mTokenType.Append(cssToken.token_type);
  if (cssToken.has_value) {
    inspectorCssToken.mValue.Append(cssToken.value);
  } else {
    inspectorCssToken.mValue.SetIsVoid(true);
  }
  if (cssToken.has_unit) {
    inspectorCssToken.mUnit.Append(cssToken.unit);
  } else {
@@ -9286,6 +9286,8 @@ pub struct CSSToken {
    pub unit: nsCString,
    pub has_number: bool,
    pub number: f32,
    pub has_value: bool,
    pub value: nsCString,
    // line and column at which the token starts
    pub line: u32,
    pub column: u32,
@@ -9373,6 +9375,53 @@ pub unsafe extern "C" fn Servo_CSSParser_NextToken(
        Token::CloseCurlyBracket => "CloseCurlyBracket",
    };

    let token_value = match *token {
        Token::Ident(value) |
        Token::AtKeyword(value) |
        Token::Hash(value) |
        Token::IDHash(value) |
        Token::QuotedString(value) |
        Token::UnquotedUrl(value) |
        Token::Function(value) |
        Token::BadUrl(value) |
        Token::BadString(value) => {
            let mut text = nsCString::new();
            text.assign(value.as_bytes());
            Some(text)
        },
        // value is a str here, we need a different branch to handle it
        Token::Comment(value) => {
            let mut text = nsCString::new();
            text.assign(value.as_bytes());
            Some(text)
        },
        // Delim and WhiteSpace also have value, but they will be similar to text, so don't
        // include them
        Token::Delim(_) |
        Token::WhiteSpace(_) |
        // Number, Percentage and Dimension expose numeric values that will be exposed in `number`
        Token::Number{..} |
        Token::Percentage{..} |
        Token::Dimension{..} |
        // The rest of the tokens don't expose a string value
        Token::Colon |
        Token::Semicolon |
        Token::Comma |
        Token::IncludeMatch |
        Token::DashMatch |
        Token::PrefixMatch |
        Token::SuffixMatch |
        Token::SubstringMatch |
        Token::CDO |
        Token::CDC |
        Token::ParenthesisBlock |
        Token::SquareBracketBlock |
        Token::CurlyBracketBlock |
        Token::CloseParenthesis |
        Token::CloseSquareBracket |
        Token::CloseCurlyBracket => None
    };

    let token_unit = match *token {
        Token::Dimension{
            ref unit, ..
@@ -9414,6 +9463,10 @@ pub unsafe extern "C" fn Servo_CSSParser_NextToken(

    css_token.text = text;
    css_token.token_type = token_type.into();
    css_token.has_value = token_value.is_some();
    if css_token.has_value {
        css_token.value = token_value.unwrap();
    }
    css_token.has_unit = token_unit.is_some();
    if css_token.has_unit {
        css_token.unit = token_unit.unwrap();