commit 99792f9e05 (parent e342530686)

Bug 1892895 - [devtools] Remove JS CSS Lexer. r=devtools-reviewers,ochameau.

Differential Revision: https://phabricator.services.mozilla.com/D208318

13 changed files with 103 additions and 1661 deletions
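The change is mechanical at every call site: the getCSSLexer() factory, whose second argument opted into the Rust-based InspectorCSSParser and whose third enabled EOF-character tracking, is replaced by direct construction of InspectorCSSParserWrapper, with the one remaining knob passed as an options object. A minimal before/after sketch, inferred from the call sites in the hunks below (the sample CSS string is illustrative):

const {
  InspectorCSSParserWrapper,
} = require("resource://devtools/shared/css/lexer.js");

// Before (removed): const lexer = getCSSLexer(text, true, true);
// After:
const lexer = new InspectorCSSParserWrapper("a { color: red", {
  trackEOFChars: true,
});

// Consumption is unchanged: pull tokens until nextToken() returns nothing.
let token;
while ((token = lexer.nextToken())) {
  // Token type names follow the Rust lexer ("Ident", "Function",
  // "CloseParenthesis"), not the old lowercase "ident"/"function" names.
}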
@@ -12,7 +12,9 @@
 
 "use strict";
 
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const {
   COMMENT_PARSING_HEURISTIC_BYPASS_CHAR,
   escapeCSSComment,
@@ -194,7 +196,7 @@ RuleRewriter.prototype = {
     // into "url(;)" by this code -- due to the way "url(...)" is
     // parsed as a single token.
     text = text.replace(/;$/, "");
-    const lexer = getCSSLexer(text, true, true);
+    const lexer = new InspectorCSSParserWrapper(text, { trackEOFChars: true });
 
     let result = "";
     let previousOffset = 0;

@@ -12,7 +12,7 @@ loader.lazyRequireGetter(
 );
 loader.lazyRequireGetter(
   this,
-  "getCSSLexer",
+  "InspectorCSSParserWrapper",
   "resource://devtools/shared/css/lexer.js",
   true
 );
@@ -51,7 +51,7 @@ function advanceValidate(keyCode, value, insertionPoint) {
   // value. Otherwise it's been inserted in some spot where it has a
   // valid meaning, like a comment or string.
   value = value.slice(0, insertionPoint) + ";" + value.slice(insertionPoint);
-  const lexer = getCSSLexer(value, true);
+  const lexer = new InspectorCSSParserWrapper(value);
   while (true) {
     const token = lexer.nextToken();
     if (token.endOffset > insertionPoint) {

@@ -6,7 +6,9 @@
 
 const SPECIALVALUES = new Set(["initial", "inherit", "unset"]);
 
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 
 loader.lazyRequireGetter(
   this,
@@ -68,7 +70,7 @@ CssAngle.prototype = {
   },
 
   get valid() {
-    const token = getCSSLexer(this.authored, true).nextToken();
+    const token = new InspectorCSSParserWrapper(this.authored).nextToken();
     if (!token) {
       return false;
     }

@@ -8,7 +8,9 @@ const {
   angleUtils,
 } = require("resource://devtools/client/shared/css-angle.js");
 const { colorUtils } = require("resource://devtools/shared/css/color.js");
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const {
   appendText,
 } = require("resource://devtools/client/inspector/shared/utils.js");
@@ -733,7 +735,7 @@ class OutputParser {
     text = text.trim();
     this.#parsed.length = 0;
 
-    const tokenStream = getCSSLexer(text, true);
+    const tokenStream = new InspectorCSSParserWrapper(text);
     return this.#doParse(text, options, tokenStream, false);
   }
 
@@ -931,7 +933,7 @@ class OutputParser {
    */
   // eslint-disable-next-line complexity
   #addPolygonPointNodes(coords, container) {
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let coord = "";
     let i = 0;
@@ -1081,7 +1083,7 @@ class OutputParser {
    */
   // eslint-disable-next-line complexity
   #addCirclePointNodes(coords, container) {
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let depth = 0;
     let coord = "";
@@ -1242,7 +1244,7 @@ class OutputParser {
    */
   // eslint-disable-next-line complexity
   #addEllipsePointNodes(coords, container) {
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let depth = 0;
     let coord = "";
@@ -1413,7 +1415,7 @@ class OutputParser {
   // eslint-disable-next-line complexity
   #addInsetPointNodes(coords, container) {
     const insetPoints = ["top", "right", "bottom", "left"];
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let depth = 0;
     let coord = "";
@@ -1777,7 +1779,9 @@ class OutputParser {
    */
   #sanitizeURL(url) {
     // Re-lex the URL and add any needed termination characters.
-    const urlTokenizer = getCSSLexer(url, true, true);
+    const urlTokenizer = new InspectorCSSParserWrapper(url, {
+      trackEOFChars: true,
+    });
     // Just read until EOF; there will only be a single token.
     while (urlTokenizer.nextToken()) {
       // Nothing.

@@ -1101,7 +1101,7 @@ CSSCompleter.prototype = {
     }
 
     let prevToken = undefined;
-    const tokensIterator = cssTokenizer(lineText, true);
+    const tokensIterator = cssTokenizer(lineText);
     let found = false;
     const ech = line == caret.line ? caret.ch : 0;
     for (let token of tokensIterator) {
@@ -1165,7 +1165,7 @@ CSSCompleter.prototype = {
       lineText = lineText.substring(0, caret.ch);
     }
 
-    const tokens = Array.from(cssTokenizer(lineText, true));
+    const tokens = Array.from(cssTokenizer(lineText));
     let found = false;
     for (let i = tokens.length - 1; i >= 0; i--) {
       let token = tokens[i];
@@ -1246,7 +1246,7 @@ CSSCompleter.prototype = {
       };
     } else if (state == CSS_STATES.property) {
       // A property can only be a single word and thus very easy to calculate.
-      const tokens = cssTokenizer(sourceArray[line], true);
+      const tokens = cssTokenizer(sourceArray[line]);
       for (const token of tokens) {
         // Note that, because we're tokenizing a single line, the
         // token's offset is also the column number.

@@ -31,7 +31,9 @@ const {
   PRESETS,
   DEFAULT_PRESET_CATEGORY,
 } = require("resource://devtools/client/shared/widgets/CubicBezierPresets.js");
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const XHTML_NS = "http://www.w3.org/1999/xhtml";
 
 /**
@@ -918,7 +920,7 @@ function parseTimingFunction(value) {
     return PREDEFINED[value];
   }
 
-  const tokenStream = getCSSLexer(value, true);
+  const tokenStream = new InspectorCSSParserWrapper(value);
   const getNextToken = () => {
     while (true) {
       const token = tokenStream.nextToken();

@@ -1030,7 +1030,7 @@ function tokenizeFilterValue(css) {
   let state = "initial";
   let name;
   let contents;
-  for (const token of cssTokenizer(css, true)) {
+  for (const token of cssTokenizer(css)) {
     switch (state) {
       case "initial":
         if (token.tokenType === "Function") {

@@ -9,7 +9,7 @@
  */
 
 const EventEmitter = require("devtools/shared/event-emitter");
-const { getCSSLexer } = require("devtools/shared/css/lexer");
+const { InspectorCSSParserWrapper } = require("devtools/shared/css/lexer");
 const { throttle } = require("devtools/shared/throttle");
 const XHTML_NS = "http://www.w3.org/1999/xhtml";
 const SVG_NS = "http://www.w3.org/2000/svg";
@@ -578,7 +578,7 @@ class TimingFunctionPreviewWidget {
  */
 function parseTimingFunction(value) {
   value = value.trim();
-  const tokenStream = getCSSLexer(value, true);
+  const tokenStream = new InspectorCSSParserWrapper(value);
   const getNextToken = () => {
     while (true) {
       const token = tokenStream.nextToken();

@@ -9,7 +9,9 @@ const {
   styleRuleSpec,
 } = require("resource://devtools/shared/specs/style-rule.js");
 
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const TrackChangeEmitter = require("resource://devtools/server/actors/utils/track-change-emitter.js");
 const {
   getRuleText,
@@ -1316,7 +1318,7 @@ function getSelectorOffsets(initialText, line, column) {
     line,
     column
   );
-  const lexer = getCSSLexer(text, true);
+  const lexer = new InspectorCSSParserWrapper(text);
 
   // Search forward for the opening brace.
   let endOffset;

File diff suppressed because it is too large
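The suppressed diff is presumably devtools/shared/css/lexer.js itself, where the hand-written JS tokenizer (the bulk of the 1661 deleted lines) is removed and only a thin wrapper around the platform parser remains. Below is a hypothetical stub of the consumer-facing surface, inferred from the call sites elsewhere in this commit; only the constructor options, nextToken(), performEOFFixup(), and the parser line/column getters are attested here, everything else is illustrative:

// Hypothetical stub, not the real implementation.
class InspectorCSSParserWrapper {
  // trackEOFChars: remember which characters would be needed to cleanly
  // terminate an unterminated input (consumed later by performEOFFixup).
  constructor(input, { trackEOFChars = false } = {}) {
    // The real module wraps the platform (Rust) CSS parser; the tests read
    // positions through lexer.parser.lineNumber / lexer.parser.columnNumber.
    this.parser = createPlatformCSSParser(input); // placeholder factory name
    this.trackEOFChars = trackEOFChars;
  }

  // Returns the next token object ({ tokenType, text, value, number, unit,
  // startOffset, endOffset, ... }) or a falsy value at end of input.
  nextToken() {
    return this.parser.nextToken();
  }

  // Appends whatever characters are needed to close still-open constructs
  // (strings, url(...), comments) once the input has been fully consumed.
  // The exact signature is not shown in this diff.
  performEOFFixup(inputString /* , ... */) {}
}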

@@ -11,7 +11,9 @@
 
 "use strict";
 
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 
 loader.lazyRequireGetter(
   this,
@@ -35,22 +37,18 @@ const COMMENT_PARSING_HEURISTIC_BYPASS_CHAR =
  * CSS tokens. Comment tokens are dropped.
  *
  * @param {String} CSS source string
- * @param {Boolean} useInspectorCSSParser Set to true to use InspectorCSSParser.
 * @yield {CSSToken} The next CSSToken that is lexed
 * @see CSSToken for details about the returned tokens
 */
-function* cssTokenizer(string, useInspectorCSSParser = false) {
-  const lexer = getCSSLexer(string, useInspectorCSSParser);
+function* cssTokenizer(string) {
+  const lexer = new InspectorCSSParserWrapper(string);
   while (true) {
     const token = lexer.nextToken();
     if (!token) {
       break;
     }
     // None of the existing consumers want comments.
-    if (
-      token.tokenType !== "comment" ||
-      (useInspectorCSSParser && token.tokenType !== "Comment")
-    ) {
+    if (token.tokenType !== "Comment") {
       yield token;
     }
   }
@@ -76,7 +74,7 @@ function* cssTokenizer(string, useInspectorCSSParser = false) {
  * line and column information.
  */
 function cssTokenizerWithLineColumn(string) {
-  const lexer = getCSSLexer(string, true);
+  const lexer = new InspectorCSSParserWrapper(string);
   const result = [];
   let prevToken = undefined;
   while (true) {
@@ -299,7 +297,9 @@ function parseDeclarationsInternal(
     throw new Error("empty input string");
   }
 
-  const lexer = getCSSLexer(inputString, true, true);
+  const lexer = new InspectorCSSParserWrapper(inputString, {
+    trackEOFChars: true,
+  });
 
   let declarations = [getEmptyDeclaration()];
   let lastProp = declarations[0];
@@ -642,11 +642,7 @@ function parsePseudoClassesAndAttributes(value) {
 
   // See InspectorCSSToken dictionnary in InspectorUtils.webidl for more information
   // about the tokens.
-  const tokensIterator = cssTokenizer(
-    value,
-    // useInspectorCSSParser
-    true
-  );
+  const tokensIterator = cssTokenizer(value);
   const result = [];
   let current = "";
   let functionCount = 0;
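With the useInspectorCSSParser flag gone, cssTokenizer() above is a plain generator over Rust-style tokens that silently drops "Comment" tokens. A short usage sketch (the input string is illustrative, and the call assumes the generator is reachable from the consuming module, as the autocompleter and filter-widget hunks suggest):

for (const token of cssTokenizer("a { color: /* note */ red }")) {
  // Comment tokens never show up here; everything else arrives in order,
  // e.g. { tokenType: "Ident", text: "color", value: "color", ... }.
  console.log(token.tokenType, token.text);
}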

@@ -17,7 +17,7 @@ const MAX_DATA_URL_LENGTH = 40;
 
 loader.lazyRequireGetter(
   this,
-  "getCSSLexer",
+  "InspectorCSSParserWrapper",
   "resource://devtools/shared/css/lexer.js",
   true
 );
@@ -290,7 +290,7 @@ function prettifyCSS(text, ruleCount) {
   // minified file.
   let indent = "";
   let indentLevel = 0;
-  const lexer = getCSSLexer(text, true);
+  const lexer = new InspectorCSSParserWrapper(text);
   // List of mappings of token positions from original source to prettified source.
   const mappings = [];
   // Line and column offsets used to shift the token positions after prettyfication.

@@ -5,28 +5,15 @@
 
 "use strict";
 
-const jsLexer = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 
 add_task(function test_lexer() {
   const LEX_TESTS = [
-    [
-      "simple",
-      ["ident:simple"],
-      [{ tokenType: "Ident", text: "simple", value: "simple" }],
-    ],
+    ["simple", [{ tokenType: "Ident", text: "simple", value: "simple" }]],
     [
       "simple: { hi; }",
       [
-        "ident:simple",
-        "symbol::",
-        "whitespace",
-        "symbol:{",
-        "whitespace",
-        "ident:hi",
-        "symbol:;",
-        "whitespace",
-        "symbol:}",
-      ],
-      [
         { tokenType: "Ident", text: "simple", value: "simple" },
         { tokenType: "Colon", text: ":" },
@@ -41,30 +28,18 @@ add_task(function test_lexer() {
     ],
     [
       "/* whatever */",
-      ["comment"],
       [{ tokenType: "Comment", text: "/* whatever */", value: " whatever " }],
     ],
     [
       "'string'",
-      ["string:string"],
       [{ tokenType: "QuotedString", text: "'string'", value: "string" }],
     ],
     [
       '"string"',
-      ["string:string"],
       [{ tokenType: "QuotedString", text: `"string"`, value: "string" }],
     ],
     [
       "rgb(1,2,3)",
       [
-        "function:rgb",
-        "number",
-        "symbol:,",
-        "number",
-        "symbol:,",
-        "number",
-        "symbol:)",
-      ],
-      [
         { tokenType: "Function", text: "rgb(", value: "rgb" },
         { tokenType: "Number", text: "1", number: 1 },
@@ -75,30 +50,16 @@ add_task(function test_lexer() {
         { tokenType: "CloseParenthesis", text: ")" },
       ],
     ],
-    [
-      "@media",
-      ["at:media"],
-      [{ tokenType: "AtKeyword", text: "@media", value: "media" }],
-    ],
-    [
-      "#hibob",
-      ["id:hibob"],
-      [{ tokenType: "IDHash", text: "#hibob", value: "hibob" }],
-    ],
-    ["#123", ["hash:123"], [{ tokenType: "Hash", text: "#123", value: "123" }]],
+    ["@media", [{ tokenType: "AtKeyword", text: "@media", value: "media" }]],
+    ["#hibob", [{ tokenType: "IDHash", text: "#hibob", value: "hibob" }]],
+    ["#123", [{ tokenType: "Hash", text: "#123", value: "123" }]],
     [
       "23px",
-      ["dimension:px"],
       [{ tokenType: "Dimension", text: "23px", number: 23, unit: "px" }],
     ],
-    [
-      "23%",
-      ["percentage"],
-      [{ tokenType: "Percentage", text: "23%", number: 0.23 }],
-    ],
+    ["23%", [{ tokenType: "Percentage", text: "23%", number: 0.23 }]],
     [
       "url(http://example.com)",
-      ["url:http://example.com"],
       [
         {
           tokenType: "UnquotedUrl",
@@ -109,7 +70,6 @@ add_task(function test_lexer() {
     ],
     [
       "url('http://example.com')",
-      ["url:http://example.com"],
       [
         { tokenType: "Function", text: "url(", value: "url" },
         {
@@ -122,7 +82,6 @@ add_task(function test_lexer() {
     ],
     [
       "url( 'http://example.com' )",
-      ["url:http://example.com"],
       [
         { tokenType: "Function", text: "url(", value: "url" },
         { tokenType: "WhiteSpace", text: " " },
@@ -138,7 +97,6 @@ add_task(function test_lexer() {
     // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
     [
       "url(http://example.com",
-      ["url:http://example.com"],
       [
         {
           tokenType: "UnquotedUrl",
@@ -149,7 +107,6 @@ add_task(function test_lexer() {
     ],
     [
       "url(http://example.com @",
-      ["bad_url:http://example.com"],
       [
         {
           tokenType: "BadUrl",
@@ -160,34 +117,23 @@ add_task(function test_lexer() {
     ],
     [
       "quo\\ting",
-      ["ident:quoting"],
       [{ tokenType: "Ident", text: "quo\\ting", value: "quoting" }],
     ],
     [
       "'bad string\n",
-      ["bad_string:bad string", "whitespace"],
       [
         { tokenType: "BadString", text: "'bad string", value: "bad string" },
         { tokenType: "WhiteSpace", text: "\n" },
       ],
     ],
-    ["~=", ["includes"], [{ tokenType: "IncludeMatch", text: "~=" }]],
-    ["|=", ["dashmatch"], [{ tokenType: "DashMatch", text: "|=" }]],
-    ["^=", ["beginsmatch"], [{ tokenType: "PrefixMatch", text: "^=" }]],
-    ["$=", ["endsmatch"], [{ tokenType: "SuffixMatch", text: "$=" }]],
-    ["*=", ["containsmatch"], [{ tokenType: "SubstringMatch", text: "*=" }]],
+    ["~=", [{ tokenType: "IncludeMatch", text: "~=" }]],
+    ["|=", [{ tokenType: "DashMatch", text: "|=" }]],
+    ["^=", [{ tokenType: "PrefixMatch", text: "^=" }]],
+    ["$=", [{ tokenType: "SuffixMatch", text: "$=" }]],
+    ["*=", [{ tokenType: "SubstringMatch", text: "*=" }]],
 
     [
       "<!-- html comment -->",
       [
-        "htmlcomment",
-        "whitespace",
-        "ident:html",
-        "whitespace",
-        "ident:comment",
-        "whitespace",
-        "htmlcomment",
-      ],
-      [
         { tokenType: "CDO", text: "<!--" },
         { tokenType: "WhiteSpace", text: " " },
@@ -203,44 +149,36 @@ add_task(function test_lexer() {
     // unterminated comments are just comments.
     [
       "/* bad comment",
-      ["comment"],
       [{ tokenType: "Comment", text: "/* bad comment", value: " bad comment" }],
     ],
   ];
 
-  const test = (cssText, useInspectorCSSParser, tokenTypes) => {
-    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser);
+  const test = (cssText, tokenTypes) => {
+    const lexer = new InspectorCSSParserWrapper(cssText);
     let reconstructed = "";
     let lastTokenEnd = 0;
     let i = 0;
     let token;
     while ((token = lexer.nextToken())) {
-      let combined = token.tokenType;
-      if (token.text) {
-        combined += ":" + token.text;
-      }
-      if (useInspectorCSSParser) {
-        const expectedToken = tokenTypes[i];
-        Assert.deepEqual(
-          {
-            tokenType: token.tokenType,
-            text: token.text,
-            value: token.value,
-            number: token.number,
-            unit: token.unit,
-          },
-          {
-            tokenType: expectedToken.tokenType,
-            text: expectedToken.text,
-            value: expectedToken.value ?? null,
-            number: expectedToken.number ?? null,
-            unit: expectedToken.unit ?? null,
-          },
-          `Got expected token #${i} for "${cssText}"`
-        );
-      } else {
-        equal(combined, tokenTypes[i]);
-      }
+      const expectedToken = tokenTypes[i];
+      Assert.deepEqual(
+        {
+          tokenType: token.tokenType,
+          text: token.text,
+          value: token.value,
+          number: token.number,
+          unit: token.unit,
+        },
+        {
+          tokenType: expectedToken.tokenType,
+          text: expectedToken.text,
+          value: expectedToken.value ?? null,
+          number: expectedToken.number ?? null,
+          unit: expectedToken.unit ?? null,
+        },
+        `Got expected token #${i} for "${cssText}"`
+      );
+
       Assert.greater(token.endOffset, token.startOffset);
       equal(token.startOffset, lastTokenEnd);
       lastTokenEnd = token.endOffset;
@@ -253,41 +191,29 @@ add_task(function test_lexer() {
     equal(reconstructed, cssText);
   };
 
-  for (const [cssText, jsTokenTypes, rustTokenTypes] of LEX_TESTS) {
-    info(`Test "${cssText}" with js-based lexer`);
-    test(cssText, false, jsTokenTypes);
-
-    info(`Test "${cssText}" with rust-based lexer`);
-    test(cssText, true, rustTokenTypes);
+  for (const [cssText, rustTokenTypes] of LEX_TESTS) {
+    info(`Test "${cssText}"`);
+    test(cssText, rustTokenTypes);
   }
 });
 
 add_task(function test_lexer_linecol() {
   const LINECOL_TESTS = [
-    ["simple", ["ident:0:0", ":0:6"], ["Ident:0:0", ":0:6"]],
-    [
-      "\n stuff",
-      ["whitespace:0:0", "ident:1:4", ":1:9"],
-      ["WhiteSpace:0:0", "Ident:1:4", ":1:9"],
-    ],
+    ["simple", ["Ident:0:0", ":0:6"]],
+    ["\n stuff", ["WhiteSpace:0:0", "Ident:1:4", ":1:9"]],
     [
       '"string with \\\nnewline" \r\n',
-      ["string:0:0", "whitespace:1:8", ":2:0"],
       ["QuotedString:0:0", "WhiteSpace:1:8", ":2:0"],
     ],
   ];
 
-  const test = (cssText, useInspectorCSSParser, locations) => {
-    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser);
+  const test = (cssText, locations) => {
+    const lexer = new InspectorCSSParserWrapper(cssText);
     let i = 0;
     let token;
     const testLocation = () => {
-      const startLine = useInspectorCSSParser
-        ? lexer.parser.lineNumber
-        : lexer.lineNumber;
-      const startColumn = useInspectorCSSParser
-        ? lexer.parser.columnNumber
-        : lexer.columnNumber;
+      const startLine = lexer.parser.lineNumber;
+      const startColumn = lexer.parser.columnNumber;
 
       // We do this in a bit of a funny way so that we can also test the
       // location of the EOF.
@@ -308,12 +234,9 @@ add_task(function test_lexer_linecol() {
     equal(i, locations.length);
   };
 
-  for (const [cssText, jsLocations, rustLocations] of LINECOL_TESTS) {
-    info(`Test "${cssText}" with js-based lexer`);
-    test(cssText, false, jsLocations);
-
-    info(`Test "${cssText}" with rust-based lexer`);
-    test(cssText, true, rustLocations);
+  for (const [cssText, rustLocations] of LINECOL_TESTS) {
+    info(`Test "${cssText}"`);
+    test(cssText, rustLocations);
   }
 });
 
@@ -340,17 +263,13 @@ add_task(function test_lexer_eofchar() {
     ["'\\", "\\'", "'", ""],
   ];
 
-  const test = (
-    cssText,
-    useInspectorCSSParser,
-    expectedAppend,
-    expectedNoAppend,
-    argText
-  ) => {
+  const test = (cssText, expectedAppend, expectedNoAppend, argText) => {
     if (!expectedNoAppend) {
       expectedNoAppend = expectedAppend;
     }
-    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser, true);
+    const lexer = new InspectorCSSParserWrapper(cssText, {
+      trackEOFChars: true,
+    });
     while (lexer.nextToken()) {
       // We don't need to do anything with the tokens. We only want to consume the iterator
       // so we can safely call performEOFFixup.
@@ -371,10 +290,7 @@ add_task(function test_lexer_eofchar() {
     expectedNoAppend,
     argText = cssText,
   ] of EOFCHAR_TESTS) {
-    info(`Test "${cssText}" with js-based lexer`);
-    test(cssText, false, expectedAppend, expectedNoAppend, argText);
-
-    info(`Test "${cssText}" with rust-based lexer`);
-    test(cssText, true, expectedAppend, expectedNoAppend, argText);
+    info(`Test "${cssText}"`);
+    test(cssText, expectedAppend, expectedNoAppend, argText);
   }
 });